/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_reg.h"
/*
 * Common GART table functions.
 */
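/*
 * radeon_gart_table_ram_alloc - allocate a GART page table in system RAM.
 * @rdev: radeon device
 *
 * Allocates coherent system memory for the GART table with
 * pci_alloc_consistent(); on RS400/RS480/RS690/RS740 the mapping is made
 * uncached (set_memory_uc) before the table is cleared.
 */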
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
	ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
				   &rdev->gart.table_addr);
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		set_memory_uc((unsigned long)ptr,
			      rdev->gart.table_size >> PAGE_SHIFT);
	memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
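/*
 * radeon_gart_table_ram_free - free the system RAM GART page table.
 * @rdev: radeon device
 *
 * Restores a write-back mapping on RS400/RS480/RS690/RS740 (set_memory_wb),
 * releases the coherent allocation with pci_free_consistent() and clears
 * the cached pointer and table address.
 */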
void radeon_gart_table_ram_free(struct radeon_device *rdev)
	if (rdev->gart.ptr == NULL) {
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		set_memory_wb((unsigned long)rdev->gart.ptr,
			      rdev->gart.table_size >> PAGE_SHIFT);
	pci_free_consistent(rdev->pdev, rdev->gart.table_size,
			    (void *)rdev->gart.ptr,
			    rdev->gart.table_addr);
	rdev->gart.ptr = NULL;
	rdev->gart.table_addr = 0;
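/*
 * radeon_gart_table_vram_alloc - allocate a VRAM buffer for the GART table.
 * @rdev: radeon device
 *
 * Creates a page-aligned buffer object of gart.table_size bytes in the
 * VRAM domain (radeon_bo_create) if one does not already exist.
 */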
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
	if (rdev->gart.robj == NULL) {
		r = radeon_bo_create(rdev, rdev->gart.table_size,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     NULL, &rdev->gart.robj);
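/*
 * radeon_gart_table_vram_pin - pin and map the VRAM GART table.
 * @rdev: radeon device
 *
 * Reserves the table buffer object, pins it in VRAM, kmaps it for CPU
 * access and records the resulting GPU address in gart.table_addr.
 */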
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
	r = radeon_bo_reserve(rdev->gart.robj, false);
	r = radeon_bo_pin(rdev->gart.robj,
			  RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
		radeon_bo_unreserve(rdev->gart.robj);
	r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
		radeon_bo_unpin(rdev->gart.robj);
	radeon_bo_unreserve(rdev->gart.robj);
	rdev->gart.table_addr = gpu_addr;
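/*
 * radeon_gart_table_vram_unpin - unmap and unpin the VRAM GART table.
 * @rdev: radeon device
 *
 * If the table object exists and can be reserved, the CPU mapping is torn
 * down (kunmap), the object is unpinned and unreserved, and the cached
 * gart.ptr is cleared.
 */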
void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
	if (rdev->gart.robj == NULL) {
	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->gart.robj);
		radeon_bo_unpin(rdev->gart.robj);
		radeon_bo_unreserve(rdev->gart.robj);
		rdev->gart.ptr = NULL;
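/*
 * radeon_gart_table_vram_free - free the VRAM GART table object.
 * @rdev: radeon device
 *
 * Unpins the table (radeon_gart_table_vram_unpin) and drops the last
 * reference to the buffer object (radeon_bo_unref).
 */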
void radeon_gart_table_vram_free(struct radeon_device *rdev)
	if (rdev->gart.robj == NULL) {
	radeon_gart_table_vram_unpin(rdev);
	radeon_bo_unref(&rdev->gart.robj);
/*
 * Common GART functions.
 */
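/*
 * radeon_gart_unbind - unbind pages from the GART aperture.
 * @rdev: radeon device
 * @offset: byte offset into the GART aperture
 * @pages: number of CPU pages to unbind
 *
 * Points the affected GART entries back at the dummy page and flushes the
 * GART TLB.  Warns if the GART has not been initialized.  Each CPU page
 * covers PAGE_SIZE / RADEON_GPU_PAGE_SIZE GPU page table entries.
 */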
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
	if (!rdev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART!\n");
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	for (i = 0; i < pages; i++, p++) {
		if (rdev->gart.pages[p]) {
			rdev->gart.pages[p] = NULL;
			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
			page_base = rdev->gart.pages_addr[p];
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				if (rdev->gart.ptr) {
					radeon_gart_set_page(rdev, t, page_base);
				page_base += RADEON_GPU_PAGE_SIZE;
	radeon_gart_tlb_flush(rdev);
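/*
 * radeon_gart_bind - bind pages into the GART aperture.
 * @rdev: radeon device
 * @offset: byte offset into the GART aperture
 * @pages: number of CPU pages to bind
 * @pagelist: pages backing the mapping
 * @dma_addr: DMA addresses of those pages
 *
 * Records the page pointers and DMA addresses, writes the corresponding GPU
 * page table entries when the table is CPU-mapped, and flushes the GART TLB.
 * Warns if the GART has not been initialized.
 */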
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
		     int pages, struct page **pagelist, dma_addr_t *dma_addr)
	if (!rdev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART!\n");
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	for (i = 0; i < pages; i++, p++) {
		rdev->gart.pages_addr[p] = dma_addr[i];
		rdev->gart.pages[p] = pagelist[i];
		if (rdev->gart.ptr) {
			page_base = rdev->gart.pages_addr[p];
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				radeon_gart_set_page(rdev, t, page_base);
				page_base += RADEON_GPU_PAGE_SIZE;
	radeon_gart_tlb_flush(rdev);
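/*
 * radeon_gart_restore - rewrite the whole GART page table.
 * @rdev: radeon device
 *
 * Replays every cached pages_addr[] entry into the GPU page table and
 * flushes the GART TLB, typically after the table backing store has been
 * re-pinned and the CPU mapping is valid again.
 */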
void radeon_gart_restore(struct radeon_device *rdev)
	if (!rdev->gart.ptr) {
	for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
		page_base = rdev->gart.pages_addr[i];
		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
			radeon_gart_set_page(rdev, t, page_base);
			page_base += RADEON_GPU_PAGE_SIZE;
	radeon_gart_tlb_flush(rdev);
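/*
 * radeon_gart_init - allocate the CPU-side GART bookkeeping.
 * @rdev: radeon device
 *
 * Sets up the dummy page, computes the number of CPU and GPU pages covered
 * by the GTT, allocates the pages[] and pages_addr[] arrays and points every
 * entry at the dummy page.  Requires PAGE_SIZE >= RADEON_GPU_PAGE_SIZE.
 */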
int radeon_gart_init(struct radeon_device *rdev)
	if (rdev->gart.pages) {
	/* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
	if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
		DRM_ERROR("Page size is smaller than GPU page size!\n");
	r = radeon_dummy_page_init(rdev);
	/* Compute table size */
	rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
	rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
		 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
	/* Allocate pages table */
	rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
				   GFP_KERNEL);
	if (rdev->gart.pages == NULL) {
		radeon_gart_fini(rdev);
	rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) *
					rdev->gart.num_cpu_pages, GFP_KERNEL);
	if (rdev->gart.pages_addr == NULL) {
		radeon_gart_fini(rdev);
	/* set GART entries to point to the dummy page by default */
	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
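/*
 * radeon_gart_fini - tear down the CPU-side GART bookkeeping.
 * @rdev: radeon device
 *
 * Unbinds any pages that are still mapped, frees the pages[] and
 * pages_addr[] arrays and releases the dummy page.
 */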
void radeon_gart_fini(struct radeon_device *rdev)
	if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
	rdev->gart.ready = false;
	kfree(rdev->gart.pages);
	kfree(rdev->gart.pages_addr);
	rdev->gart.pages = NULL;
	rdev->gart.pages_addr = NULL;
	radeon_dummy_page_fini(rdev);
/*
 * TODO: bind a default page at vm initialization for default address.
 */
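/*
 * radeon_vm_manager_init - init the GPU VM manager.
 * @rdev: radeon device
 *
 * On first use, creates the SA manager backing the per-VM page tables
 * (max_pfn * 8 bytes in VRAM), calls the ASIC-specific init hook and starts
 * the SA manager.  Then rebuilds the page tables of every VM on the LRU list
 * by re-validating each bo_va and re-binding the VM.
 */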
int radeon_vm_manager_init(struct radeon_device *rdev)
	struct radeon_vm *vm;
	struct radeon_bo_va *bo_va;
	if (!rdev->vm_manager.enabled) {
		/* mark first vm as always in use, it's the system one */
		r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
					      rdev->vm_manager.max_pfn * 8,
					      RADEON_GEM_DOMAIN_VRAM);
			dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
				(rdev->vm_manager.max_pfn * 8) >> 10);
		r = rdev->vm_manager.funcs->init(rdev);
		rdev->vm_manager.enabled = true;
		r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
	/* restore page table */
	list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
		list_for_each_entry(bo_va, &vm->va, vm_list) {
			struct ttm_mem_reg *mem = NULL;
			mem = &bo_va->bo->tbo.mem;
			bo_va->valid = false;
			r = radeon_vm_bo_update_pte(rdev, vm, bo_va->bo, mem);
				DRM_ERROR("Failed to update pte for vm %d!\n", vm->id);
		r = rdev->vm_manager.funcs->bind(rdev, vm, vm->id);
			DRM_ERROR("Failed to bind vm %d!\n", vm->id);
/* global mutex must be locked */
static void radeon_vm_unbind_locked(struct radeon_device *rdev,
				    struct radeon_vm *vm)
	struct radeon_bo_va *bo_va;
	/* wait for vm use to end */
		r = radeon_fence_wait(vm->fence, false);
			DRM_ERROR("error while waiting for fence: %d\n", r);
			mutex_unlock(&rdev->vm_manager.lock);
			r = radeon_gpu_reset(rdev);
			mutex_lock(&rdev->vm_manager.lock);
	radeon_fence_unref(&vm->fence);
	rdev->vm_manager.funcs->unbind(rdev, vm);
	rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
	list_del_init(&vm->list);
	radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
	list_for_each_entry(bo_va, &vm->va, vm_list) {
		bo_va->valid = false;
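/*
 * radeon_vm_manager_fini - tear down the GPU VM manager.
 * @rdev: radeon device
 *
 * Unbinds every VM still on the LRU list under vm_manager.lock, calls the
 * ASIC-specific fini hook and suspends and destroys the SA manager that
 * backed the page tables.
 */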
void radeon_vm_manager_fini(struct radeon_device *rdev)
	struct radeon_vm *vm, *tmp;
	if (!rdev->vm_manager.enabled)
	mutex_lock(&rdev->vm_manager.lock);
	/* unbind all active vms */
	list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
		radeon_vm_unbind_locked(rdev, vm);
	rdev->vm_manager.funcs->fini(rdev);
	mutex_unlock(&rdev->vm_manager.lock);
	radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
	radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
	rdev->vm_manager.enabled = false;
/* global mutex must be locked */
void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
	mutex_lock(&vm->mutex);
	radeon_vm_unbind_locked(rdev, vm);
	mutex_unlock(&vm->mutex);
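/*
 * radeon_vm_bind - bind a VM to a hardware VM id and page table.
 * @rdev: radeon device
 * @vm: virtual memory space to bind
 *
 * Updates the VM's position on the LRU list, allocates SA memory for its
 * page table (evicting the least recently used VM when space or hardware
 * ids run out), clears the table, binds it through the ASIC hook and
 * finally updates the page table entries of the ring temporary buffer
 * object (rdev->ring_tmp_bo.bo).
 */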
/* global and local mutex must be locked */
int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
	struct radeon_vm *vm_evict;
		list_del_init(&vm->list);
		list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
			     RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
			     RADEON_GPU_PAGE_SIZE, false);
		if (list_empty(&rdev->vm_manager.lru_vm)) {
		vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
		radeon_vm_unbind(rdev, vm_evict);
	vm->pt = radeon_sa_bo_cpu_addr(vm->sa_bo);
	vm->pt_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
	memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
	/* search for free vm */
	for (i = 0; i < rdev->vm_manager.nvm; i++) {
		if (!(rdev->vm_manager.use_bitmap & (1 << i))) {
	/* evict vm if necessary */
		vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
		radeon_vm_unbind(rdev, vm_evict);
	r = rdev->vm_manager.funcs->bind(rdev, vm, id);
		radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
	rdev->vm_manager.use_bitmap |= 1 << id;
	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
	return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
				       &rdev->ring_tmp_bo.bo->tbo.mem);
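/*
 * radeon_vm_bo_add - add a buffer object to a VM at a given offset.
 * @rdev: radeon device
 * @vm: virtual memory space
 * @bo: buffer object
 *
 * Allocates a radeon_bo_va, validates the requested virtual range (it must
 * be non-empty, below max_pfn and must not overlap an existing mapping),
 * grows the VM address space in 32M steps when needed, and inserts the new
 * bo_va into the VM's sorted va list and the bo's own list.
 */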
/* object has to be reserved */
int radeon_vm_bo_add(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_bo *bo,
	struct radeon_bo_va *bo_va, *tmp;
	struct list_head *head;
	uint64_t size = radeon_bo_size(bo), last_offset = 0;
	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
	bo_va->soffset = offset;
	bo_va->eoffset = offset + size;
	bo_va->flags = flags;
	bo_va->valid = false;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->vm_list);
	/* make sure object fits at this offset */
	if (bo_va->soffset >= bo_va->eoffset) {
	last_pfn = bo_va->eoffset / RADEON_GPU_PAGE_SIZE;
	if (last_pfn > rdev->vm_manager.max_pfn) {
		dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
			last_pfn, rdev->vm_manager.max_pfn);
	mutex_lock(&vm->mutex);
	if (last_pfn > vm->last_pfn) {
		/* release mutex and lock in right order */
		mutex_unlock(&vm->mutex);
		mutex_lock(&rdev->vm_manager.lock);
		mutex_lock(&vm->mutex);
		/* and check again */
		if (last_pfn > vm->last_pfn) {
			/* grow va space 32M by 32M */
			unsigned align = ((32 << 20) >> 12) - 1;
			radeon_vm_unbind_locked(rdev, vm);
			vm->last_pfn = (last_pfn + align) & ~align;
		mutex_unlock(&rdev->vm_manager.lock);
	list_for_each_entry(tmp, &vm->va, vm_list) {
		if (bo_va->soffset >= last_offset && bo_va->eoffset < tmp->soffset) {
			/* bo can be added before this one */
		if (bo_va->soffset >= tmp->soffset && bo_va->soffset < tmp->eoffset) {
			/* bo and tmp overlap, invalid offset */
			dev_err(rdev->dev, "bo %p va 0x%08X conflicts with (bo %p 0x%08X 0x%08X)\n",
				bo, (unsigned)bo_va->soffset, tmp->bo,
				(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
			mutex_unlock(&vm->mutex);
		last_offset = tmp->eoffset;
		head = &tmp->vm_list;
	list_add(&bo_va->vm_list, head);
	list_add_tail(&bo_va->bo_list, &bo->va);
	mutex_unlock(&vm->mutex);
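/*
 * radeon_vm_get_addr - compute the physical address backing a GPU pfn.
 * @rdev: radeon device
 * @mem: TTM memory placement of the buffer object
 *
 * For VRAM placements the address is the placement offset plus the VRAM
 * base offset; for GTT placements it is looked up in the GART pages_addr[]
 * table, with a correction for CPU pages larger than GPU pages.
 */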
static u64 radeon_vm_get_addr(struct radeon_device *rdev,
			      struct ttm_mem_reg *mem,
	switch (mem->mem_type) {
		addr = (mem->start << PAGE_SHIFT);
		addr += pfn * RADEON_GPU_PAGE_SIZE;
		addr += rdev->vm_manager.vram_base_offset;
		/* offset inside page table */
		addr = mem->start << PAGE_SHIFT;
		addr += pfn * RADEON_GPU_PAGE_SIZE;
		addr = addr >> PAGE_SHIFT;
		/* page table offset */
		addr = rdev->gart.pages_addr[addr];
		/* in case cpu page size != gpu page size */
		addr += (pfn * RADEON_GPU_PAGE_SIZE) & (~PAGE_MASK);
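/*
 * radeon_vm_bo_update_pte - update the page table entries of one bo_va.
 * @rdev: radeon device
 * @vm: virtual memory space
 * @bo: buffer object
 * @mem: new TTM placement, or NULL to invalidate the mapping
 *
 * Refreshes the RADEON_VM_PAGE_VALID/SYSTEM flags from the placement,
 * computes the backing address of every GPU page through
 * radeon_vm_get_addr(), writes the entries with the ASIC set_page hook and
 * flushes the VM TLB.  Does nothing if the VM is not bound.
 */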
/* object has to be reserved & global and local mutexes must be locked */
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
			    struct radeon_vm *vm,
			    struct radeon_bo *bo,
			    struct ttm_mem_reg *mem)
	struct radeon_bo_va *bo_va;
	unsigned ngpu_pages, i;
	uint64_t addr = 0, pfn;
	/* nothing to do if vm isn't bound */
	bo_va = radeon_bo_va(bo, vm);
		dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
	ngpu_pages = radeon_bo_ngpu_pages(bo);
	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
		if (mem->mem_type != TTM_PL_SYSTEM) {
			bo_va->flags |= RADEON_VM_PAGE_VALID;
		if (mem->mem_type == TTM_PL_TT) {
			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
	pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
	flags = rdev->vm_manager.funcs->page_flags(rdev, bo_va->vm, bo_va->flags);
	for (i = 0, addr = 0; i < ngpu_pages; i++) {
		if (mem && bo_va->valid) {
			addr = radeon_vm_get_addr(rdev, mem, i);
		rdev->vm_manager.funcs->set_page(rdev, bo_va->vm, i + pfn, addr, flags);
	rdev->vm_manager.funcs->tlb_flush(rdev, bo_va->vm);
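/*
 * radeon_vm_bo_rmv - remove a buffer object from a VM.
 * @rdev: radeon device
 * @vm: virtual memory space
 * @bo: buffer object
 *
 * Clears the bo's page table entries (update_pte with a NULL placement)
 * under the manager and VM mutexes, then unlinks the bo_va from both the
 * VM's va list and the bo's list.
 */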
/* object has to be reserved */
int radeon_vm_bo_rmv(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_bo *bo)
	struct radeon_bo_va *bo_va;
	bo_va = radeon_bo_va(bo, vm);
	mutex_lock(&rdev->vm_manager.lock);
	mutex_lock(&vm->mutex);
	radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
	mutex_unlock(&rdev->vm_manager.lock);
	list_del(&bo_va->vm_list);
	mutex_unlock(&vm->mutex);
	list_del(&bo_va->bo_list);
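/*
 * radeon_vm_bo_invalidate - mark every mapping of a bo as invalid.
 * @rdev: radeon device
 * @bo: buffer object (must be reserved)
 *
 * Walks the bo's list of bo_va structures and clears their valid flag so
 * the page table entries get rewritten on the next update.
 */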
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo)
	struct radeon_bo_va *bo_va;
	BUG_ON(!atomic_read(&bo->tbo.reserved));
	list_for_each_entry(bo_va, &bo->va, bo_list) {
		bo_va->valid = false;
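/*
 * radeon_vm_init - initialize a new virtual memory space.
 * @rdev: radeon device
 * @vm: VM to initialize
 *
 * Sets up the VM mutex and lists and maps the ring temporary buffer object
 * at virtual address 0 with readable and snooped page flags.
 */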
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
	mutex_init(&vm->mutex);
	INIT_LIST_HEAD(&vm->list);
	INIT_LIST_HEAD(&vm->va);
	/* map the ib pool buffer at 0 in virtual address space, set
	 * it read only
	 */
	r = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo, 0,
			     RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
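/*
 * radeon_vm_fini - tear down a virtual memory space.
 * @rdev: radeon device
 * @vm: VM to destroy
 *
 * Unbinds the VM under the manager lock, removes the ring temporary buffer
 * mapping, then warns about and unlinks any bo_va still left on the VM's
 * va list.
 */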
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
	struct radeon_bo_va *bo_va, *tmp;
	mutex_lock(&rdev->vm_manager.lock);
	mutex_lock(&vm->mutex);
	radeon_vm_unbind_locked(rdev, vm);
	mutex_unlock(&rdev->vm_manager.lock);
	r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
		bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
		list_del_init(&bo_va->bo_list);
		list_del_init(&bo_va->vm_list);
		radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
	if (!list_empty(&vm->va)) {
		dev_err(rdev->dev, "still active bo inside vm\n");
	list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
		list_del_init(&bo_va->vm_list);
		r = radeon_bo_reserve(bo_va->bo, false);
			list_del_init(&bo_va->bo_list);
			radeon_bo_unreserve(bo_va->bo);
	mutex_unlock(&vm->mutex);