drm/radeon: remove vm_manager start/suspend
drivers/gpu/drm/radeon/radeon_gart.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_reg.h"

/*
 * Common GART table functions.
 */
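/**
 * radeon_gart_table_ram_alloc - allocate system ram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Allocate system memory for the GART page table, for asics that keep
 * the gart table in system memory (rs400/rs480 and rs690/rs740 need the
 * mapping set to uncached on x86).
 * Returns 0 on success, -ENOMEM on failure.
 */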
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
        void *ptr;

        ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
                                   &rdev->gart.table_addr);
        if (ptr == NULL) {
                return -ENOMEM;
        }
#ifdef CONFIG_X86
        if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
            rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
                set_memory_uc((unsigned long)ptr,
                              rdev->gart.table_size >> PAGE_SHIFT);
        }
#endif
        rdev->gart.ptr = ptr;
        memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
        return 0;
}

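/**
 * radeon_gart_table_ram_free - free system ram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Free the system memory used for the GART page table, restoring the
 * write-back caching attribute on the chips that required uncached access.
 */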
void radeon_gart_table_ram_free(struct radeon_device *rdev)
{
        if (rdev->gart.ptr == NULL) {
                return;
        }
#ifdef CONFIG_X86
        if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
            rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
                set_memory_wb((unsigned long)rdev->gart.ptr,
                              rdev->gart.table_size >> PAGE_SHIFT);
        }
#endif
        pci_free_consistent(rdev->pdev, rdev->gart.table_size,
                            (void *)rdev->gart.ptr,
                            rdev->gart.table_addr);
        rdev->gart.ptr = NULL;
        rdev->gart.table_addr = 0;
}

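/**
 * radeon_gart_table_vram_alloc - allocate vram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Allocate a buffer object in VRAM to hold the GART page table, for
 * asics that keep the gart table in video memory.
 * Returns 0 on success, error code on failure.
 */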
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
        int r;

        if (rdev->gart.robj == NULL) {
                r = radeon_bo_create(rdev, rdev->gart.table_size,
                                     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
                                     NULL, &rdev->gart.robj);
                if (r) {
                        return r;
                }
        }
        return 0;
}

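/**
 * radeon_gart_table_vram_pin - pin the gart page table in vram
 *
 * @rdev: radeon_device pointer
 *
 * Pin the GART page table buffer object in VRAM so it will not be moved
 * by the memory manager, and map it for CPU access.
 * Returns 0 on success, error code on failure.
 */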
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
{
        uint64_t gpu_addr;
        int r;

        r = radeon_bo_reserve(rdev->gart.robj, false);
        if (unlikely(r != 0))
                return r;
        r = radeon_bo_pin(rdev->gart.robj,
                                RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
        if (r) {
                radeon_bo_unreserve(rdev->gart.robj);
                return r;
        }
        r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
        if (r)
                radeon_bo_unpin(rdev->gart.robj);
        radeon_bo_unreserve(rdev->gart.robj);
        rdev->gart.table_addr = gpu_addr;
        return r;
}

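/**
 * radeon_gart_table_vram_unpin - unpin the gart page table in vram
 *
 * @rdev: radeon_device pointer
 *
 * Unmap and unpin the GART page table buffer object in VRAM.
 */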
void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
{
        int r;

        if (rdev->gart.robj == NULL) {
                return;
        }
        r = radeon_bo_reserve(rdev->gart.robj, false);
        if (likely(r == 0)) {
                radeon_bo_kunmap(rdev->gart.robj);
                radeon_bo_unpin(rdev->gart.robj);
                radeon_bo_unreserve(rdev->gart.robj);
                rdev->gart.ptr = NULL;
        }
}

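/**
 * radeon_gart_table_vram_free - free the gart page table vram
 *
 * @rdev: radeon_device pointer
 *
 * Unpin and free the buffer object used to hold the GART page table.
 */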
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
        if (rdev->gart.robj == NULL) {
                return;
        }
        radeon_gart_table_vram_unpin(rdev);
        radeon_bo_unref(&rdev->gart.robj);
}

/*
 * Common gart functions.
 */
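/**
 * radeon_gart_unbind - unbind pages from the gart page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to unbind
 *
 * Unbind the requested pages from the gart page table and replace
 * their entries with the dummy page.
 */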
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
                        int pages)
{
        unsigned t;
        unsigned p;
        int i, j;
        u64 page_base;

        if (!rdev->gart.ready) {
                WARN(1, "trying to unbind memory from uninitialized GART!\n");
                return;
        }
        t = offset / RADEON_GPU_PAGE_SIZE;
        p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
        for (i = 0; i < pages; i++, p++) {
                if (rdev->gart.pages[p]) {
                        rdev->gart.pages[p] = NULL;
                        rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
                        page_base = rdev->gart.pages_addr[p];
                        for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
                                if (rdev->gart.ptr) {
                                        radeon_gart_set_page(rdev, t, page_base);
                                }
                                page_base += RADEON_GPU_PAGE_SIZE;
                        }
                }
        }
        mb();
        radeon_gart_tlb_flush(rdev);
}

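/**
 * radeon_gart_bind - bind pages into the gart page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @pagelist: pages to bind
 * @dma_addr: DMA addresses of the pages
 *
 * Bind the requested pages to the gart page table.
 * Returns 0 on success, -EINVAL if the gart is not ready.
 */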
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
                     int pages, struct page **pagelist, dma_addr_t *dma_addr)
{
        unsigned t;
        unsigned p;
        uint64_t page_base;
        int i, j;

        if (!rdev->gart.ready) {
                WARN(1, "trying to bind memory to uninitialized GART!\n");
                return -EINVAL;
        }
        t = offset / RADEON_GPU_PAGE_SIZE;
        p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);

        for (i = 0; i < pages; i++, p++) {
                rdev->gart.pages_addr[p] = dma_addr[i];
                rdev->gart.pages[p] = pagelist[i];
                if (rdev->gart.ptr) {
                        page_base = rdev->gart.pages_addr[p];
                        for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
                                radeon_gart_set_page(rdev, t, page_base);
                                page_base += RADEON_GPU_PAGE_SIZE;
                        }
                }
        }
        mb();
        radeon_gart_tlb_flush(rdev);
        return 0;
}

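/**
 * radeon_gart_restore - restore the gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Rewrite every gart page table entry from the tracked page addresses,
 * typically used when re-enabling the gart after the table memory was
 * invalidated.
 */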
void radeon_gart_restore(struct radeon_device *rdev)
{
        int i, j, t;
        u64 page_base;

        if (!rdev->gart.ptr) {
                return;
        }
        for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
                page_base = rdev->gart.pages_addr[i];
                for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
                        radeon_gart_set_page(rdev, t, page_base);
                        page_base += RADEON_GPU_PAGE_SIZE;
                }
        }
        mb();
        radeon_gart_tlb_flush(rdev);
}

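/**
 * radeon_gart_init - init the driver info for managing the gart
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page and the page tracking arrays, and point
 * every gart entry at the dummy page by default.
 * Returns 0 on success, error code on failure.
 */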
int radeon_gart_init(struct radeon_device *rdev)
{
        int r, i;

        if (rdev->gart.pages) {
                return 0;
        }
        /* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
        if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
                DRM_ERROR("Page size is smaller than GPU page size!\n");
                return -EINVAL;
        }
        r = radeon_dummy_page_init(rdev);
        if (r)
                return r;
        /* Compute table size */
        rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
        rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
        DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
                 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
        /* Allocate pages table */
        rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
                                   GFP_KERNEL);
        if (rdev->gart.pages == NULL) {
                radeon_gart_fini(rdev);
                return -ENOMEM;
        }
        rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) *
                                        rdev->gart.num_cpu_pages, GFP_KERNEL);
        if (rdev->gart.pages_addr == NULL) {
                radeon_gart_fini(rdev);
                return -ENOMEM;
        }
        /* set GART entry to point to the dummy page by default */
        for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
                rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
        }
        return 0;
}

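/**
 * radeon_gart_fini - tear down the driver info for managing the gart
 *
 * @rdev: radeon_device pointer
 *
 * Unbind any remaining pages, then free the page tracking arrays and
 * the dummy page.
 */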
void radeon_gart_fini(struct radeon_device *rdev)
{
        if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
                /* unbind pages */
                radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
        }
        rdev->gart.ready = false;
        kfree(rdev->gart.pages);
        kfree(rdev->gart.pages_addr);
        rdev->gart.pages = NULL;
        rdev->gart.pages_addr = NULL;

        radeon_dummy_page_fini(rdev);
}

/*
 * vm helpers
 *
 * TODO bind a default page at vm initialization for default address
 */

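/**
 * radeon_vm_manager_init - init the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Init and start the suballocator holding the page tables, then restore
 * the page tables of any vms that are still bound.
 * Returns 0 on success, error code on failure.
 */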
int radeon_vm_manager_init(struct radeon_device *rdev)
{
        struct radeon_vm *vm;
        struct radeon_bo_va *bo_va;
        int r;

        if (!rdev->vm_manager.enabled) {
                /* mark first vm as always in use, it's the system one */
                r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
                                              rdev->vm_manager.max_pfn * 8,
                                              RADEON_GEM_DOMAIN_VRAM);
                if (r) {
                        dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
                                (rdev->vm_manager.max_pfn * 8) >> 10);
                        return r;
                }

                r = rdev->vm_manager.funcs->init(rdev);
                if (r)
                        return r;

                rdev->vm_manager.enabled = true;

                r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
                if (r)
                        return r;
        }

        /* restore page table */
        list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
                if (vm->id == -1)
                        continue;

                list_for_each_entry(bo_va, &vm->va, vm_list) {
                        struct ttm_mem_reg *mem = NULL;
                        if (bo_va->valid)
                                mem = &bo_va->bo->tbo.mem;

                        bo_va->valid = false;
                        r = radeon_vm_bo_update_pte(rdev, vm, bo_va->bo, mem);
                        if (r) {
                                DRM_ERROR("Failed to update pte for vm %d!\n", vm->id);
                        }
                }

                r = rdev->vm_manager.funcs->bind(rdev, vm, vm->id);
                if (r) {
                        DRM_ERROR("Failed to bind vm %d!\n", vm->id);
                }
        }
        return 0;
}

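/**
 * radeon_vm_unbind_locked - unbind a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm to unbind
 *
 * Wait for any use of the vm to finish, unbind the page table from the
 * hardware, free the page table memory and mark all mappings invalid.
 */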
/* global mutex must be locked */
static void radeon_vm_unbind_locked(struct radeon_device *rdev,
                                    struct radeon_vm *vm)
{
        struct radeon_bo_va *bo_va;

        if (vm->id == -1) {
                return;
        }

        /* wait for vm use to end */
        while (vm->fence) {
                int r;
                r = radeon_fence_wait(vm->fence, false);
                if (r)
                        DRM_ERROR("error while waiting for fence: %d\n", r);
                if (r == -EDEADLK) {
                        mutex_unlock(&rdev->vm_manager.lock);
                        r = radeon_gpu_reset(rdev);
                        mutex_lock(&rdev->vm_manager.lock);
                        if (!r)
                                continue;
                }
                break;
        }
        radeon_fence_unref(&vm->fence);

        /* hw unbind */
        rdev->vm_manager.funcs->unbind(rdev, vm);
        rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
        list_del_init(&vm->list);
        vm->id = -1;
        radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
        vm->pt = NULL;

        list_for_each_entry(bo_va, &vm->va, vm_list) {
                bo_va->valid = false;
        }
}

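/**
 * radeon_vm_manager_fini - tear down the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Unbind all active vms and tear down the suballocator.
 */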
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
        struct radeon_vm *vm, *tmp;

        if (!rdev->vm_manager.enabled)
                return;

        mutex_lock(&rdev->vm_manager.lock);
        /* unbind all active vm */
        list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
                radeon_vm_unbind_locked(rdev, vm);
        }
        rdev->vm_manager.funcs->fini(rdev);
        mutex_unlock(&rdev->vm_manager.lock);

        radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
        radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
        rdev->vm_manager.enabled = false;
}

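/**
 * radeon_vm_unbind - locking wrapper around radeon_vm_unbind_locked
 *
 * @rdev: radeon_device pointer
 * @vm: vm to unbind
 *
 * Take the per-vm mutex and unbind the vm.
 */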
/* global mutex must be locked */
void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
{
        mutex_lock(&vm->mutex);
        radeon_vm_unbind_locked(rdev, vm);
        mutex_unlock(&vm->mutex);
}

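/**
 * radeon_vm_bind - bind a vm to the hardware
 *
 * @rdev: radeon_device pointer
 * @vm: vm to bind
 *
 * Suballocate memory for the page table, find a free vm id (evicting
 * the least recently used vm if necessary), bind the page table to
 * that id and map the ring temporary bo.
 * Returns 0 on success, error code on failure.
 */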
/* global and local mutexes must be locked */
int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
{
        struct radeon_vm *vm_evict;
        unsigned i;
        int id = -1, r;

        if (vm == NULL) {
                return -EINVAL;
        }

        if (vm->id != -1) {
                /* update lru */
                list_del_init(&vm->list);
                list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
                return 0;
        }

retry:
        r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
                             RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
                             RADEON_GPU_PAGE_SIZE, false);
        if (r) {
                if (list_empty(&rdev->vm_manager.lru_vm)) {
                        return r;
                }
                vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
                radeon_vm_unbind(rdev, vm_evict);
                goto retry;
        }
        vm->pt = radeon_sa_bo_cpu_addr(vm->sa_bo);
        vm->pt_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
        memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));

retry_id:
        /* search for free vm */
        for (i = 0; i < rdev->vm_manager.nvm; i++) {
                if (!(rdev->vm_manager.use_bitmap & (1 << i))) {
                        id = i;
                        break;
                }
        }
        /* evict vm if necessary */
        if (id == -1) {
                vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
                radeon_vm_unbind(rdev, vm_evict);
                goto retry_id;
        }

        /* do hw bind */
        r = rdev->vm_manager.funcs->bind(rdev, vm, id);
        if (r) {
                radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
                return r;
        }
        rdev->vm_manager.use_bitmap |= 1 << id;
        vm->id = id;
        list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
        return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
                                       &rdev->ring_tmp_bo.bo->tbo.mem);
}

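/**
 * radeon_vm_bo_add - add a bo to a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 * @offset: requested offset of the bo in the vm address space
 * @flags: attributes of the pages (read/write/valid/etc.)
 *
 * Add @bo into the requested vm, validating that the requested range
 * fits within the vm address space and does not overlap an existing
 * mapping; the vm address space is grown if needed.
 * Returns 0 on success, error code on failure.
 */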
/* object has to be reserved */
int radeon_vm_bo_add(struct radeon_device *rdev,
                     struct radeon_vm *vm,
                     struct radeon_bo *bo,
                     uint64_t offset,
                     uint32_t flags)
{
        struct radeon_bo_va *bo_va, *tmp;
        struct list_head *head;
        uint64_t size = radeon_bo_size(bo), last_offset = 0;
        unsigned last_pfn;

        bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
        if (bo_va == NULL) {
                return -ENOMEM;
        }
        bo_va->vm = vm;
        bo_va->bo = bo;
        bo_va->soffset = offset;
        bo_va->eoffset = offset + size;
        bo_va->flags = flags;
        bo_va->valid = false;
        INIT_LIST_HEAD(&bo_va->bo_list);
        INIT_LIST_HEAD(&bo_va->vm_list);
        /* make sure object fits at this offset */
        if (bo_va->soffset >= bo_va->eoffset) {
                kfree(bo_va);
                return -EINVAL;
        }

        last_pfn = bo_va->eoffset / RADEON_GPU_PAGE_SIZE;
        if (last_pfn > rdev->vm_manager.max_pfn) {
                kfree(bo_va);
                dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
                        last_pfn, rdev->vm_manager.max_pfn);
                return -EINVAL;
        }

        mutex_lock(&vm->mutex);
        if (last_pfn > vm->last_pfn) {
                /* release mutex and lock in right order */
                mutex_unlock(&vm->mutex);
                mutex_lock(&rdev->vm_manager.lock);
                mutex_lock(&vm->mutex);
                /* and check again */
                if (last_pfn > vm->last_pfn) {
                        /* grow va space 32M by 32M */
                        unsigned align = ((32 << 20) >> 12) - 1;
                        radeon_vm_unbind_locked(rdev, vm);
                        vm->last_pfn = (last_pfn + align) & ~align;
                }
                mutex_unlock(&rdev->vm_manager.lock);
        }
        head = &vm->va;
        last_offset = 0;
        list_for_each_entry(tmp, &vm->va, vm_list) {
                if (bo_va->soffset >= last_offset && bo_va->eoffset < tmp->soffset) {
                        /* bo can be added before this one */
                        break;
                }
                if (bo_va->soffset >= tmp->soffset && bo_va->soffset < tmp->eoffset) {
                        /* bo and tmp overlap, invalid offset */
                        dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
                                bo, (unsigned)bo_va->soffset, tmp->bo,
                                (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
                        kfree(bo_va);
                        mutex_unlock(&vm->mutex);
                        return -EINVAL;
                }
                last_offset = tmp->eoffset;
                head = &tmp->vm_list;
        }
        list_add(&bo_va->vm_list, head);
        list_add_tail(&bo_va->bo_list, &bo->va);
        mutex_unlock(&vm->mutex);
        return 0;
}

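/**
 * radeon_vm_get_addr - get the physical address of a page
 *
 * @rdev: radeon_device pointer
 * @mem: ttm mem
 * @pfn: gpu page frame number within the bo
 *
 * Look up the physical address the requested gpu page resolves to,
 * either in VRAM or, via the gart pages_addr table, in system memory.
 */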
static u64 radeon_vm_get_addr(struct radeon_device *rdev,
                              struct ttm_mem_reg *mem,
                              unsigned pfn)
{
        u64 addr = 0;

        switch (mem->mem_type) {
        case TTM_PL_VRAM:
                addr = (mem->start << PAGE_SHIFT);
                addr += pfn * RADEON_GPU_PAGE_SIZE;
                addr += rdev->vm_manager.vram_base_offset;
                break;
        case TTM_PL_TT:
                /* offset inside page table */
                addr = mem->start << PAGE_SHIFT;
                addr += pfn * RADEON_GPU_PAGE_SIZE;
                addr = addr >> PAGE_SHIFT;
                /* page table offset */
                addr = rdev->gart.pages_addr[addr];
                /* in case cpu page size != gpu page size */
                addr += (pfn * RADEON_GPU_PAGE_SIZE) & (~PAGE_MASK);
                break;
        default:
                break;
        }
        return addr;
}

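/**
 * radeon_vm_bo_update_pte - map a bo into the vm page table
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 * @mem: ttm mem, or NULL to unmap the bo
 *
 * Fill in the page table entries for @bo and flush the vm tlb.
 * Returns 0 on success, -EINVAL if the bo is not in the vm.
 */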
/* object has to be reserved & global and local mutexes must be locked */
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
                            struct radeon_vm *vm,
                            struct radeon_bo *bo,
                            struct ttm_mem_reg *mem)
{
        struct radeon_bo_va *bo_va;
        unsigned ngpu_pages, i;
        uint64_t addr = 0, pfn;
        uint32_t flags;

        /* nothing to do if vm isn't bound */
        if (vm->id == -1)
                return 0;

        bo_va = radeon_bo_va(bo, vm);
        if (bo_va == NULL) {
                dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
                return -EINVAL;
        }

        if (bo_va->valid)
                return 0;

        ngpu_pages = radeon_bo_ngpu_pages(bo);
        bo_va->flags &= ~RADEON_VM_PAGE_VALID;
        bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
        if (mem) {
                if (mem->mem_type != TTM_PL_SYSTEM) {
                        bo_va->flags |= RADEON_VM_PAGE_VALID;
                        bo_va->valid = true;
                }
                if (mem->mem_type == TTM_PL_TT) {
                        bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
                }
        }
        pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
        flags = rdev->vm_manager.funcs->page_flags(rdev, bo_va->vm, bo_va->flags);
        for (i = 0, addr = 0; i < ngpu_pages; i++) {
                if (mem && bo_va->valid) {
                        addr = radeon_vm_get_addr(rdev, mem, i);
                }
                rdev->vm_manager.funcs->set_page(rdev, bo_va->vm, i + pfn, addr, flags);
        }
        rdev->vm_manager.funcs->tlb_flush(rdev, bo_va->vm);
        return 0;
}

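/**
 * radeon_vm_bo_rmv - remove a bo from a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 *
 * Clear the page table entries for @bo and remove it from the vm and
 * bo lists.
 */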
/* object has to be reserved */
int radeon_vm_bo_rmv(struct radeon_device *rdev,
                     struct radeon_vm *vm,
                     struct radeon_bo *bo)
{
        struct radeon_bo_va *bo_va;

        bo_va = radeon_bo_va(bo, vm);
        if (bo_va == NULL)
                return 0;

        mutex_lock(&rdev->vm_manager.lock);
        mutex_lock(&vm->mutex);
        radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
        mutex_unlock(&rdev->vm_manager.lock);
        list_del(&bo_va->vm_list);
        mutex_unlock(&vm->mutex);
        list_del(&bo_va->bo_list);

        kfree(bo_va);
        return 0;
}

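/**
 * radeon_vm_bo_invalidate - mark the bo's vm mappings as invalid
 *
 * @rdev: radeon_device pointer
 * @bo: radeon buffer object
 *
 * Mark all of the bo's mappings as invalid so their page table entries
 * are rebuilt on the next radeon_vm_bo_update_pte().
 */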
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
                             struct radeon_bo *bo)
{
        struct radeon_bo_va *bo_va;

        BUG_ON(!atomic_read(&bo->tbo.reserved));
        list_for_each_entry(bo_va, &bo->va, bo_list) {
                bo_va->valid = false;
        }
}

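/**
 * radeon_vm_init - initialize a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Init the vm fields and map the ib pool buffer read-only at offset 0.
 * Returns 0 on success, error code on failure.
 */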
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
        int r;

        vm->id = -1;
        vm->fence = NULL;
        mutex_init(&vm->mutex);
        INIT_LIST_HEAD(&vm->list);
        INIT_LIST_HEAD(&vm->va);
        vm->last_pfn = 0;
        /* map the ib pool buffer at 0 in virtual address space, set
         * read only
         */
        r = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo, 0,
                             RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
        return r;
}

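/**
 * radeon_vm_fini - tear down a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Unbind the vm and remove all bos from its list of mappings.
 */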
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
        struct radeon_bo_va *bo_va, *tmp;
        int r;

        mutex_lock(&rdev->vm_manager.lock);
        mutex_lock(&vm->mutex);
        radeon_vm_unbind_locked(rdev, vm);
        mutex_unlock(&rdev->vm_manager.lock);

        /* remove all bo */
        r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
        if (!r) {
                bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
                list_del_init(&bo_va->bo_list);
                list_del_init(&bo_va->vm_list);
                radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
                kfree(bo_va);
        }
        if (!list_empty(&vm->va)) {
                dev_err(rdev->dev, "still active bo inside vm\n");
        }
        list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
                list_del_init(&bo_va->vm_list);
                r = radeon_bo_reserve(bo_va->bo, false);
                if (!r) {
                        list_del_init(&bo_va->bo_list);
                        radeon_bo_unreserve(bo_va->bo);
                        kfree(bo_va);
                }
        }
        mutex_unlock(&vm->mutex);
}