/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy GART on older ASICs, however
 * rather than there being a single global GART table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * VRAM pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

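/*
 * Address translation overview (as implemented below): a GPU virtual
 * address is first converted to a GPU page frame number
 * (addr / RADEON_GPU_PAGE_SIZE).  The upper bits of that pfn
 * (pfn >> radeon_vm_block_size) select a page directory entry, while the
 * lower bits (pfn & (RADEON_VM_PTE_COUNT - 1)) select a PTE within the
 * page table that the directory entry points to.  For example, with a
 * block size of 9 each page table holds 512 eight-byte entries and maps
 * 2MB of address space.
 */
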
/**
 * radeon_vm_num_pdes - return the number of page directory entries
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the number of page directory entries (cayman+).
 */
static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
{
        return rdev->vm_manager.max_pfn >> radeon_vm_block_size;
}

/**
 * radeon_vm_directory_size - returns the size of the page directory in bytes
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the size of the page directory in bytes (cayman+).
 */
static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
{
        return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
}
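
/*
 * For example, a 4GB address space has 4GB / 4KB = 1048576 page frames;
 * with a block size of 9 that gives 1048576 >> 9 = 2048 page directory
 * entries, so radeon_vm_directory_size() returns 2048 * 8 = 16KB (page
 * aligned).  The numbers scale linearly with max_pfn.
 */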

/**
 * radeon_vm_manager_init - init the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Init the vm manager (cayman+).
 * Returns 0 for success, error for failure.
 */
int radeon_vm_manager_init(struct radeon_device *rdev)
{
        int r;

        if (!rdev->vm_manager.enabled) {
                r = radeon_asic_vm_init(rdev);
                if (r)
                        return r;

                rdev->vm_manager.enabled = true;
        }
        return 0;
}

/**
 * radeon_vm_manager_fini - tear down the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the VM manager (cayman+).
 */
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
        int i;

        if (!rdev->vm_manager.enabled)
                return;

        for (i = 0; i < RADEON_NUM_VM; ++i)
                radeon_fence_unref(&rdev->vm_manager.active[i]);
        radeon_asic_vm_fini(rdev);
        rdev->vm_manager.enabled = false;
}

/**
 * radeon_vm_get_bos - add the vm BOs to a validation list
 *
 * @rdev: radeon_device pointer
 * @vm: vm providing the BOs
 * @head: head of validation list
 *
 * Add the page directory to the list of BOs to
 * validate for command submission (cayman+).
 */
struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
                                          struct radeon_vm *vm,
                                          struct list_head *head)
{
        struct radeon_cs_reloc *list;
        unsigned i, idx;

        list = kmalloc_array(vm->max_pde_used + 2,
                             sizeof(struct radeon_cs_reloc), GFP_KERNEL);
        if (!list)
                return NULL;

        /* add the vm page table to the list */
        list[0].gobj = NULL;
        list[0].robj = vm->page_directory;
        list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
        list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
        list[0].tv.bo = &vm->page_directory->tbo;
        list[0].tiling_flags = 0;
        list[0].handle = 0;
        list_add(&list[0].tv.head, head);

        for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
                if (!vm->page_tables[i].bo)
                        continue;

                list[idx].gobj = NULL;
                list[idx].robj = vm->page_tables[i].bo;
                list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
                list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
                list[idx].tv.bo = &list[idx].robj->tbo;
                list[idx].tiling_flags = 0;
                list[idx].handle = 0;
                list_add(&list[idx++].tv.head, head);
        }

        return list;
}
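
/*
 * The array returned by radeon_vm_get_bos() is sized for the worst case:
 * entry 0 always holds the page directory and entries 1..n hold every
 * currently allocated page table up to max_pde_used, hence the
 * "max_pde_used + 2" allocation above.  The caller owns the array and is
 * expected to kfree() it once validation is done.
 */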

/**
 * radeon_vm_grab_id - allocate the next free VMID
 *
 * @rdev: radeon_device pointer
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 *
 * Allocate an id for the vm (cayman+).
 * Returns the fence we need to sync to (if any).
 *
 * Global and local mutex must be locked!
 */
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
                                       struct radeon_vm *vm, int ring)
{
        struct radeon_fence *best[RADEON_NUM_RINGS] = {};
        unsigned choices[2] = {};
        unsigned i;

        /* check if the id is still valid */
        if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
                return NULL;

        /* we definitely need to flush */
        radeon_fence_unref(&vm->last_flush);

        /* skip over VMID 0, since it is the system VM */
        for (i = 1; i < rdev->vm_manager.nvm; ++i) {
                struct radeon_fence *fence = rdev->vm_manager.active[i];

                if (fence == NULL) {
                        /* found a free one */
                        vm->id = i;
                        trace_radeon_vm_grab_id(vm->id, ring);
                        return NULL;
                }

                if (radeon_fence_is_earlier(fence, best[fence->ring])) {
                        best[fence->ring] = fence;
                        choices[fence->ring == ring ? 0 : 1] = i;
                }
        }

        for (i = 0; i < 2; ++i) {
                if (choices[i]) {
                        vm->id = choices[i];
                        trace_radeon_vm_grab_id(vm->id, ring);
                        return rdev->vm_manager.active[choices[i]];
                }
        }

        /* should never happen */
        BUG();
        return NULL;
}
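
/*
 * VMID selection note: the loop above prefers a completely unused id.  If
 * none is free, choices[0] tracks the least recently used id whose last
 * activity was on the target ring and choices[1] tracks the best id from
 * any other ring.  The same-ring candidate is tried first since waiting
 * for a fence emitted on the ring we are about to submit to is implicit
 * in the ring ordering, while an id last used on another ring needs a
 * real cross-ring wait on the returned fence.
 */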

/**
 * radeon_vm_flush - hardware flush the vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to flush
 * @ring: ring to use for flush
 *
 * Flush the vm (cayman+).
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_flush(struct radeon_device *rdev,
                     struct radeon_vm *vm,
                     int ring)
{
        uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);

        /* if we can't remember our last VM flush then flush now! */
        /* XXX figure out why we have to flush all the time */
        if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) {
                vm->pd_gpu_addr = pd_addr;
                radeon_ring_vm_flush(rdev, ring, vm);
        }
}

/**
 * radeon_vm_fence - remember fence for vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to fence
 * @fence: fence to remember
 *
 * Fence the vm (cayman+).
 * Set the fence used to protect page table and id.
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_fence(struct radeon_device *rdev,
                     struct radeon_vm *vm,
                     struct radeon_fence *fence)
{
        radeon_fence_unref(&vm->fence);
        vm->fence = radeon_fence_ref(fence);

        radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
        rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);

        radeon_fence_unref(&vm->last_id_use);
        vm->last_id_use = radeon_fence_ref(fence);

        /* we just flushed the VM, remember that */
        if (!vm->last_flush)
                vm->last_flush = radeon_fence_ref(fence);
}

/**
 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm (cayman+).
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
                                       struct radeon_bo *bo)
{
        struct radeon_bo_va *bo_va;

        list_for_each_entry(bo_va, &bo->va, bo_list) {
                if (bo_va->vm == vm) {
                        return bo_va;
                }
        }
        return NULL;
}

/**
 * radeon_vm_bo_add - add a bo to a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 *
 * Add @bo into the requested vm (cayman+).
 * Add @bo to the list of bos associated with the vm.
 * Returns the newly added bo_va or NULL for failure.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
                                      struct radeon_vm *vm,
                                      struct radeon_bo *bo)
{
        struct radeon_bo_va *bo_va;

        bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
        if (bo_va == NULL) {
                return NULL;
        }
        bo_va->vm = vm;
        bo_va->bo = bo;
        bo_va->soffset = 0;
        bo_va->eoffset = 0;
        bo_va->flags = 0;
        bo_va->valid = false;
        bo_va->ref_count = 1;
        INIT_LIST_HEAD(&bo_va->bo_list);
        INIT_LIST_HEAD(&bo_va->vm_list);
        INIT_LIST_HEAD(&bo_va->vm_status);

        mutex_lock(&vm->mutex);
        list_add(&bo_va->vm_list, &vm->va);
        list_add_tail(&bo_va->bo_list, &bo->va);
        mutex_unlock(&vm->mutex);

        return bo_va;
}

/**
 * radeon_vm_clear_bo - initially clear the page dir/table
 *
 * @rdev: radeon_device pointer
 * @bo: bo to clear
 */
static int radeon_vm_clear_bo(struct radeon_device *rdev,
                              struct radeon_bo *bo)
{
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct list_head head;
        struct radeon_ib ib;
        unsigned entries;
        uint64_t addr;
        int r;

        memset(&tv, 0, sizeof(tv));
        tv.bo = &bo->tbo;

        INIT_LIST_HEAD(&head);
        list_add(&tv.head, &head);

        r = ttm_eu_reserve_buffers(&ticket, &head);
        if (r)
                return r;

        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
        if (r)
                goto error;

        addr = radeon_bo_gpu_offset(bo);
        entries = radeon_bo_size(bo) / 8;

        r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
                          NULL, entries * 2 + 64);
        if (r)
                goto error;

        ib.length_dw = 0;

        radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0);

        r = radeon_ib_schedule(rdev, &ib, NULL);
        if (r)
                goto error;

        ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
        radeon_ib_free(rdev, &ib);

        return 0;

error:
        ttm_eu_backoff_reservation(&ticket, &head);
        return r;
}

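/*
 * radeon_vm_clear_bo() runs on the DMA ring and writes every entry of the
 * freshly allocated BO as zero (not valid), so the hardware never walks
 * stale data left over from a previous user of that VRAM.  The requested
 * IB size of "entries * 2 + 64" dwords budgets two dwords per cleared
 * entry plus slack for packet headers and padding.
 */
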
/**
 * radeon_vm_bo_set_addr - set bo's virtual address inside a vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to store the address
 * @soffset: requested offset of the buffer in the VM address space
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Set offset of @bo_va (cayman+).
 * Validate and set the offset requested within the vm address space.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved!
 */
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                          struct radeon_bo_va *bo_va,
                          uint64_t soffset,
                          uint32_t flags)
{
        uint64_t size = radeon_bo_size(bo_va->bo);
        uint64_t eoffset, last_offset = 0;
        struct radeon_vm *vm = bo_va->vm;
        struct radeon_bo_va *tmp;
        struct list_head *head;
        unsigned last_pfn, pt_idx;
        int r;

        if (soffset) {
                /* make sure object fits at this offset */
                eoffset = soffset + size;
                if (soffset >= eoffset) {
                        return -EINVAL;
                }

                last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
                if (last_pfn > rdev->vm_manager.max_pfn) {
                        dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
                                last_pfn, rdev->vm_manager.max_pfn);
                        return -EINVAL;
                }

        } else {
                eoffset = last_pfn = 0;
        }

        mutex_lock(&vm->mutex);
        head = &vm->va;
        last_offset = 0;
        list_for_each_entry(tmp, &vm->va, vm_list) {
                if (bo_va == tmp) {
                        /* skip over currently modified bo */
                        continue;
                }

                if (soffset >= last_offset && eoffset <= tmp->soffset) {
                        /* bo can be added before this one */
                        break;
                }
                if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
                        /* bo and tmp overlap, invalid offset */
                        dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
                                bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
                                (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
                        mutex_unlock(&vm->mutex);
                        return -EINVAL;
                }
                last_offset = tmp->eoffset;
                head = &tmp->vm_list;
        }

        if (bo_va->soffset) {
                /* add a clone of the bo_va to clear the old address */
                tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
                if (!tmp) {
                        mutex_unlock(&vm->mutex);
                        return -ENOMEM;
                }
                tmp->soffset = bo_va->soffset;
                tmp->eoffset = bo_va->eoffset;
                tmp->vm = vm;
                list_add(&tmp->vm_status, &vm->freed);
        }

        bo_va->soffset = soffset;
        bo_va->eoffset = eoffset;
        bo_va->flags = flags;
        bo_va->valid = false;
        list_move(&bo_va->vm_list, head);

        soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
        eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;

        BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));

        if (eoffset > vm->max_pde_used)
                vm->max_pde_used = eoffset;

        radeon_bo_unreserve(bo_va->bo);

        /* walk over the address space and allocate the page tables */
        for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
                struct radeon_bo *pt;

                if (vm->page_tables[pt_idx].bo)
                        continue;

                /* drop mutex to allocate and clear page table */
                mutex_unlock(&vm->mutex);

                r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
                                     RADEON_GPU_PAGE_SIZE, true,
                                     RADEON_GEM_DOMAIN_VRAM, NULL, &pt);
                if (r)
                        return r;

                r = radeon_vm_clear_bo(rdev, pt);
                if (r) {
                        radeon_bo_unref(&pt);
                        radeon_bo_reserve(bo_va->bo, false);
                        return r;
                }

                /* acquire mutex again */
                mutex_lock(&vm->mutex);
                if (vm->page_tables[pt_idx].bo) {
                        /* someone else allocated the pt in the meantime */
                        mutex_unlock(&vm->mutex);
                        radeon_bo_unref(&pt);
                        mutex_lock(&vm->mutex);
                        continue;
                }

                vm->page_tables[pt_idx].addr = 0;
                vm->page_tables[pt_idx].bo = pt;
        }

        mutex_unlock(&vm->mutex);
        return radeon_bo_reserve(bo_va->bo, false);
}

/**
 * radeon_vm_map_gart - get the physical address of a gart page
 *
 * @rdev: radeon_device pointer
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to (cayman+).
 * Returns the physical address of the page.
 */
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
{
        uint64_t result;

        /* page table offset */
        result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];

        /* in case cpu page size != gpu page size */
        result |= addr & (~PAGE_MASK);

        return result;
}

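/*
 * Example for radeon_vm_map_gart(): with 4KB CPU pages the lookup above is
 * one-to-one, since CPU and GPU page sizes match.  With larger CPU pages
 * (e.g. 64KB) several 4KB GPU pages share one entry in gart.pages_addr[],
 * and the "addr & ~PAGE_MASK" term re-adds the offset of the GPU page
 * inside the larger CPU page.
 */
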
/**
 * radeon_vm_page_flags - translate page flags to what the hw uses
 *
 * @flags: flags coming from userspace
 *
 * Translate the flags the userspace ABI uses to hw flags.
 */
static uint32_t radeon_vm_page_flags(uint32_t flags)
{
        uint32_t hw_flags = 0;

        hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
        hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
        hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
        if (flags & RADEON_VM_PAGE_SYSTEM) {
                hw_flags |= R600_PTE_SYSTEM;
                hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
        }
        return hw_flags;
}

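/*
 * For instance, a GTT mapping requested with RADEON_VM_PAGE_VALID |
 * RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SYSTEM | RADEON_VM_PAGE_SNOOPED
 * translates to R600_PTE_VALID | R600_PTE_READABLE | R600_PTE_SYSTEM |
 * R600_PTE_SNOOPED, while VRAM mappings never carry the SYSTEM or SNOOPED
 * bits.
 */
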
/**
 * radeon_vm_update_page_directory - make sure that page directory is valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Allocates new page tables if necessary
 * and updates the page directory (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
int radeon_vm_update_page_directory(struct radeon_device *rdev,
                                    struct radeon_vm *vm)
{
        struct radeon_bo *pd = vm->page_directory;
        uint64_t pd_addr = radeon_bo_gpu_offset(pd);
        uint32_t incr = RADEON_VM_PTE_COUNT * 8;
        uint64_t last_pde = ~0, last_pt = ~0;
        unsigned count = 0, pt_idx, ndw;
        struct radeon_ib ib;
        int r;

        /* padding, etc. */
        ndw = 64;

        /* assume the worst case */
        ndw += vm->max_pde_used * 16;

        /* update too big for an IB */
        if (ndw > 0xfffff)
                return -ENOMEM;

        r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
        if (r)
                return r;
        ib.length_dw = 0;

        /* walk over the address space and update the page directory */
        for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
                struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
                uint64_t pde, pt;

                if (bo == NULL)
                        continue;

                pt = radeon_bo_gpu_offset(bo);
                if (vm->page_tables[pt_idx].addr == pt)
                        continue;
                vm->page_tables[pt_idx].addr = pt;

                pde = pd_addr + pt_idx * 8;
                if (((last_pde + 8 * count) != pde) ||
                    ((last_pt + incr * count) != pt)) {

                        if (count) {
                                radeon_asic_vm_set_page(rdev, &ib, last_pde,
                                                        last_pt, count, incr,
                                                        R600_PTE_VALID);
                        }

                        count = 1;
                        last_pde = pde;
                        last_pt = pt;
                } else {
                        ++count;
                }
        }

        if (count)
                radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count,
                                        incr, R600_PTE_VALID);

        if (ib.length_dw != 0) {
                radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
                radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
                r = radeon_ib_schedule(rdev, &ib, NULL);
                if (r) {
                        radeon_ib_free(rdev, &ib);
                        return r;
                }
                radeon_fence_unref(&vm->fence);
                vm->fence = radeon_fence_ref(ib.fence);
                radeon_fence_unref(&vm->last_flush);
        }
        radeon_ib_free(rdev, &ib);

        return 0;
}

/**
 * radeon_vm_frag_ptes - add fragment information to PTEs
 *
 * @rdev: radeon_device pointer
 * @ib: IB for the update
 * @pe_start: first PTE to handle
 * @pe_end: last PTE to handle
 * @addr: addr those PTEs should point to
 * @flags: hw mapping flags
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_frag_ptes(struct radeon_device *rdev,
                                struct radeon_ib *ib,
                                uint64_t pe_start, uint64_t pe_end,
                                uint64_t addr, uint32_t flags)
{
        /**
         * The MC L1 TLB supports variable sized pages, based on a fragment
         * field in the PTE. When this field is set to a non-zero value, page
         * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
         * flags are considered valid for all PTEs within the fragment range
         * and corresponding mappings are assumed to be physically contiguous.
         *
         * The L1 TLB can store a single PTE for the whole fragment,
         * significantly increasing the space available for translation
         * caching. This leads to large improvements in throughput when the
         * TLB is under pressure.
         *
         * The L2 TLB distributes small and large fragments into two
         * asymmetric partitions. The large fragment cache is significantly
         * larger. Thus, we try to use large fragments wherever possible.
         * Userspace can support this by aligning virtual base address and
         * allocation size to the fragment size.
         */

        /* NI is optimized for 256KB fragments, SI and newer for 64KB */
        uint64_t frag_flags = rdev->family == CHIP_CAYMAN ?
                        R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
        uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80;

        uint64_t frag_start = ALIGN(pe_start, frag_align);
        uint64_t frag_end = pe_end & ~(frag_align - 1);

        unsigned count;

        /* system pages are not necessarily contiguous */
        if ((flags & R600_PTE_SYSTEM) || !(flags & R600_PTE_VALID) ||
            (frag_start >= frag_end)) {

                count = (pe_end - pe_start) / 8;
                radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
                                        RADEON_GPU_PAGE_SIZE, flags);
                return;
        }

        /* handle the 4K area at the beginning */
        if (pe_start != frag_start) {
                count = (frag_start - pe_start) / 8;
                radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
                                        RADEON_GPU_PAGE_SIZE, flags);
                addr += RADEON_GPU_PAGE_SIZE * count;
        }

        /* handle the area in the middle */
        count = (frag_end - frag_start) / 8;
        radeon_asic_vm_set_page(rdev, ib, frag_start, addr, count,
                                RADEON_GPU_PAGE_SIZE, flags | frag_flags);

        /* handle the 4K area at the end */
        if (frag_end != pe_end) {
                addr += RADEON_GPU_PAGE_SIZE * count;
                count = (pe_end - frag_end) / 8;
                radeon_asic_vm_set_page(rdev, ib, frag_end, addr, count,
                                        RADEON_GPU_PAGE_SIZE, flags);
        }
}

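/*
 * Worked fragment example (SI and newer, 64KB fragments, illustrative
 * offsets): frag_align is 0x80 bytes, i.e. 16 eight-byte PTEs covering
 * 16 * 4KB = 64KB of address space.  A request with pe_start = 0x30 and
 * pe_end = 0x230 is split into a head of (0x80 - 0x30) / 8 = 10 plain
 * PTEs, a middle of (0x200 - 0x80) / 8 = 48 PTEs written with
 * R600_PTE_FRAG_64KB, and a tail of (0x230 - 0x200) / 8 = 6 plain PTEs.
 */
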
/**
 * radeon_vm_update_ptes - make sure that page tables are valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @ib: IB for the update
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_update_ptes(struct radeon_device *rdev,
                                  struct radeon_vm *vm,
                                  struct radeon_ib *ib,
                                  uint64_t start, uint64_t end,
                                  uint64_t dst, uint32_t flags)
{
        uint64_t mask = RADEON_VM_PTE_COUNT - 1;
        uint64_t last_pte = ~0, last_dst = ~0;
        unsigned count = 0;
        uint64_t addr;

        start = start / RADEON_GPU_PAGE_SIZE;
        end = end / RADEON_GPU_PAGE_SIZE;

        /* walk over the address space and update the page tables */
        for (addr = start; addr < end; ) {
                uint64_t pt_idx = addr >> radeon_vm_block_size;
                struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
                unsigned nptes;
                uint64_t pte;

                radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj);

                if ((addr & ~mask) == (end & ~mask))
                        nptes = end - addr;
                else
                        nptes = RADEON_VM_PTE_COUNT - (addr & mask);

                pte = radeon_bo_gpu_offset(pt);
                pte += (addr & mask) * 8;

                if ((last_pte + 8 * count) != pte) {

                        if (count) {
                                radeon_vm_frag_ptes(rdev, ib, last_pte,
                                                    last_pte + 8 * count,
                                                    last_dst, flags);
                        }

                        count = nptes;
                        last_pte = pte;
                        last_dst = dst;
                } else {
                        count += nptes;
                }

                addr += nptes;
                dst += nptes * RADEON_GPU_PAGE_SIZE;
        }

        if (count) {
                radeon_vm_frag_ptes(rdev, ib, last_pte,
                                    last_pte + 8 * count,
                                    last_dst, flags);
        }
}

827 | * radeon_vm_bo_update - map a bo into the vm page table | |
828 | * | |
829 | * @rdev: radeon_device pointer | |
830 | * @vm: requested vm | |
831 | * @bo: radeon buffer object | |
832 | * @mem: ttm mem | |
833 | * | |
834 | * Fill in the page table entries for @bo (cayman+). | |
835 | * Returns 0 for success, -EINVAL for failure. | |
836 | * | |
529364e0 | 837 | * Object have to be reserved and mutex must be locked! |
2280ab57 CK |
838 | */ |
839 | int radeon_vm_bo_update(struct radeon_device *rdev, | |
036bf46a | 840 | struct radeon_bo_va *bo_va, |
2280ab57 CK |
841 | struct ttm_mem_reg *mem) |
842 | { | |
036bf46a | 843 | struct radeon_vm *vm = bo_va->vm; |
2280ab57 | 844 | struct radeon_ib ib; |
6d2f2944 | 845 | unsigned nptes, ndw; |
2280ab57 CK |
846 | uint64_t addr; |
847 | int r; | |
848 | ||
2280ab57 CK |
849 | |
850 | if (!bo_va->soffset) { | |
851 | dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n", | |
036bf46a | 852 | bo_va->bo, vm); |
2280ab57 CK |
853 | return -EINVAL; |
854 | } | |
855 | ||
856 | if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL)) | |
857 | return 0; | |
858 | ||
859 | bo_va->flags &= ~RADEON_VM_PAGE_VALID; | |
860 | bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; | |
861 | if (mem) { | |
862 | addr = mem->start << PAGE_SHIFT; | |
863 | if (mem->mem_type != TTM_PL_SYSTEM) { | |
864 | bo_va->flags |= RADEON_VM_PAGE_VALID; | |
865 | bo_va->valid = true; | |
866 | } | |
867 | if (mem->mem_type == TTM_PL_TT) { | |
868 | bo_va->flags |= RADEON_VM_PAGE_SYSTEM; | |
869 | } else { | |
870 | addr += rdev->vm_manager.vram_base_offset; | |
871 | } | |
872 | } else { | |
873 | addr = 0; | |
874 | bo_va->valid = false; | |
875 | } | |
876 | ||
877 | trace_radeon_vm_bo_update(bo_va); | |
878 | ||
036bf46a | 879 | nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE; |
2280ab57 | 880 | |
2280ab57 CK |
881 | /* padding, etc. */ |
882 | ndw = 64; | |
883 | ||
4510fb98 | 884 | if (radeon_vm_block_size > 11) |
2280ab57 CK |
885 | /* reserve space for one header for every 2k dwords */ |
886 | ndw += (nptes >> 11) * 4; | |
887 | else | |
888 | /* reserve space for one header for | |
889 | every (1 << BLOCK_SIZE) entries */ | |
4510fb98 | 890 | ndw += (nptes >> radeon_vm_block_size) * 4; |
2280ab57 CK |
891 | |
892 | /* reserve space for pte addresses */ | |
893 | ndw += nptes * 2; | |
894 | ||
2280ab57 CK |
895 | /* update too big for an IB */ |
896 | if (ndw > 0xfffff) | |
897 | return -ENOMEM; | |
898 | ||
899 | r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4); | |
900 | if (r) | |
901 | return r; | |
902 | ib.length_dw = 0; | |
903 | ||
2280ab57 CK |
904 | radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset, |
905 | addr, radeon_vm_page_flags(bo_va->flags)); | |
906 | ||
907 | radeon_semaphore_sync_to(ib.semaphore, vm->fence); | |
908 | r = radeon_ib_schedule(rdev, &ib, NULL); | |
909 | if (r) { | |
910 | radeon_ib_free(rdev, &ib); | |
911 | return r; | |
912 | } | |
913 | radeon_fence_unref(&vm->fence); | |
914 | vm->fence = radeon_fence_ref(ib.fence); | |
915 | radeon_ib_free(rdev, &ib); | |
916 | radeon_fence_unref(&vm->last_flush); | |
917 | ||
918 | return 0; | |
919 | } | |
920 | ||
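/*
 * Sizing note for radeon_vm_bo_update(): the IB budget is two dwords per
 * PTE, plus four dwords of command header for every page table crossed
 * (capped at one header per 2048 entries for very large block sizes), plus
 * 64 dwords of slack for padding.  Updates that would exceed the 0xfffff
 * dword IB limit are rejected with -ENOMEM rather than split.
 */
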
/**
 * radeon_vm_clear_freed - clear freed BOs in the PT
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int radeon_vm_clear_freed(struct radeon_device *rdev,
                          struct radeon_vm *vm)
{
        struct radeon_bo_va *bo_va, *tmp;
        int r;

        list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
                list_del(&bo_va->vm_status);
                r = radeon_vm_bo_update(rdev, bo_va, NULL);
                kfree(bo_va);
                if (r)
                        return r;
        }
        return 0;
}

/**
 * radeon_vm_bo_rmv - remove a bo from a specific vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm (cayman+).
 *
 * Object has to be reserved!
 */
void radeon_vm_bo_rmv(struct radeon_device *rdev,
                      struct radeon_bo_va *bo_va)
{
        struct radeon_vm *vm = bo_va->vm;

        list_del(&bo_va->bo_list);

        mutex_lock(&vm->mutex);
        list_del(&bo_va->vm_list);

        if (bo_va->soffset) {
                bo_va->bo = NULL;
                list_add(&bo_va->vm_status, &vm->freed);
        } else {
                kfree(bo_va);
        }

        mutex_unlock(&vm->mutex);
}

/**
 * radeon_vm_bo_invalidate - mark the bo as invalid
 *
 * @rdev: radeon_device pointer
 * @bo: radeon buffer object
 *
 * Mark @bo as invalid (cayman+).
 */
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
                             struct radeon_bo *bo)
{
        struct radeon_bo_va *bo_va;

        list_for_each_entry(bo_va, &bo->va, bo_list) {
                bo_va->valid = false;
        }
}

/**
 * radeon_vm_init - initialize a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Init @vm fields (cayman+).
 * Returns 0 for success, error for failure.
 */
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
        const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE,
                                   RADEON_VM_PTE_COUNT * 8);
        unsigned pd_size, pd_entries, pts_size;
        int r;

        vm->id = 0;
        vm->ib_bo_va = NULL;
        vm->fence = NULL;
        vm->last_flush = NULL;
        vm->last_id_use = NULL;
        mutex_init(&vm->mutex);
        INIT_LIST_HEAD(&vm->va);
        INIT_LIST_HEAD(&vm->freed);

        pd_size = radeon_vm_directory_size(rdev);
        pd_entries = radeon_vm_num_pdes(rdev);

        /* allocate page table array */
        pts_size = pd_entries * sizeof(struct radeon_vm_pt);
        vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
        if (vm->page_tables == NULL) {
                DRM_ERROR("Cannot allocate memory for page table array\n");
                return -ENOMEM;
        }

        r = radeon_bo_create(rdev, pd_size, align, true,
                             RADEON_GEM_DOMAIN_VRAM, NULL,
                             &vm->page_directory);
        if (r)
                return r;

        r = radeon_vm_clear_bo(rdev, vm->page_directory);
        if (r) {
                radeon_bo_unref(&vm->page_directory);
                vm->page_directory = NULL;
                return r;
        }

        return 0;
}

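/*
 * Simplified sketch of how a caller typically drives these helpers over a
 * VM's lifetime (error handling and locking omitted; names like bo_mem and
 * va are placeholders):
 *
 *   radeon_vm_init(rdev, vm);
 *   bo_va = radeon_vm_bo_add(rdev, vm, bo);        // bo reserved by caller
 *   radeon_vm_bo_set_addr(rdev, bo_va, va, flags); // pick a virtual address
 *   ...
 *   radeon_vm_update_page_directory(rdev, vm);     // at submission time
 *   radeon_vm_bo_update(rdev, bo_va, bo_mem);
 *   radeon_vm_flush(rdev, vm, ring);
 *   ...
 *   radeon_vm_bo_rmv(rdev, bo_va);
 *   radeon_vm_fini(rdev, vm);
 */
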
/**
 * radeon_vm_fini - tear down a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Tear down @vm (cayman+).
 * Unbind the VM and remove all bos from the vm bo list.
 */
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
        struct radeon_bo_va *bo_va, *tmp;
        int i, r;

        if (!list_empty(&vm->va)) {
                dev_err(rdev->dev, "still active bo inside vm\n");
        }
        list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
                list_del_init(&bo_va->vm_list);
                r = radeon_bo_reserve(bo_va->bo, false);
                if (!r) {
                        list_del_init(&bo_va->bo_list);
                        radeon_bo_unreserve(bo_va->bo);
                        kfree(bo_va);
                }
        }
        list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
                kfree(bo_va);

        for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
                radeon_bo_unref(&vm->page_tables[i].bo);
        kfree(vm->page_tables);

        radeon_bo_unref(&vm->page_directory);

        radeon_fence_unref(&vm->fence);
        radeon_fence_unref(&vm->last_flush);
        radeon_fence_unref(&vm->last_id_use);

        mutex_destroy(&vm->mutex);
}