/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

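/*
 * Editorial note, illustrative only: with a hypothetical
 * radeon_vm_block_size of 9 bits, a GPU virtual page frame number is
 * split in two, e.g. for pfn 0x12345:
 *
 *   pde_idx = 0x12345 >> 9    = 0x91  (selects the page table)
 *   pte_idx = 0x12345 & 0x1ff = 0x145 (selects the entry inside it)
 *
 * so each page table covers (1 << 9) * 4KB = 2MB of address space.
 */
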
/**
 * radeon_vm_num_pdes - return the number of page directory entries
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the number of page directory entries (cayman+).
 */
static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
{
	return rdev->vm_manager.max_pfn >> radeon_vm_block_size;
}

/**
 * radeon_vm_directory_size - returns the size of the page directory in bytes
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the size of the page directory in bytes (cayman+).
 */
static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
{
	return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
}

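/*
 * Worked sizing example (assumed values, for illustration only): for a
 * 4GB VM address space, max_pfn = 4GB / 4KB = 0x100000. With a 9-bit
 * block size, radeon_vm_num_pdes() = 0x100000 >> 9 = 2048 and
 * radeon_vm_directory_size() = 2048 * 8 bytes = 16KB, page aligned.
 */
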
/**
 * radeon_vm_manager_init - init the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Init the vm manager (cayman+).
 * Returns 0 for success, error for failure.
 */
int radeon_vm_manager_init(struct radeon_device *rdev)
{
	int r;

	if (!rdev->vm_manager.enabled) {
		r = radeon_asic_vm_init(rdev);
		if (r)
			return r;

		rdev->vm_manager.enabled = true;
	}
	return 0;
}

/**
 * radeon_vm_manager_fini - tear down the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the VM manager (cayman+).
 */
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
	int i;

	if (!rdev->vm_manager.enabled)
		return;

	for (i = 0; i < RADEON_NUM_VM; ++i)
		radeon_fence_unref(&rdev->vm_manager.active[i]);
	radeon_asic_vm_fini(rdev);
	rdev->vm_manager.enabled = false;
}

/**
 * radeon_vm_get_bos - add the vm BOs to a validation list
 *
 * @rdev: radeon_device pointer
 * @vm: vm providing the BOs
 * @head: head of validation list
 *
 * Add the page directory to the list of BOs to
 * validate for command submission (cayman+).
 */
struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
					 struct radeon_vm *vm,
					 struct list_head *head)
{
	struct radeon_bo_list *list;
	unsigned i, idx;

	list = drm_malloc_ab(vm->max_pde_used + 2,
			     sizeof(struct radeon_bo_list));
	if (!list)
		return NULL;

	/* add the vm page table to the list */
	list[0].robj = vm->page_directory;
	list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
	list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
	list[0].tv.bo = &vm->page_directory->tbo;
	list[0].tv.shared = true;
	list[0].tiling_flags = 0;
	list_add(&list[0].tv.head, head);

	for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
		if (!vm->page_tables[i].bo)
			continue;

		list[idx].robj = vm->page_tables[i].bo;
		list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
		list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
		list[idx].tv.bo = &list[idx].robj->tbo;
		list[idx].tv.shared = true;
		list[idx].tiling_flags = 0;
		list_add(&list[idx++].tv.head, head);
	}

	return list;
}

/**
 * radeon_vm_grab_id - allocate the next free VMID
 *
 * @rdev: radeon_device pointer
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 *
 * Allocate an id for the vm (cayman+).
 * Returns the fence we need to sync to (if any).
 *
 * Global and local mutex must be locked!
 */
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
				       struct radeon_vm *vm, int ring)
{
	struct radeon_fence *best[RADEON_NUM_RINGS] = {};
	struct radeon_vm_id *vm_id = &vm->ids[ring];

	unsigned choices[2] = {};
	unsigned i;

	/* check if the id is still valid */
	if (vm_id->id && vm_id->last_id_use &&
	    vm_id->last_id_use == rdev->vm_manager.active[vm_id->id])
		return NULL;

	/* we definitely need to flush */
	vm_id->pd_gpu_addr = ~0ll;

	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < rdev->vm_manager.nvm; ++i) {
		struct radeon_fence *fence = rdev->vm_manager.active[i];

		if (fence == NULL) {
			/* found a free one */
			vm_id->id = i;
			trace_radeon_vm_grab_id(i, ring);
			return NULL;
		}

		if (radeon_fence_is_earlier(fence, best[fence->ring])) {
			best[fence->ring] = fence;
			choices[fence->ring == ring ? 0 : 1] = i;
		}
	}

	for (i = 0; i < 2; ++i) {
		if (choices[i]) {
			vm_id->id = choices[i];
			trace_radeon_vm_grab_id(choices[i], ring);
			return rdev->vm_manager.active[choices[i]];
		}
	}

	/* should never happen */
	BUG();
	return NULL;
}

/**
 * radeon_vm_flush - hardware flush the vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to flush
 * @ring: ring to use for flush
 * @updates: last vm update that is waited for
 *
 * Flush the vm (cayman+).
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_flush(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     int ring, struct radeon_fence *updates)
{
	uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
	struct radeon_vm_id *vm_id = &vm->ids[ring];

	if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates ||
	    radeon_fence_is_earlier(vm_id->flushed_updates, updates)) {

		trace_radeon_vm_flush(pd_addr, ring, vm->ids[ring].id);
		radeon_fence_unref(&vm_id->flushed_updates);
		vm_id->flushed_updates = radeon_fence_ref(updates);
		vm_id->pd_gpu_addr = pd_addr;
		radeon_ring_vm_flush(rdev, &rdev->ring[ring],
				     vm_id->id, vm_id->pd_gpu_addr);
	}
}

/**
 * radeon_vm_fence - remember fence for vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to fence
 * @fence: fence to remember
 *
 * Fence the vm (cayman+).
 * Set the fence used to protect the page table and id.
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_fence(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_fence *fence)
{
	unsigned vm_id = vm->ids[fence->ring].id;

	radeon_fence_unref(&rdev->vm_manager.active[vm_id]);
	rdev->vm_manager.active[vm_id] = radeon_fence_ref(fence);

	radeon_fence_unref(&vm->ids[fence->ring].last_id_use);
	vm->ids[fence->ring].last_id_use = radeon_fence_ref(fence);
}

/**
 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm (cayman+).
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
				       struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm)
			return bo_va;
	}
	return NULL;
}

/**
 * radeon_vm_bo_add - add a bo to a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 *
 * Add @bo into the requested vm (cayman+).
 * Add @bo to the list of bos associated with the vm.
 * Returns the newly added bo_va or NULL for failure.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
				      struct radeon_vm *vm,
				      struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
	if (bo_va == NULL)
		return NULL;

	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->it.start = 0;
	bo_va->it.last = 0;
	bo_va->flags = 0;
	bo_va->addr = 0;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->vm_status);

	mutex_lock(&vm->mutex);
	list_add_tail(&bo_va->bo_list, &bo->va);
	mutex_unlock(&vm->mutex);

	return bo_va;
}

/**
 * radeon_vm_set_pages - helper to call the right asic function
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void radeon_vm_set_pages(struct radeon_device *rdev,
				struct radeon_ib *ib,
				uint64_t pe,
				uint64_t addr, unsigned count,
				uint32_t incr, uint32_t flags)
{
	trace_radeon_vm_set_page(pe, addr, count, incr, flags);

	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
		radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);

	} else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
		radeon_asic_vm_write_pages(rdev, ib, pe, addr,
					   count, incr, flags);

	} else {
		radeon_asic_vm_set_pages(rdev, ib, pe, addr,
					 count, incr, flags);
	}
}

/**
 * radeon_vm_clear_bo - initially clear the page dir/table
 *
 * @rdev: radeon_device pointer
 * @bo: bo to clear
 *
 * Returns 0 for success, error for failure.
 */
static int radeon_vm_clear_bo(struct radeon_device *rdev,
			      struct radeon_bo *bo)
{
	struct radeon_ib ib;
	unsigned entries;
	uint64_t addr;
	int r;

	r = radeon_bo_reserve(bo, false);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto error_unreserve;

	addr = radeon_bo_gpu_offset(bo);
	entries = radeon_bo_size(bo) / 8;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256);
	if (r)
		goto error_unreserve;

	ib.length_dw = 0;

	radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0);
	radeon_asic_vm_pad_ib(rdev, &ib);
	WARN_ON(ib.length_dw > 64);

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r)
		goto error_free;

	ib.fence->is_vm_update = true;
	radeon_bo_fence(bo, ib.fence, false);

error_free:
	radeon_ib_free(rdev, &ib);

error_unreserve:
	radeon_bo_unreserve(bo);
	return r;
}

/**
 * radeon_vm_bo_set_addr - set the bo's virtual address inside a vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to store the address
 * @soffset: requested offset of the buffer in the VM address space
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Set offset of @bo_va (cayman+).
 * Validate and set the offset requested within the vm address space.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and gets unreserved by this function!
 */
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
			  struct radeon_bo_va *bo_va,
			  uint64_t soffset,
			  uint32_t flags)
{
	uint64_t size = radeon_bo_size(bo_va->bo);
	struct radeon_vm *vm = bo_va->vm;
	unsigned last_pfn, pt_idx;
	uint64_t eoffset;
	int r;

	if (soffset) {
		/* make sure the object fits at this offset */
		eoffset = soffset + size;
		if (soffset >= eoffset)
			return -EINVAL;

		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
		if (last_pfn > rdev->vm_manager.max_pfn) {
			dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
				last_pfn, rdev->vm_manager.max_pfn);
			return -EINVAL;
		}

	} else {
		eoffset = last_pfn = 0;
	}

	mutex_lock(&vm->mutex);
	soffset /= RADEON_GPU_PAGE_SIZE;
	eoffset /= RADEON_GPU_PAGE_SIZE;
	if (soffset || eoffset) {
		struct interval_tree_node *it;
		it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
		if (it && it != &bo_va->it) {
			struct radeon_bo_va *tmp;
			tmp = container_of(it, struct radeon_bo_va, it);
			/* bo and tmp overlap, invalid offset */
			dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
				"(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
				soffset, tmp->bo, tmp->it.start, tmp->it.last);
			mutex_unlock(&vm->mutex);
			return -EINVAL;
		}
	}

	if (bo_va->it.start || bo_va->it.last) {
		if (bo_va->addr) {
			/* add a clone of the bo_va to clear the old address */
			struct radeon_bo_va *tmp;
			tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
			if (!tmp) {
				mutex_unlock(&vm->mutex);
				return -ENOMEM;
			}
			tmp->it.start = bo_va->it.start;
			tmp->it.last = bo_va->it.last;
			tmp->vm = vm;
			tmp->addr = bo_va->addr;
			tmp->bo = radeon_bo_ref(bo_va->bo);
			spin_lock(&vm->status_lock);
			list_add(&tmp->vm_status, &vm->freed);
			spin_unlock(&vm->status_lock);

			bo_va->addr = 0;
		}

		interval_tree_remove(&bo_va->it, &vm->va);
		bo_va->it.start = 0;
		bo_va->it.last = 0;
	}

	if (soffset || eoffset) {
		bo_va->it.start = soffset;
		bo_va->it.last = eoffset - 1;
		interval_tree_insert(&bo_va->it, &vm->va);
	}

	bo_va->flags = flags;
	bo_va->addr = 0;

	soffset >>= radeon_vm_block_size;
	eoffset >>= radeon_vm_block_size;

	BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));

	if (eoffset > vm->max_pde_used)
		vm->max_pde_used = eoffset;

	radeon_bo_unreserve(bo_va->bo);

	/* walk over the address space and allocate the page tables */
	for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
		struct radeon_bo *pt;

		if (vm->page_tables[pt_idx].bo)
			continue;

		/* drop mutex to allocate and clear page table */
		mutex_unlock(&vm->mutex);

		r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
				     RADEON_GPU_PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     NULL, NULL, &pt);
		if (r)
			return r;

		r = radeon_vm_clear_bo(rdev, pt);
		if (r) {
			radeon_bo_unref(&pt);
			radeon_bo_reserve(bo_va->bo, false);
			return r;
		}

		/* acquire mutex again */
		mutex_lock(&vm->mutex);
		if (vm->page_tables[pt_idx].bo) {
			/* someone else allocated the pt in the meantime */
			mutex_unlock(&vm->mutex);
			radeon_bo_unref(&pt);
			mutex_lock(&vm->mutex);
			continue;
		}

		vm->page_tables[pt_idx].addr = 0;
		vm->page_tables[pt_idx].bo = pt;
	}

	mutex_unlock(&vm->mutex);
	return 0;
}

/**
 * radeon_vm_map_gart - get the physical address of a gart page
 *
 * @rdev: radeon_device pointer
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to (cayman+).
 * Returns the physical address of the page.
 */
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
	result &= ~RADEON_GPU_PAGE_MASK;

	return result;
}

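/*
 * Example of the lookup above (hypothetical address): for addr
 * 0x00345678, the page index is 0x00345678 >> 12 = 0x345; masking off
 * the low RADEON_GPU_PAGE_MASK bits of that gart entry yields the
 * physical base address of the backing system page.
 */
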
/**
 * radeon_vm_page_flags - translate page flags to what the hw uses
 *
 * @flags: flags coming from userspace
 *
 * Translate the flags the userspace ABI uses to hw flags.
 */
static uint32_t radeon_vm_page_flags(uint32_t flags)
{
	uint32_t hw_flags = 0;

	hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
	if (flags & RADEON_VM_PAGE_SYSTEM) {
		hw_flags |= R600_PTE_SYSTEM;
		hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
	}
	return hw_flags;
}

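/*
 * Example translation, following directly from the code above: a
 * cached system page mapped read/write, i.e. RADEON_VM_PAGE_VALID |
 * RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_WRITEABLE |
 * RADEON_VM_PAGE_SYSTEM | RADEON_VM_PAGE_SNOOPED, becomes
 * R600_PTE_VALID | R600_PTE_READABLE | R600_PTE_WRITEABLE |
 * R600_PTE_SYSTEM | R600_PTE_SNOOPED.
 */
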
/**
 * radeon_vm_update_page_directory - make sure that the page directory is valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Allocates new page tables if necessary
 * and updates the page directory (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
int radeon_vm_update_page_directory(struct radeon_device *rdev,
				    struct radeon_vm *vm)
{
	struct radeon_bo *pd = vm->page_directory;
	uint64_t pd_addr = radeon_bo_gpu_offset(pd);
	uint32_t incr = RADEON_VM_PTE_COUNT * 8;
	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct radeon_ib ib;
	int r;

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += vm->max_pde_used * 6;

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
	if (r)
		return r;
	ib.length_dw = 0;

	/* walk over the address space and update the page directory */
	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
		struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		pt = radeon_bo_gpu_offset(bo);
		if (vm->page_tables[pt_idx].addr == pt)
			continue;
		vm->page_tables[pt_idx].addr = pt;

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt)) {

			if (count) {
				radeon_vm_set_pages(rdev, &ib, last_pde,
						    last_pt, count, incr,
						    R600_PTE_VALID);
			}

			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count)
		radeon_vm_set_pages(rdev, &ib, last_pde, last_pt, count,
				    incr, R600_PTE_VALID);

	if (ib.length_dw != 0) {
		radeon_asic_vm_pad_ib(rdev, &ib);

		radeon_sync_resv(rdev, &ib.sync, pd->tbo.resv, true);
		WARN_ON(ib.length_dw > ndw);
		r = radeon_ib_schedule(rdev, &ib, NULL, false);
		if (r) {
			radeon_ib_free(rdev, &ib);
			return r;
		}
		ib.fence->is_vm_update = true;
		radeon_bo_fence(pd, ib.fence, false);
	}
	radeon_ib_free(rdev, &ib);

	return 0;
}

/**
 * radeon_vm_frag_ptes - add fragment information to PTEs
 *
 * @rdev: radeon_device pointer
 * @ib: IB for the update
 * @pe_start: first PTE to handle
 * @pe_end: last PTE to handle
 * @addr: addr those PTEs should point to
 * @flags: hw mapping flags
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_frag_ptes(struct radeon_device *rdev,
				struct radeon_ib *ib,
				uint64_t pe_start, uint64_t pe_end,
				uint64_t addr, uint32_t flags)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning the virtual base address
	 * and the allocation size to the fragment size.
	 */

	/* NI is optimized for 256KB fragments, SI and newer for 64KB */
	uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) ||
			       (rdev->family == CHIP_ARUBA)) ?
		R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
	uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) ||
			       (rdev->family == CHIP_ARUBA)) ? 0x200 : 0x80;

	uint64_t frag_start = ALIGN(pe_start, frag_align);
	uint64_t frag_end = pe_end & ~(frag_align - 1);

	unsigned count;

	/* system pages are not continuous */
	if ((flags & R600_PTE_SYSTEM) || !(flags & R600_PTE_VALID) ||
	    (frag_start >= frag_end)) {

		count = (pe_end - pe_start) / 8;
		radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
				    RADEON_GPU_PAGE_SIZE, flags);
		return;
	}

	/* handle the 4K area at the beginning */
	if (pe_start != frag_start) {
		count = (frag_start - pe_start) / 8;
		radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
				    RADEON_GPU_PAGE_SIZE, flags);
		addr += RADEON_GPU_PAGE_SIZE * count;
	}

	/* handle the area in the middle */
	count = (frag_end - frag_start) / 8;
	radeon_vm_set_pages(rdev, ib, frag_start, addr, count,
			    RADEON_GPU_PAGE_SIZE, flags | frag_flags);

	/* handle the 4K area at the end */
	if (frag_end != pe_end) {
		addr += RADEON_GPU_PAGE_SIZE * count;
		count = (pe_end - frag_end) / 8;
		radeon_vm_set_pages(rdev, ib, frag_end, addr, count,
				    RADEON_GPU_PAGE_SIZE, flags);
	}
}

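/*
 * Worked example of the splitting above (hypothetical PTE addresses,
 * SI-style 64KB fragments, frag_align = 0x80): for pe_start = 0x30 and
 * pe_end = 0x230, frag_start = ALIGN(0x30, 0x80) = 0x80 and
 * frag_end = 0x230 & ~0x7f = 0x200. That gives (0x80 - 0x30) / 8 = 10
 * plain PTEs at the start, (0x200 - 0x80) / 8 = 48 PTEs carrying
 * R600_PTE_FRAG_64KB in the middle, and (0x230 - 0x200) / 8 = 6 plain
 * PTEs at the end.
 */
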
/**
 * radeon_vm_update_ptes - make sure that page tables are valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @ib: indirect buffer to fill with commands
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static int radeon_vm_update_ptes(struct radeon_device *rdev,
				 struct radeon_vm *vm,
				 struct radeon_ib *ib,
				 uint64_t start, uint64_t end,
				 uint64_t dst, uint32_t flags)
{
	uint64_t mask = RADEON_VM_PTE_COUNT - 1;
	uint64_t last_pte = ~0, last_dst = ~0;
	unsigned count = 0;
	uint64_t addr;

	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; ) {
		uint64_t pt_idx = addr >> radeon_vm_block_size;
		struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
		unsigned nptes;
		uint64_t pte;
		int r;

		radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
		r = reservation_object_reserve_shared(pt->tbo.resv);
		if (r)
			return r;

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = RADEON_VM_PTE_COUNT - (addr & mask);

		pte = radeon_bo_gpu_offset(pt);
		pte += (addr & mask) * 8;

		if ((last_pte + 8 * count) != pte) {

			if (count) {
				radeon_vm_frag_ptes(rdev, ib, last_pte,
						    last_pte + 8 * count,
						    last_dst, flags);
			}

			count = nptes;
			last_pte = pte;
			last_dst = dst;
		} else {
			count += nptes;
		}

		addr += nptes;
		dst += nptes * RADEON_GPU_PAGE_SIZE;
	}

	if (count) {
		radeon_vm_frag_ptes(rdev, ib, last_pte,
				    last_pte + 8 * count,
				    last_dst, flags);
	}

	return 0;
}

/**
 * radeon_vm_fence_pts - fence page tables after an update
 *
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @fence: fence to use
 *
 * Fence the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_fence_pts(struct radeon_vm *vm,
				uint64_t start, uint64_t end,
				struct radeon_fence *fence)
{
	unsigned i;

	start >>= radeon_vm_block_size;
	end >>= radeon_vm_block_size;

	for (i = start; i <= end; ++i)
		radeon_bo_fence(vm->page_tables[i].bo, fence, true);
}

/**
 * radeon_vm_bo_update - map a bo into the vm page table
 *
 * @rdev: radeon_device pointer
 * @bo_va: requested bo_va
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo_va (cayman+).
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved and mutex must be locked!
 */
int radeon_vm_bo_update(struct radeon_device *rdev,
			struct radeon_bo_va *bo_va,
			struct ttm_mem_reg *mem)
{
	struct radeon_vm *vm = bo_va->vm;
	struct radeon_ib ib;
	unsigned nptes, ncmds, ndw;
	uint64_t addr;
	uint32_t flags;
	int r;

	if (!bo_va->it.start) {
		dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
			bo_va->bo, vm);
		return -EINVAL;
	}

	spin_lock(&vm->status_lock);
	list_del_init(&bo_va->vm_status);
	spin_unlock(&vm->status_lock);

	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
	bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
	if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm))
		bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;

	if (mem) {
		addr = mem->start << PAGE_SHIFT;
		if (mem->mem_type != TTM_PL_SYSTEM)
			bo_va->flags |= RADEON_VM_PAGE_VALID;

		if (mem->mem_type == TTM_PL_TT) {
			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
			if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
				bo_va->flags |= RADEON_VM_PAGE_SNOOPED;

		} else {
			addr += rdev->vm_manager.vram_base_offset;
		}
	} else {
		addr = 0;
	}

	if (addr == bo_va->addr)
		return 0;
	bo_va->addr = addr;

	trace_radeon_vm_bo_update(bo_va);

	nptes = bo_va->it.last - bo_va->it.start + 1;

	/* reserve space for one command every (1 << BLOCK_SIZE) entries
	   or 2k dwords (whatever is smaller) */
	ncmds = (nptes >> min(radeon_vm_block_size, 11)) + 1;

	/* padding, etc. */
	ndw = 64;

	flags = radeon_vm_page_flags(bo_va->flags);
	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
		/* only copy commands needed */
		ndw += ncmds * 7;

	} else if (flags & R600_PTE_SYSTEM) {
		/* header for write data commands */
		ndw += ncmds * 4;

		/* body of write data command */
		ndw += nptes * 2;

	} else {
		/* set page commands needed */
		ndw += ncmds * 10;

		/* two extra commands for begin/end of fragment */
		ndw += 2 * 10;
	}

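	/*
	 * Sizing example for the estimate above (assumed numbers, for
	 * illustration): mapping nptes = 1000 system pages with a 9-bit
	 * block size gives ncmds = (1000 >> 9) + 1 = 2, so
	 * ndw = 64 + 2 * 4 + 1000 * 2 = 2072 dwords, well under the
	 * 0xfffff limit checked below.
	 */
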
	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
	if (r)
		return r;
	ib.length_dw = 0;

	if (!(bo_va->flags & RADEON_VM_PAGE_VALID)) {
		unsigned i;

		for (i = 0; i < RADEON_NUM_RINGS; ++i)
			radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use);
	}

	r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
				  bo_va->it.last + 1, addr,
				  radeon_vm_page_flags(bo_va->flags));
	if (r) {
		radeon_ib_free(rdev, &ib);
		return r;
	}

	radeon_asic_vm_pad_ib(rdev, &ib);
	WARN_ON(ib.length_dw > ndw);

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		radeon_ib_free(rdev, &ib);
		return r;
	}
	ib.fence->is_vm_update = true;
	radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence);
	radeon_fence_unref(&bo_va->last_pt_update);
	bo_va->last_pt_update = radeon_fence_ref(ib.fence);
	radeon_ib_free(rdev, &ib);

	return 0;
}

/**
 * radeon_vm_clear_freed - clear freed BOs in the PT
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int radeon_vm_clear_freed(struct radeon_device *rdev,
			  struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va;
	int r;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->freed)) {
		bo_va = list_first_entry(&vm->freed,
					 struct radeon_bo_va, vm_status);
		spin_unlock(&vm->status_lock);

		r = radeon_vm_bo_update(rdev, bo_va, NULL);
		radeon_bo_unref(&bo_va->bo);
		radeon_fence_unref(&bo_va->last_pt_update);
		kfree(bo_va);
		if (r)
			return r;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);
	return 0;
}

/**
 * radeon_vm_clear_invalids - clear invalidated BOs in the PT
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Make sure all invalidated BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int radeon_vm_clear_invalids(struct radeon_device *rdev,
			     struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va;
	int r;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated,
					 struct radeon_bo_va, vm_status);
		spin_unlock(&vm->status_lock);

		r = radeon_vm_bo_update(rdev, bo_va, NULL);
		if (r)
			return r;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	return 0;
}

/**
 * radeon_vm_bo_rmv - remove a bo from a specific vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm (cayman+).
 *
 * Object has to be reserved!
 */
void radeon_vm_bo_rmv(struct radeon_device *rdev,
		      struct radeon_bo_va *bo_va)
{
	struct radeon_vm *vm = bo_va->vm;

	list_del(&bo_va->bo_list);

	mutex_lock(&vm->mutex);
	if (bo_va->it.start || bo_va->it.last)
		interval_tree_remove(&bo_va->it, &vm->va);
	spin_lock(&vm->status_lock);
	list_del(&bo_va->vm_status);

	if (bo_va->addr) {
		bo_va->bo = radeon_bo_ref(bo_va->bo);
		list_add(&bo_va->vm_status, &vm->freed);
	} else {
		radeon_fence_unref(&bo_va->last_pt_update);
		kfree(bo_va);
	}
	spin_unlock(&vm->status_lock);

	mutex_unlock(&vm->mutex);
}

/**
 * radeon_vm_bo_invalidate - mark the bo as invalid
 *
 * @rdev: radeon_device pointer
 * @bo: radeon buffer object
 *
 * Mark @bo as invalid (cayman+).
 */
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->addr) {
			spin_lock(&bo_va->vm->status_lock);
			list_del(&bo_va->vm_status);
			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
			spin_unlock(&bo_va->vm->status_lock);
		}
	}
}

/**
 * radeon_vm_init - initialize a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Init @vm fields (cayman+).
 * Returns 0 for success, error for failure.
 */
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
	const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE,
				   RADEON_VM_PTE_COUNT * 8);
	unsigned pd_size, pd_entries, pts_size;
	int i, r;

	vm->ib_bo_va = NULL;
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		vm->ids[i].id = 0;
		vm->ids[i].flushed_updates = NULL;
		vm->ids[i].last_id_use = NULL;
	}
	mutex_init(&vm->mutex);
	vm->va = RB_ROOT;
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->invalidated);
	INIT_LIST_HEAD(&vm->freed);

	pd_size = radeon_vm_directory_size(rdev);
	pd_entries = radeon_vm_num_pdes(rdev);

	/* allocate page table array */
	pts_size = pd_entries * sizeof(struct radeon_vm_pt);
	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		return -ENOMEM;
	}

	r = radeon_bo_create(rdev, pd_size, align, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
			     NULL, &vm->page_directory);
	if (r)
		return r;

	r = radeon_vm_clear_bo(rdev, vm->page_directory);
	if (r) {
		radeon_bo_unref(&vm->page_directory);
		vm->page_directory = NULL;
		return r;
	}

	return 0;
}

/**
 * radeon_vm_fini - tear down a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Tear down @vm (cayman+).
 * Unbind the VM and remove all bos from the vm bo list.
 */
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va, *tmp;
	int i, r;

	if (!RB_EMPTY_ROOT(&vm->va))
		dev_err(rdev->dev, "still active bo inside vm\n");

	rbtree_postorder_for_each_entry_safe(bo_va, tmp, &vm->va, it.rb) {
		interval_tree_remove(&bo_va->it, &vm->va);
		r = radeon_bo_reserve(bo_va->bo, false);
		if (!r) {
			list_del_init(&bo_va->bo_list);
			radeon_bo_unreserve(bo_va->bo);
			radeon_fence_unref(&bo_va->last_pt_update);
			kfree(bo_va);
		}
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
		radeon_bo_unref(&bo_va->bo);
		radeon_fence_unref(&bo_va->last_pt_update);
		kfree(bo_va);
	}

	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
		radeon_bo_unref(&vm->page_tables[i].bo);
	kfree(vm->page_tables);

	radeon_bo_unref(&vm->page_directory);

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		radeon_fence_unref(&vm->ids[i].flushed_updates);
		radeon_fence_unref(&vm->ids[i].last_id_use);
	}

	mutex_destroy(&vm->mutex);
}