Commit | Line | Data |
---|---|---|
d38ceaf9 AD |
1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. | |
3 | * Copyright 2008 Red Hat Inc. | |
4 | * Copyright 2009 Jerome Glisse. | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | |
7 | * copy of this software and associated documentation files (the "Software"), | |
8 | * to deal in the Software without restriction, including without limitation | |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
10 | * and/or sell copies of the Software, and to permit persons to whom the | |
11 | * Software is furnished to do so, subject to the following conditions: | |
12 | * | |
13 | * The above copyright notice and this permission notice shall be included in | |
14 | * all copies or substantial portions of the Software. | |
15 | * | |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
22 | * OTHER DEALINGS IN THE SOFTWARE. | |
23 | * | |
24 | * Authors: Dave Airlie | |
25 | * Alex Deucher | |
26 | * Jerome Glisse | |
27 | */ | |
28 | #include <drm/drmP.h> | |
29 | #include <drm/amdgpu_drm.h> | |
30 | #include "amdgpu.h" | |
31 | #include "amdgpu_trace.h" | |
32 | ||
33 | /* | |
34 | * GPUVM | |
35 | * GPUVM is similar to the legacy gart on older asics, however | |
36 | * rather than there being a single global gart table | |
37 | * for the entire GPU, there are multiple VM page tables active | |
38 | * at any given time. The VM page tables can contain a mix | |
39 | * of vram pages and system memory pages, and system memory pages | |
40 | * can be mapped as snooped (cached system pages) or unsnooped | |
41 | * (uncached system pages). | |
42 | * Each VM has an ID associated with it and there is a page table | |
43 | * associated with each VMID. When executing a command buffer, | |
44 | * the kernel tells the ring what VMID to use for that command | |
45 | * buffer. VMIDs are allocated dynamically as commands are submitted. | |
46 | * The userspace drivers maintain their own address space and the kernel | |
47 | * sets up their page tables accordingly when they submit their | |
48 | * command buffers and a VMID is assigned. | |
49 | * Cayman/Trinity support up to 8 active VMs at any given time; | |
50 | * SI supports 16. | |
51 | */ | |
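/*
 * Illustrative sketch only: how the helpers in this file split a GPU
 * address.  Addresses are handled in units of GPU pages, so for a
 * page-frame number "addr":
 *
 *     pt_idx  = addr >> amdgpu_vm_block_size;        which page table
 *     pte_idx = addr & (AMDGPU_VM_PTE_COUNT - 1);    entry inside it
 *
 * The concrete block size is an assumption for the example; with
 * amdgpu_vm_block_size = 9 (512 PTEs per table), page table N covers
 * GPU pages N*512 .. N*512 + 511.
 */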
52 | ||
53 | /** | |
54 | * amdgpu_vm_num_pdes - return the number of page directory entries | |
55 | * | |
56 | * @adev: amdgpu_device pointer | |
57 | * | |
58 | * Calculate the number of page directory entries (cayman+). | |
59 | */ | |
60 | static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev) | |
61 | { | |
62 | return adev->vm_manager.max_pfn >> amdgpu_vm_block_size; | |
63 | } | |
64 | ||
65 | /** | |
66 | * amdgpu_vm_directory_size - returns the size of the page directory in bytes | |
67 | * | |
68 | * @adev: amdgpu_device pointer | |
69 | * | |
70 | * Calculate the size of the page directory in bytes (cayman+). | |
71 | */ | |
72 | static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev) | |
73 | { | |
74 | return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8); | |
75 | } | |
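/*
 * Worked example for the two helpers above (the numbers are assumptions
 * chosen only to illustrate the math): with a 40-bit VM space, 4KB GPU
 * pages and amdgpu_vm_block_size = 9,
 *
 *     max_pfn  = (1ULL << 40) >> 12             = 0x10000000 pages
 *     num_pdes = max_pfn >> 9                   = 0x80000 entries
 *     pd size  = ALIGN(0x80000 * 8, page size)  = 4 MiB for the directory
 */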
76 | ||
77 | /** | |
56467ebf | 78 | * amdgpu_vm_get_pd_bo - add the VM PD to a validation list |
d38ceaf9 AD |
79 | * |
80 | * @vm: vm providing the BOs | |
3c0eea6c | 81 | * @validated: head of validation list |
56467ebf CK |
82 | * @entry: entry to add |
83 | * | |
84 | * Add the page directory to the list of BOs to | |
85 | * validate for command submission. | |
86 | */ | |
87 | void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, | |
88 | struct list_head *validated, | |
89 | struct amdgpu_bo_list_entry *entry) | |
90 | { | |
91 | entry->robj = vm->page_directory; | |
92 | entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; | |
93 | entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM; | |
94 | entry->priority = 0; | |
95 | entry->tv.bo = &vm->page_directory->tbo; | |
96 | entry->tv.shared = true; | |
97 | list_add(&entry->tv.head, validated); | |
98 | } | |
99 | ||
100 | /** | |
ee1782c3 | 101 | * amdgpu_vm_get_pt_bos - add the vm page tables to a duplicates list |
56467ebf CK |
102 | * |
103 | * @vm: vm providing the BOs | |
3c0eea6c | 104 | * @duplicates: head of duplicates list |
d38ceaf9 | 105 | * |
ee1782c3 CK |
106 | * Add the page tables to the BO duplicates list | |
107 | * for command submission. | |
d38ceaf9 | 108 | */ |
ee1782c3 | 109 | void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates) |
d38ceaf9 | 110 | { |
ee1782c3 | 111 | unsigned i; |
d38ceaf9 AD |
112 | |
113 | /* add the vm page table to the list */ | |
ee1782c3 CK |
114 | for (i = 0; i <= vm->max_pde_used; ++i) { |
115 | struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry; | |
116 | ||
117 | if (!entry->robj) | |
d38ceaf9 AD |
118 | continue; |
119 | ||
ee1782c3 | 120 | list_add(&entry->tv.head, duplicates); |
d38ceaf9 | 121 | } |
d38ceaf9 AD |
122 | } |
123 | ||
124 | /** | |
125 | * amdgpu_vm_grab_id - allocate the next free VMID | |
126 | * | |
d38ceaf9 | 127 | * @vm: vm to allocate id for |
7f8a5290 CK |
128 | * @ring: ring we want to submit job to |
129 | * @sync: sync object where we add dependencies | |
d38ceaf9 | 130 | * |
7f8a5290 | 131 | * Allocate an id for the vm, adding fences to the sync obj as necessary. |
d38ceaf9 | 132 | * |
7f8a5290 | 133 | * Global mutex must be locked! |
d38ceaf9 | 134 | */ |
7f8a5290 CK |
135 | int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, |
136 | struct amdgpu_sync *sync) | |
d38ceaf9 | 137 | { |
d5283298 | 138 | struct fence *best[AMDGPU_MAX_RINGS] = {}; |
d38ceaf9 AD |
139 | struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; |
140 | struct amdgpu_device *adev = ring->adev; | |
141 | ||
142 | unsigned choices[2] = {}; | |
143 | unsigned i; | |
144 | ||
145 | /* check if the id is still valid */ | |
1c16c0a7 CK |
146 | if (vm_id->id) { |
147 | unsigned id = vm_id->id; | |
148 | long owner; | |
149 | ||
150 | owner = atomic_long_read(&adev->vm_manager.ids[id].owner); | |
151 | if (owner == (long)vm) { | |
152 | trace_amdgpu_vm_grab_id(vm_id->id, ring->idx); | |
153 | return 0; | |
154 | } | |
39ff8449 | 155 | } |
d38ceaf9 AD |
156 | |
157 | /* we definitely need to flush */ | |
158 | vm_id->pd_gpu_addr = ~0ll; | |
159 | ||
160 | /* skip over VMID 0, since it is the system VM */ | |
161 | for (i = 1; i < adev->vm_manager.nvm; ++i) { | |
1c16c0a7 | 162 | struct fence *fence = adev->vm_manager.ids[i].active; |
d5283298 | 163 | struct amdgpu_ring *fring; |
d38ceaf9 AD |
164 | |
165 | if (fence == NULL) { | |
166 | /* found a free one */ | |
167 | vm_id->id = i; | |
168 | trace_amdgpu_vm_grab_id(i, ring->idx); | |
7f8a5290 | 169 | return 0; |
d38ceaf9 AD |
170 | } |
171 | ||
d5283298 CK |
172 | fring = amdgpu_ring_from_fence(fence); |
173 | if (best[fring->idx] == NULL || | |
174 | fence_is_later(best[fring->idx], fence)) { | |
175 | best[fring->idx] = fence; | |
176 | choices[fring == ring ? 0 : 1] = i; | |
d38ceaf9 AD |
177 | } |
178 | } | |
179 | ||
180 | for (i = 0; i < 2; ++i) { | |
181 | if (choices[i]) { | |
d5283298 | 182 | struct fence *fence; |
7f8a5290 | 183 | |
1c16c0a7 | 184 | fence = adev->vm_manager.ids[choices[i]].active; |
d38ceaf9 | 185 | vm_id->id = choices[i]; |
7f8a5290 | 186 | |
d38ceaf9 | 187 | trace_amdgpu_vm_grab_id(choices[i], ring->idx); |
d5283298 | 188 | return amdgpu_sync_fence(ring->adev, sync, fence); |
d38ceaf9 AD |
189 | } |
190 | } | |
191 | ||
192 | /* should never happen */ | |
193 | BUG(); | |
7f8a5290 | 194 | return -EINVAL; |
d38ceaf9 AD |
195 | } |
196 | ||
197 | /** | |
198 | * amdgpu_vm_flush - hardware flush the vm | |
199 | * | |
200 | * @ring: ring to use for flush | |
201 | * @vm: vm we want to flush | |
202 | * @updates: last vm update that we waited for | |
203 | * | |
204 | * Flush the vm (cayman+). | |
205 | * | |
206 | * Global and local mutex must be locked! | |
207 | */ | |
208 | void amdgpu_vm_flush(struct amdgpu_ring *ring, | |
209 | struct amdgpu_vm *vm, | |
3c62338c | 210 | struct fence *updates) |
d38ceaf9 AD |
211 | { |
212 | uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); | |
213 | struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; | |
3c62338c | 214 | struct fence *flushed_updates = vm_id->flushed_updates; |
b56c2285 | 215 | bool is_later; |
3c62338c | 216 | |
b56c2285 CK |
217 | if (!flushed_updates) |
218 | is_later = true; | |
219 | else if (!updates) | |
220 | is_later = false; | |
221 | else | |
222 | is_later = fence_is_later(updates, flushed_updates); | |
d38ceaf9 | 223 | |
b56c2285 | 224 | if (pd_addr != vm_id->pd_gpu_addr || is_later) { |
d38ceaf9 | 225 | trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id); |
b56c2285 | 226 | if (is_later) { |
3c62338c CZ |
227 | vm_id->flushed_updates = fence_get(updates); |
228 | fence_put(flushed_updates); | |
229 | } | |
d38ceaf9 AD |
230 | vm_id->pd_gpu_addr = pd_addr; |
231 | amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr); | |
232 | } | |
233 | } | |
234 | ||
235 | /** | |
236 | * amdgpu_vm_fence - remember fence for vm | |
237 | * | |
238 | * @adev: amdgpu_device pointer | |
239 | * @vm: vm we want to fence | |
240 | * @fence: fence to remember | |
241 | * | |
242 | * Fence the vm (cayman+). | |
243 | * Set the fence used to protect page table and id. | |
244 | * | |
245 | * Global and local mutex must be locked! | |
246 | */ | |
247 | void amdgpu_vm_fence(struct amdgpu_device *adev, | |
248 | struct amdgpu_vm *vm, | |
16ae42fe | 249 | struct fence *fence) |
d38ceaf9 | 250 | { |
16ae42fe CK |
251 | struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence); |
252 | unsigned vm_id = vm->ids[ring->idx].id; | |
d38ceaf9 | 253 | |
1c16c0a7 CK |
254 | fence_put(adev->vm_manager.ids[vm_id].active); |
255 | adev->vm_manager.ids[vm_id].active = fence_get(fence); | |
256 | atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm); | |
d38ceaf9 AD |
257 | } |
258 | ||
259 | /** | |
260 | * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo | |
261 | * | |
262 | * @vm: requested vm | |
263 | * @bo: requested buffer object | |
264 | * | |
265 | * Find @bo inside the requested vm (cayman+). | |
266 | * Search inside the @bos vm list for the requested vm | |
267 | * Returns the found bo_va or NULL if none is found | |
268 | * | |
269 | * Object has to be reserved! | |
270 | */ | |
271 | struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, | |
272 | struct amdgpu_bo *bo) | |
273 | { | |
274 | struct amdgpu_bo_va *bo_va; | |
275 | ||
276 | list_for_each_entry(bo_va, &bo->va, bo_list) { | |
277 | if (bo_va->vm == vm) { | |
278 | return bo_va; | |
279 | } | |
280 | } | |
281 | return NULL; | |
282 | } | |
283 | ||
284 | /** | |
285 | * amdgpu_vm_update_pages - helper to call the right asic function | |
286 | * | |
287 | * @adev: amdgpu_device pointer | |
288 | * @ib: indirect buffer to fill with commands | |
289 | * @pe: addr of the page entry | |
290 | * @addr: dst addr to write into pe | |
291 | * @count: number of page entries to update | |
292 | * @incr: increase next addr by incr bytes | |
293 | * @flags: hw access flags | |
294 | * @gtt_flags: GTT hw access flags | |
295 | * | |
296 | * Traces the parameters and calls the right asic functions | |
297 | * to setup the page table using the DMA. | |
298 | */ | |
299 | static void amdgpu_vm_update_pages(struct amdgpu_device *adev, | |
300 | struct amdgpu_ib *ib, | |
301 | uint64_t pe, uint64_t addr, | |
302 | unsigned count, uint32_t incr, | |
303 | uint32_t flags, uint32_t gtt_flags) | |
304 | { | |
305 | trace_amdgpu_vm_set_page(pe, addr, count, incr, flags); | |
306 | ||
307 | if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) { | |
308 | uint64_t src = adev->gart.table_addr + (addr >> 12) * 8; | |
309 | amdgpu_vm_copy_pte(adev, ib, pe, src, count); | |
310 | ||
311 | } else if ((flags & AMDGPU_PTE_SYSTEM) || (count < 3)) { | |
312 | amdgpu_vm_write_pte(adev, ib, pe, addr, | |
313 | count, incr, flags); | |
314 | ||
315 | } else { | |
316 | amdgpu_vm_set_pte_pde(adev, ib, pe, addr, | |
317 | count, incr, flags); | |
318 | } | |
319 | } | |
320 | ||
4c7eb91c | 321 | int amdgpu_vm_free_job(struct amdgpu_job *job) |
d5fc5e82 CZ |
322 | { |
323 | int i; | |
4c7eb91c JZ |
324 | for (i = 0; i < job->num_ibs; i++) |
325 | amdgpu_ib_free(job->adev, &job->ibs[i]); | |
326 | kfree(job->ibs); | |
d5fc5e82 CZ |
327 | return 0; |
328 | } | |
329 | ||
d38ceaf9 AD |
330 | /** |
331 | * amdgpu_vm_clear_bo - initially clear the page dir/table | |
332 | * | |
333 | * @adev: amdgpu_device pointer | |
334 | * @bo: bo to clear | |
ef9f0a83 CZ |
335 | * |
336 | * The bo has to be reserved before calling this function. | |
d38ceaf9 AD |
337 | */ |
338 | static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |
339 | struct amdgpu_bo *bo) | |
340 | { | |
341 | struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; | |
4af9f07c | 342 | struct fence *fence = NULL; |
d5fc5e82 | 343 | struct amdgpu_ib *ib; |
d38ceaf9 AD |
344 | unsigned entries; |
345 | uint64_t addr; | |
346 | int r; | |
347 | ||
ca952613 | 348 | r = reservation_object_reserve_shared(bo->tbo.resv); |
349 | if (r) | |
350 | return r; | |
351 | ||
d38ceaf9 AD |
352 | r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); |
353 | if (r) | |
ef9f0a83 | 354 | goto error; |
d38ceaf9 AD |
355 | |
356 | addr = amdgpu_bo_gpu_offset(bo); | |
357 | entries = amdgpu_bo_size(bo) / 8; | |
358 | ||
d5fc5e82 CZ |
359 | ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); |
360 | if (!ib) | |
ef9f0a83 | 361 | goto error; |
d38ceaf9 | 362 | |
d5fc5e82 | 363 | r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib); |
d38ceaf9 AD |
364 | if (r) |
365 | goto error_free; | |
366 | ||
d5fc5e82 CZ |
367 | ib->length_dw = 0; |
368 | ||
369 | amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0); | |
370 | amdgpu_vm_pad_ib(adev, ib); | |
371 | WARN_ON(ib->length_dw > 64); | |
4af9f07c CZ |
372 | r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, |
373 | &amdgpu_vm_free_job, | |
374 | AMDGPU_FENCE_OWNER_VM, | |
375 | &fence); | |
376 | if (!r) | |
377 | amdgpu_bo_fence(bo, fence, true); | |
281b4223 | 378 | fence_put(fence); |
ef9f0a83 | 379 | if (amdgpu_enable_scheduler) |
d5fc5e82 | 380 | return 0; |
ef9f0a83 | 381 | |
d38ceaf9 | 382 | error_free: |
d5fc5e82 CZ |
383 | amdgpu_ib_free(adev, ib); |
384 | kfree(ib); | |
d38ceaf9 | 385 | |
ef9f0a83 | 386 | error: |
d38ceaf9 AD |
387 | return r; |
388 | } | |
389 | ||
390 | /** | |
391 | * amdgpu_vm_map_gart - get the physical address of a gart page | |
392 | * | |
393 | * @adev: amdgpu_device pointer | |
394 | * @addr: the unmapped addr | |
395 | * | |
396 | * Look up the physical address of the page that the pte resolves | |
397 | * to (cayman+). | |
398 | * Returns the physical address of the page. | |
399 | */ | |
400 | uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr) | |
401 | { | |
402 | uint64_t result; | |
403 | ||
404 | /* page table offset */ | |
405 | result = adev->gart.pages_addr[addr >> PAGE_SHIFT]; | |
406 | ||
407 | /* in case cpu page size != gpu page size */ | |
408 | result |= addr & (~PAGE_MASK); | |
409 | ||
410 | return result; | |
411 | } | |
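/*
 * Example with illustrative values: assuming 4KB CPU pages, an address of
 * 0x12345 resolves to pages_addr[0x12] for the backing page, and the low
 * 0x345 bytes are kept as the offset inside that page, so the CPU and GPU
 * page sizes do not need to match.
 */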
412 | ||
413 | /** | |
414 | * amdgpu_vm_update_page_directory - make sure that the page directory is valid | |
415 | * | |
416 | * @adev: amdgpu_device pointer | |
417 | * @vm: requested vm | |
420 | * | |
421 | * Allocates new page tables if necessary | |
422 | * and updates the page directory (cayman+). | |
423 | * Returns 0 for success, error for failure. | |
424 | * | |
425 | * Global and local mutex must be locked! | |
426 | */ | |
427 | int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | |
428 | struct amdgpu_vm *vm) | |
429 | { | |
430 | struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; | |
431 | struct amdgpu_bo *pd = vm->page_directory; | |
432 | uint64_t pd_addr = amdgpu_bo_gpu_offset(pd); | |
433 | uint32_t incr = AMDGPU_VM_PTE_COUNT * 8; | |
434 | uint64_t last_pde = ~0, last_pt = ~0; | |
435 | unsigned count = 0, pt_idx, ndw; | |
d5fc5e82 | 436 | struct amdgpu_ib *ib; |
4af9f07c | 437 | struct fence *fence = NULL; |
d5fc5e82 | 438 | |
d38ceaf9 AD |
439 | int r; |
440 | ||
441 | /* padding, etc. */ | |
442 | ndw = 64; | |
443 | ||
444 | /* assume the worst case */ | |
445 | ndw += vm->max_pde_used * 6; | |
446 | ||
447 | /* update too big for an IB */ | |
448 | if (ndw > 0xfffff) | |
449 | return -ENOMEM; | |
450 | ||
d5fc5e82 CZ |
451 | ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); |
452 | if (!ib) | |
453 | return -ENOMEM; | |
454 | ||
455 | r = amdgpu_ib_get(ring, NULL, ndw * 4, ib); | |
7a574557 SM |
456 | if (r) { |
457 | kfree(ib); | |
d38ceaf9 | 458 | return r; |
7a574557 | 459 | } |
d5fc5e82 | 460 | ib->length_dw = 0; |
d38ceaf9 AD |
461 | |
462 | /* walk over the address space and update the page directory */ | |
463 | for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { | |
ee1782c3 | 464 | struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj; |
d38ceaf9 AD |
465 | uint64_t pde, pt; |
466 | ||
467 | if (bo == NULL) | |
468 | continue; | |
469 | ||
470 | pt = amdgpu_bo_gpu_offset(bo); | |
471 | if (vm->page_tables[pt_idx].addr == pt) | |
472 | continue; | |
473 | vm->page_tables[pt_idx].addr = pt; | |
474 | ||
475 | pde = pd_addr + pt_idx * 8; | |
476 | if (((last_pde + 8 * count) != pde) || | |
477 | ((last_pt + incr * count) != pt)) { | |
478 | ||
479 | if (count) { | |
d5fc5e82 | 480 | amdgpu_vm_update_pages(adev, ib, last_pde, |
d38ceaf9 AD |
481 | last_pt, count, incr, |
482 | AMDGPU_PTE_VALID, 0); | |
483 | } | |
484 | ||
485 | count = 1; | |
486 | last_pde = pde; | |
487 | last_pt = pt; | |
488 | } else { | |
489 | ++count; | |
490 | } | |
491 | } | |
492 | ||
493 | if (count) | |
d5fc5e82 | 494 | amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count, |
d38ceaf9 AD |
495 | incr, AMDGPU_PTE_VALID, 0); |
496 | ||
d5fc5e82 CZ |
497 | if (ib->length_dw != 0) { |
498 | amdgpu_vm_pad_ib(adev, ib); | |
499 | amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM); | |
500 | WARN_ON(ib->length_dw > ndw); | |
4af9f07c CZ |
501 | r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, |
502 | &amdgpu_vm_free_job, | |
503 | AMDGPU_FENCE_OWNER_VM, | |
504 | &fence); | |
505 | if (r) | |
506 | goto error_free; | |
05906dec | 507 | |
4af9f07c | 508 | amdgpu_bo_fence(pd, fence, true); |
05906dec BN |
509 | fence_put(vm->page_directory_fence); |
510 | vm->page_directory_fence = fence_get(fence); | |
281b4223 | 511 | fence_put(fence); |
d38ceaf9 | 512 | } |
d5fc5e82 CZ |
513 | |
514 | if (!amdgpu_enable_scheduler || ib->length_dw == 0) { | |
515 | amdgpu_ib_free(adev, ib); | |
516 | kfree(ib); | |
517 | } | |
d38ceaf9 AD |
518 | |
519 | return 0; | |
d5fc5e82 CZ |
520 | |
521 | error_free: | |
d5fc5e82 CZ |
522 | amdgpu_ib_free(adev, ib); |
523 | kfree(ib); | |
4af9f07c | 524 | return r; |
d38ceaf9 AD |
525 | } |
526 | ||
527 | /** | |
528 | * amdgpu_vm_frag_ptes - add fragment information to PTEs | |
529 | * | |
530 | * @adev: amdgpu_device pointer | |
531 | * @ib: IB for the update | |
532 | * @pe_start: first PTE to handle | |
533 | * @pe_end: last PTE to handle | |
534 | * @addr: addr those PTEs should point to | |
535 | * @flags: hw mapping flags | |
536 | * @gtt_flags: GTT hw mapping flags | |
537 | * | |
538 | * Global and local mutex must be locked! | |
539 | */ | |
540 | static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, | |
541 | struct amdgpu_ib *ib, | |
542 | uint64_t pe_start, uint64_t pe_end, | |
543 | uint64_t addr, uint32_t flags, | |
544 | uint32_t gtt_flags) | |
545 | { | |
546 | /** | |
547 | * The MC L1 TLB supports variable sized pages, based on a fragment | |
548 | * field in the PTE. When this field is set to a non-zero value, page | |
549 | * granularity is increased from 4KB to (1 << (12 + frag)). The PTE | |
550 | * flags are considered valid for all PTEs within the fragment range | |
551 | * and corresponding mappings are assumed to be physically contiguous. | |
552 | * | |
553 | * The L1 TLB can store a single PTE for the whole fragment, | |
554 | * significantly increasing the space available for translation | |
555 | * caching. This leads to large improvements in throughput when the | |
556 | * TLB is under pressure. | |
557 | * | |
558 | * The L2 TLB distributes small and large fragments into two | |
559 | * asymmetric partitions. The large fragment cache is significantly | |
560 | * larger. Thus, we try to use large fragments wherever possible. | |
561 | * Userspace can support this by aligning virtual base address and | |
562 | * allocation size to the fragment size. | |
563 | */ | |
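/*
 * Worked example: a fragment value of 4 means 1 << (12 + 4) = 64KB
 * granularity.  The 0x80-byte alignment used below is 0x80 / 8 = 16 PTEs,
 * i.e. 16 * 4KB = 64KB of address space, which is why frag_start and
 * frag_end are rounded to that boundary.
 */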
564 | ||
565 | /* SI and newer are optimized for 64KB */ | |
566 | uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB; | |
567 | uint64_t frag_align = 0x80; | |
568 | ||
569 | uint64_t frag_start = ALIGN(pe_start, frag_align); | |
570 | uint64_t frag_end = pe_end & ~(frag_align - 1); | |
571 | ||
572 | unsigned count; | |
573 | ||
574 | /* system pages are not physically contiguous */ | |
575 | if ((flags & AMDGPU_PTE_SYSTEM) || !(flags & AMDGPU_PTE_VALID) || | |
576 | (frag_start >= frag_end)) { | |
577 | ||
578 | count = (pe_end - pe_start) / 8; | |
579 | amdgpu_vm_update_pages(adev, ib, pe_start, addr, count, | |
580 | AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags); | |
581 | return; | |
582 | } | |
583 | ||
584 | /* handle the 4K area at the beginning */ | |
585 | if (pe_start != frag_start) { | |
586 | count = (frag_start - pe_start) / 8; | |
587 | amdgpu_vm_update_pages(adev, ib, pe_start, addr, count, | |
588 | AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags); | |
589 | addr += AMDGPU_GPU_PAGE_SIZE * count; | |
590 | } | |
591 | ||
592 | /* handle the area in the middle */ | |
593 | count = (frag_end - frag_start) / 8; | |
594 | amdgpu_vm_update_pages(adev, ib, frag_start, addr, count, | |
595 | AMDGPU_GPU_PAGE_SIZE, flags | frag_flags, | |
596 | gtt_flags); | |
597 | ||
598 | /* handle the 4K area at the end */ | |
599 | if (frag_end != pe_end) { | |
600 | addr += AMDGPU_GPU_PAGE_SIZE * count; | |
601 | count = (pe_end - frag_end) / 8; | |
602 | amdgpu_vm_update_pages(adev, ib, frag_end, addr, count, | |
603 | AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags); | |
604 | } | |
605 | } | |
606 | ||
607 | /** | |
608 | * amdgpu_vm_update_ptes - make sure that page tables are valid | |
609 | * | |
610 | * @adev: amdgpu_device pointer | |
611 | * @vm: requested vm | |
612 | * @start: start of GPU address range | |
613 | * @end: end of GPU address range | |
614 | * @dst: destination address to map to | |
615 | * @flags: mapping flags | |
616 | * | |
617 | * Update the page tables in the range @start - @end (cayman+). | |
618 | * | |
619 | * Global and local mutex must be locked! | |
620 | */ | |
621 | static int amdgpu_vm_update_ptes(struct amdgpu_device *adev, | |
622 | struct amdgpu_vm *vm, | |
623 | struct amdgpu_ib *ib, | |
624 | uint64_t start, uint64_t end, | |
625 | uint64_t dst, uint32_t flags, | |
626 | uint32_t gtt_flags) | |
627 | { | |
628 | uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; | |
629 | uint64_t last_pte = ~0, last_dst = ~0; | |
a60c4232 | 630 | void *owner = AMDGPU_FENCE_OWNER_VM; |
d38ceaf9 AD |
631 | unsigned count = 0; |
632 | uint64_t addr; | |
633 | ||
a60c4232 CK |
634 | /* sync to everything on unmapping */ |
635 | if (!(flags & AMDGPU_PTE_VALID)) | |
636 | owner = AMDGPU_FENCE_OWNER_UNDEFINED; | |
637 | ||
d38ceaf9 AD |
638 | /* walk over the address space and update the page tables */ |
639 | for (addr = start; addr < end; ) { | |
640 | uint64_t pt_idx = addr >> amdgpu_vm_block_size; | |
ee1782c3 | 641 | struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj; |
d38ceaf9 AD |
642 | unsigned nptes; |
643 | uint64_t pte; | |
644 | int r; | |
645 | ||
a60c4232 | 646 | amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner); |
d38ceaf9 AD |
647 | r = reservation_object_reserve_shared(pt->tbo.resv); |
648 | if (r) | |
649 | return r; | |
650 | ||
651 | if ((addr & ~mask) == (end & ~mask)) | |
652 | nptes = end - addr; | |
653 | else | |
654 | nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); | |
655 | ||
656 | pte = amdgpu_bo_gpu_offset(pt); | |
657 | pte += (addr & mask) * 8; | |
658 | ||
659 | if ((last_pte + 8 * count) != pte) { | |
660 | ||
661 | if (count) { | |
662 | amdgpu_vm_frag_ptes(adev, ib, last_pte, | |
663 | last_pte + 8 * count, | |
664 | last_dst, flags, | |
665 | gtt_flags); | |
666 | } | |
667 | ||
668 | count = nptes; | |
669 | last_pte = pte; | |
670 | last_dst = dst; | |
671 | } else { | |
672 | count += nptes; | |
673 | } | |
674 | ||
675 | addr += nptes; | |
676 | dst += nptes * AMDGPU_GPU_PAGE_SIZE; | |
677 | } | |
678 | ||
679 | if (count) { | |
680 | amdgpu_vm_frag_ptes(adev, ib, last_pte, | |
681 | last_pte + 8 * count, | |
682 | last_dst, flags, gtt_flags); | |
683 | } | |
684 | ||
685 | return 0; | |
686 | } | |
687 | ||
d38ceaf9 AD |
688 | /** |
689 | * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table | |
690 | * | |
691 | * @adev: amdgpu_device pointer | |
692 | * @vm: requested vm | |
693 | * @mapping: mapped range and flags to use for the update | |
694 | * @addr: addr to set the area to | |
695 | * @gtt_flags: flags as they are used for GTT | |
696 | * @fence: optional resulting fence | |
697 | * | |
698 | * Fill in the page table entries for @mapping. | |
699 | * Returns 0 for success, -EINVAL for failure. | |
700 | * | |
701 | * Object has to be reserved and mutex must be locked! | |
702 | */ | |
703 | static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, | |
704 | struct amdgpu_vm *vm, | |
705 | struct amdgpu_bo_va_mapping *mapping, | |
706 | uint64_t addr, uint32_t gtt_flags, | |
bb1e38a4 | 707 | struct fence **fence) |
d38ceaf9 AD |
708 | { |
709 | struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; | |
710 | unsigned nptes, ncmds, ndw; | |
711 | uint32_t flags = gtt_flags; | |
d5fc5e82 | 712 | struct amdgpu_ib *ib; |
4af9f07c | 713 | struct fence *f = NULL; |
d38ceaf9 AD |
714 | int r; |
715 | ||
716 | /* Normally bo_va->flags only contains the READABLE and WRITEABLE bits, | |
717 | * but just in case we filter the flags here first. | |
718 | */ | |
719 | if (!(mapping->flags & AMDGPU_PTE_READABLE)) | |
720 | flags &= ~AMDGPU_PTE_READABLE; | |
721 | if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) | |
722 | flags &= ~AMDGPU_PTE_WRITEABLE; | |
723 | ||
724 | trace_amdgpu_vm_bo_update(mapping); | |
725 | ||
726 | nptes = mapping->it.last - mapping->it.start + 1; | |
727 | ||
728 | /* | |
729 | * reserve space for one command every (1 << BLOCK_SIZE) | |
730 | * entries or 2k dwords (whatever is smaller) | |
731 | */ | |
732 | ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1; | |
733 | ||
734 | /* padding, etc. */ | |
735 | ndw = 64; | |
736 | ||
737 | if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) { | |
738 | /* only copy commands needed */ | |
739 | ndw += ncmds * 7; | |
740 | ||
741 | } else if (flags & AMDGPU_PTE_SYSTEM) { | |
742 | /* header for write data commands */ | |
743 | ndw += ncmds * 4; | |
744 | ||
745 | /* body of write data command */ | |
746 | ndw += nptes * 2; | |
747 | ||
748 | } else { | |
749 | /* set page commands needed */ | |
750 | ndw += ncmds * 10; | |
751 | ||
752 | /* two extra commands for begin/end of fragment */ | |
753 | ndw += 2 * 10; | |
754 | } | |
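/*
 * Sizing example (illustrative, assuming amdgpu_vm_block_size = 9):
 * remapping 1 << 20 GPU pages through the copy path above needs
 * ncmds = ((1 << 20) >> 9) + 1 = 2049 commands, so
 * ndw = 64 + 2049 * 7 = 14407 dwords, comfortably below the 0xfffff
 * IB limit checked below.
 */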
755 | ||
756 | /* update too big for an IB */ | |
757 | if (ndw > 0xfffff) | |
758 | return -ENOMEM; | |
759 | ||
d5fc5e82 CZ |
760 | ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); |
761 | if (!ib) | |
762 | return -ENOMEM; | |
763 | ||
764 | r = amdgpu_ib_get(ring, NULL, ndw * 4, ib); | |
765 | if (r) { | |
766 | kfree(ib); | |
d38ceaf9 | 767 | return r; |
d5fc5e82 CZ |
768 | } |
769 | ||
770 | ib->length_dw = 0; | |
d38ceaf9 | 771 | |
d5fc5e82 | 772 | r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start, |
d38ceaf9 AD |
773 | mapping->it.last + 1, addr + mapping->offset, |
774 | flags, gtt_flags); | |
775 | ||
776 | if (r) { | |
d5fc5e82 CZ |
777 | amdgpu_ib_free(adev, ib); |
778 | kfree(ib); | |
d38ceaf9 AD |
779 | return r; |
780 | } | |
781 | ||
d5fc5e82 CZ |
782 | amdgpu_vm_pad_ib(adev, ib); |
783 | WARN_ON(ib->length_dw > ndw); | |
4af9f07c CZ |
784 | r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, |
785 | &amdgpu_vm_free_job, | |
786 | AMDGPU_FENCE_OWNER_VM, | |
787 | &f); | |
788 | if (r) | |
789 | goto error_free; | |
d38ceaf9 | 790 | |
bf60efd3 | 791 | amdgpu_bo_fence(vm->page_directory, f, true); |
4af9f07c CZ |
792 | if (fence) { |
793 | fence_put(*fence); | |
794 | *fence = fence_get(f); | |
795 | } | |
281b4223 | 796 | fence_put(f); |
4af9f07c | 797 | if (!amdgpu_enable_scheduler) { |
d5fc5e82 CZ |
798 | amdgpu_ib_free(adev, ib); |
799 | kfree(ib); | |
800 | } | |
d38ceaf9 | 801 | return 0; |
d5fc5e82 CZ |
802 | |
803 | error_free: | |
d5fc5e82 CZ |
804 | amdgpu_ib_free(adev, ib); |
805 | kfree(ib); | |
4af9f07c | 806 | return r; |
d38ceaf9 AD |
807 | } |
808 | ||
809 | /** | |
810 | * amdgpu_vm_bo_update - update all BO mappings in the vm page table | |
811 | * | |
812 | * @adev: amdgpu_device pointer | |
813 | * @bo_va: requested BO and VM object | |
814 | * @mem: ttm mem | |
815 | * | |
816 | * Fill in the page table entries for @bo_va. | |
817 | * Returns 0 for success, -EINVAL for failure. | |
818 | * | |
819 | * Object has to be reserved and mutex must be locked! | |
820 | */ | |
821 | int amdgpu_vm_bo_update(struct amdgpu_device *adev, | |
822 | struct amdgpu_bo_va *bo_va, | |
823 | struct ttm_mem_reg *mem) | |
824 | { | |
825 | struct amdgpu_vm *vm = bo_va->vm; | |
826 | struct amdgpu_bo_va_mapping *mapping; | |
827 | uint32_t flags; | |
828 | uint64_t addr; | |
829 | int r; | |
830 | ||
831 | if (mem) { | |
b7d698d7 | 832 | addr = (u64)mem->start << PAGE_SHIFT; |
d38ceaf9 AD |
833 | if (mem->mem_type != TTM_PL_TT) |
834 | addr += adev->vm_manager.vram_base_offset; | |
835 | } else { | |
836 | addr = 0; | |
837 | } | |
838 | ||
d38ceaf9 AD |
839 | flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); |
840 | ||
7fc11959 CK |
841 | spin_lock(&vm->status_lock); |
842 | if (!list_empty(&bo_va->vm_status)) | |
843 | list_splice_init(&bo_va->valids, &bo_va->invalids); | |
844 | spin_unlock(&vm->status_lock); | |
845 | ||
846 | list_for_each_entry(mapping, &bo_va->invalids, list) { | |
d38ceaf9 AD |
847 | r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr, |
848 | flags, &bo_va->last_pt_update); | |
849 | if (r) | |
850 | return r; | |
851 | } | |
852 | ||
d6c10f6b CK |
853 | if (trace_amdgpu_vm_bo_mapping_enabled()) { |
854 | list_for_each_entry(mapping, &bo_va->valids, list) | |
855 | trace_amdgpu_vm_bo_mapping(mapping); | |
856 | ||
857 | list_for_each_entry(mapping, &bo_va->invalids, list) | |
858 | trace_amdgpu_vm_bo_mapping(mapping); | |
859 | } | |
860 | ||
d38ceaf9 | 861 | spin_lock(&vm->status_lock); |
6d1d0ef7 | 862 | list_splice_init(&bo_va->invalids, &bo_va->valids); |
d38ceaf9 | 863 | list_del_init(&bo_va->vm_status); |
7fc11959 CK |
864 | if (!mem) |
865 | list_add(&bo_va->vm_status, &vm->cleared); | |
d38ceaf9 AD |
866 | spin_unlock(&vm->status_lock); |
867 | ||
868 | return 0; | |
869 | } | |
870 | ||
871 | /** | |
872 | * amdgpu_vm_clear_freed - clear freed BOs in the PT | |
873 | * | |
874 | * @adev: amdgpu_device pointer | |
875 | * @vm: requested vm | |
876 | * | |
877 | * Make sure all freed BOs are cleared in the PT. | |
878 | * Returns 0 for success. | |
879 | * | |
880 | * PTs have to be reserved and mutex must be locked! | |
881 | */ | |
882 | int amdgpu_vm_clear_freed(struct amdgpu_device *adev, | |
883 | struct amdgpu_vm *vm) | |
884 | { | |
885 | struct amdgpu_bo_va_mapping *mapping; | |
886 | int r; | |
887 | ||
9c4153b1 | 888 | spin_lock(&vm->freed_lock); |
d38ceaf9 AD |
889 | while (!list_empty(&vm->freed)) { |
890 | mapping = list_first_entry(&vm->freed, | |
891 | struct amdgpu_bo_va_mapping, list); | |
892 | list_del(&mapping->list); | |
9c4153b1 | 893 | spin_unlock(&vm->freed_lock); |
d38ceaf9 AD |
894 | r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL); |
895 | kfree(mapping); | |
896 | if (r) | |
897 | return r; | |
898 | ||
9c4153b1 | 899 | spin_lock(&vm->freed_lock); |
d38ceaf9 | 900 | } |
9c4153b1 | 901 | spin_unlock(&vm->freed_lock); |
902 | ||
d38ceaf9 AD |
903 | return 0; |
904 | ||
905 | } | |
906 | ||
907 | /** | |
908 | * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT | |
909 | * | |
910 | * @adev: amdgpu_device pointer | |
911 | * @vm: requested vm | |
912 | * | |
913 | * Make sure all invalidated BOs are cleared in the PT. | |
914 | * Returns 0 for success. | |
915 | * | |
916 | * PTs have to be reserved and mutex must be locked! | |
917 | */ | |
918 | int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, | |
cfe2c978 | 919 | struct amdgpu_vm *vm, struct amdgpu_sync *sync) |
d38ceaf9 | 920 | { |
cfe2c978 | 921 | struct amdgpu_bo_va *bo_va = NULL; |
91e1a520 | 922 | int r = 0; |
d38ceaf9 AD |
923 | |
924 | spin_lock(&vm->status_lock); | |
925 | while (!list_empty(&vm->invalidated)) { | |
926 | bo_va = list_first_entry(&vm->invalidated, | |
927 | struct amdgpu_bo_va, vm_status); | |
928 | spin_unlock(&vm->status_lock); | |
69b576a1 | 929 | mutex_lock(&bo_va->mutex); |
d38ceaf9 | 930 | r = amdgpu_vm_bo_update(adev, bo_va, NULL); |
69b576a1 | 931 | mutex_unlock(&bo_va->mutex); |
d38ceaf9 AD |
932 | if (r) |
933 | return r; | |
934 | ||
935 | spin_lock(&vm->status_lock); | |
936 | } | |
937 | spin_unlock(&vm->status_lock); | |
938 | ||
cfe2c978 | 939 | if (bo_va) |
bb1e38a4 | 940 | r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update); |
91e1a520 CK |
941 | |
942 | return r; | |
d38ceaf9 AD |
943 | } |
944 | ||
945 | /** | |
946 | * amdgpu_vm_bo_add - add a bo to a specific vm | |
947 | * | |
948 | * @adev: amdgpu_device pointer | |
949 | * @vm: requested vm | |
950 | * @bo: amdgpu buffer object | |
951 | * | |
952 | * Add @bo into the requested vm (cayman+). | |
953 | * Add @bo to the list of bos associated with the vm | |
954 | * Returns newly added bo_va or NULL for failure | |
955 | * | |
956 | * Object has to be reserved! | |
957 | */ | |
958 | struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, | |
959 | struct amdgpu_vm *vm, | |
960 | struct amdgpu_bo *bo) | |
961 | { | |
962 | struct amdgpu_bo_va *bo_va; | |
963 | ||
964 | bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL); | |
965 | if (bo_va == NULL) { | |
966 | return NULL; | |
967 | } | |
968 | bo_va->vm = vm; | |
969 | bo_va->bo = bo; | |
d38ceaf9 AD |
970 | bo_va->ref_count = 1; |
971 | INIT_LIST_HEAD(&bo_va->bo_list); | |
7fc11959 CK |
972 | INIT_LIST_HEAD(&bo_va->valids); |
973 | INIT_LIST_HEAD(&bo_va->invalids); | |
d38ceaf9 | 974 | INIT_LIST_HEAD(&bo_va->vm_status); |
69b576a1 | 975 | mutex_init(&bo_va->mutex); |
d38ceaf9 | 976 | list_add_tail(&bo_va->bo_list, &bo->va); |
d38ceaf9 AD |
977 | |
978 | return bo_va; | |
979 | } | |
980 | ||
981 | /** | |
982 | * amdgpu_vm_bo_map - map bo inside a vm | |
983 | * | |
984 | * @adev: amdgpu_device pointer | |
985 | * @bo_va: bo_va to store the address | |
986 | * @saddr: where to map the BO | |
987 | * @offset: requested offset in the BO | |
988 | * @flags: attributes of pages (read/write/valid/etc.) | |
989 | * | |
990 | * Add a mapping of the BO at the specified addr into the VM. | |
991 | * Returns 0 for success, error for failure. | |
992 | * | |
49b02b18 | 993 | * Object has to be reserved and unreserved outside! |
d38ceaf9 AD |
994 | */ |
995 | int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |
996 | struct amdgpu_bo_va *bo_va, | |
997 | uint64_t saddr, uint64_t offset, | |
998 | uint64_t size, uint32_t flags) | |
999 | { | |
1000 | struct amdgpu_bo_va_mapping *mapping; | |
1001 | struct amdgpu_vm *vm = bo_va->vm; | |
1002 | struct interval_tree_node *it; | |
1003 | unsigned last_pfn, pt_idx; | |
1004 | uint64_t eaddr; | |
1005 | int r; | |
1006 | ||
0be52de9 CK |
1007 | /* validate the parameters */ |
1008 | if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || | |
49b02b18 | 1009 | size == 0 || size & AMDGPU_GPU_PAGE_MASK) |
0be52de9 | 1010 | return -EINVAL; |
0be52de9 | 1011 | |
d38ceaf9 AD |
1012 | /* make sure object fits at this offset */ | |
1013 | eaddr = saddr + size; | |
49b02b18 | 1014 | if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) |
d38ceaf9 | 1015 | return -EINVAL; |
d38ceaf9 AD |
1016 | |
1017 | last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; | |
1018 | if (last_pfn > adev->vm_manager.max_pfn) { | |
1019 | dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n", | |
1020 | last_pfn, adev->vm_manager.max_pfn); | |
d38ceaf9 AD |
1021 | return -EINVAL; |
1022 | } | |
1023 | ||
d38ceaf9 AD |
1024 | saddr /= AMDGPU_GPU_PAGE_SIZE; |
1025 | eaddr /= AMDGPU_GPU_PAGE_SIZE; | |
1026 | ||
c25867df | 1027 | spin_lock(&vm->it_lock); |
d38ceaf9 | 1028 | it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1); |
c25867df | 1029 | spin_unlock(&vm->it_lock); |
d38ceaf9 AD |
1030 | if (it) { |
1031 | struct amdgpu_bo_va_mapping *tmp; | |
1032 | tmp = container_of(it, struct amdgpu_bo_va_mapping, it); | |
1033 | /* bo and tmp overlap, invalid addr */ | |
1034 | dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " | |
1035 | "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr, | |
1036 | tmp->it.start, tmp->it.last + 1); | |
d38ceaf9 | 1037 | r = -EINVAL; |
f48b2659 | 1038 | goto error; |
d38ceaf9 AD |
1039 | } |
1040 | ||
1041 | mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); | |
1042 | if (!mapping) { | |
d38ceaf9 | 1043 | r = -ENOMEM; |
f48b2659 | 1044 | goto error; |
d38ceaf9 AD |
1045 | } |
1046 | ||
1047 | INIT_LIST_HEAD(&mapping->list); | |
1048 | mapping->it.start = saddr; | |
1049 | mapping->it.last = eaddr - 1; | |
1050 | mapping->offset = offset; | |
1051 | mapping->flags = flags; | |
1052 | ||
69b576a1 | 1053 | mutex_lock(&bo_va->mutex); |
7fc11959 | 1054 | list_add(&mapping->list, &bo_va->invalids); |
69b576a1 | 1055 | mutex_unlock(&bo_va->mutex); |
c25867df | 1056 | spin_lock(&vm->it_lock); |
d38ceaf9 | 1057 | interval_tree_insert(&mapping->it, &vm->va); |
c25867df | 1058 | spin_unlock(&vm->it_lock); |
93e3e438 | 1059 | trace_amdgpu_vm_bo_map(bo_va, mapping); |
d38ceaf9 AD |
1060 | |
1061 | /* Make sure the page tables are allocated */ | |
1062 | saddr >>= amdgpu_vm_block_size; | |
1063 | eaddr >>= amdgpu_vm_block_size; | |
1064 | ||
1065 | BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev)); | |
1066 | ||
1067 | if (eaddr > vm->max_pde_used) | |
1068 | vm->max_pde_used = eaddr; | |
1069 | ||
d38ceaf9 AD |
1070 | /* walk over the address space and allocate the page tables */ |
1071 | for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { | |
bf60efd3 | 1072 | struct reservation_object *resv = vm->page_directory->tbo.resv; |
ee1782c3 | 1073 | struct amdgpu_bo_list_entry *entry; |
d38ceaf9 AD |
1074 | struct amdgpu_bo *pt; |
1075 | ||
ee1782c3 CK |
1076 | entry = &vm->page_tables[pt_idx].entry; |
1077 | if (entry->robj) | |
d38ceaf9 AD |
1078 | continue; |
1079 | ||
d38ceaf9 AD |
1080 | r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, |
1081 | AMDGPU_GPU_PAGE_SIZE, true, | |
857d913d AD |
1082 | AMDGPU_GEM_DOMAIN_VRAM, |
1083 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS, | |
bf60efd3 | 1084 | NULL, resv, &pt); |
49b02b18 | 1085 | if (r) |
d38ceaf9 | 1086 | goto error_free; |
49b02b18 | 1087 | |
d38ceaf9 AD |
1088 | r = amdgpu_vm_clear_bo(adev, pt); |
1089 | if (r) { | |
1090 | amdgpu_bo_unref(&pt); | |
1091 | goto error_free; | |
1092 | } | |
1093 | ||
ee1782c3 CK |
1094 | entry->robj = pt; |
1095 | entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; | |
1096 | entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM; | |
1097 | entry->priority = 0; | |
1098 | entry->tv.bo = &entry->robj->tbo; | |
1099 | entry->tv.shared = true; | |
d38ceaf9 | 1100 | vm->page_tables[pt_idx].addr = 0; |
d38ceaf9 AD |
1101 | } |
1102 | ||
d38ceaf9 AD |
1103 | return 0; |
1104 | ||
1105 | error_free: | |
d38ceaf9 | 1106 | list_del(&mapping->list); |
c25867df | 1107 | spin_lock(&vm->it_lock); |
d38ceaf9 | 1108 | interval_tree_remove(&mapping->it, &vm->va); |
c25867df | 1109 | spin_unlock(&vm->it_lock); |
93e3e438 | 1110 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); |
d38ceaf9 AD |
1111 | kfree(mapping); |
1112 | ||
f48b2659 | 1113 | error: |
d38ceaf9 AD |
1114 | return r; |
1115 | } | |
1116 | ||
1117 | /** | |
1118 | * amdgpu_vm_bo_unmap - remove bo mapping from vm | |
1119 | * | |
1120 | * @adev: amdgpu_device pointer | |
1121 | * @bo_va: bo_va to remove the address from | |
1122 | * @saddr: where the BO is mapped | |
1123 | * | |
1124 | * Remove a mapping of the BO at the specified addr from the VM. | |
1125 | * Returns 0 for success, error for failure. | |
1126 | * | |
49b02b18 | 1127 | * Object has to be reserved and unreserved outside! |
d38ceaf9 AD |
1128 | */ |
1129 | int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, | |
1130 | struct amdgpu_bo_va *bo_va, | |
1131 | uint64_t saddr) | |
1132 | { | |
1133 | struct amdgpu_bo_va_mapping *mapping; | |
1134 | struct amdgpu_vm *vm = bo_va->vm; | |
7fc11959 | 1135 | bool valid = true; |
d38ceaf9 | 1136 | |
6c7fc503 | 1137 | saddr /= AMDGPU_GPU_PAGE_SIZE; |
69b576a1 | 1138 | mutex_lock(&bo_va->mutex); |
7fc11959 | 1139 | list_for_each_entry(mapping, &bo_va->valids, list) { |
d38ceaf9 AD |
1140 | if (mapping->it.start == saddr) |
1141 | break; | |
1142 | } | |
1143 | ||
7fc11959 CK |
1144 | if (&mapping->list == &bo_va->valids) { |
1145 | valid = false; | |
1146 | ||
1147 | list_for_each_entry(mapping, &bo_va->invalids, list) { | |
1148 | if (mapping->it.start == saddr) | |
1149 | break; | |
1150 | } | |
1151 | ||
69b576a1 CZ |
1152 | if (&mapping->list == &bo_va->invalids) { |
1153 | mutex_unlock(&bo_va->mutex); | |
7fc11959 | 1154 | return -ENOENT; |
69b576a1 | 1155 | } |
d38ceaf9 | 1156 | } |
69b576a1 | 1157 | mutex_unlock(&bo_va->mutex); |
d38ceaf9 | 1158 | list_del(&mapping->list); |
c25867df | 1159 | spin_lock(&vm->it_lock); |
d38ceaf9 | 1160 | interval_tree_remove(&mapping->it, &vm->va); |
c25867df | 1161 | spin_unlock(&vm->it_lock); |
93e3e438 | 1162 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); |
d38ceaf9 | 1163 | |
9c4153b1 | 1164 | if (valid) { |
1165 | spin_lock(&vm->freed_lock); | |
d38ceaf9 | 1166 | list_add(&mapping->list, &vm->freed); |
9c4153b1 | 1167 | spin_unlock(&vm->freed_lock); |
1168 | } else { | |
d38ceaf9 | 1169 | kfree(mapping); |
9c4153b1 | 1170 | } |
d38ceaf9 AD |
1171 | |
1172 | return 0; | |
1173 | } | |
1174 | ||
1175 | /** | |
1176 | * amdgpu_vm_bo_rmv - remove a bo from a specific vm | |
1177 | * | |
1178 | * @adev: amdgpu_device pointer | |
1179 | * @bo_va: requested bo_va | |
1180 | * | |
1181 | * Remove @bo_va->bo from the requested vm (cayman+). | |
1182 | * | |
1183 | * Object has to be reserved! | |
1184 | */ | |
1185 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | |
1186 | struct amdgpu_bo_va *bo_va) | |
1187 | { | |
1188 | struct amdgpu_bo_va_mapping *mapping, *next; | |
1189 | struct amdgpu_vm *vm = bo_va->vm; | |
1190 | ||
1191 | list_del(&bo_va->bo_list); | |
1192 | ||
d38ceaf9 AD |
1193 | spin_lock(&vm->status_lock); |
1194 | list_del(&bo_va->vm_status); | |
1195 | spin_unlock(&vm->status_lock); | |
1196 | ||
7fc11959 | 1197 | list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { |
d38ceaf9 | 1198 | list_del(&mapping->list); |
c25867df | 1199 | spin_lock(&vm->it_lock); |
d38ceaf9 | 1200 | interval_tree_remove(&mapping->it, &vm->va); |
c25867df | 1201 | spin_unlock(&vm->it_lock); |
93e3e438 | 1202 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); |
9c4153b1 | 1203 | spin_lock(&vm->freed_lock); |
7fc11959 | 1204 | list_add(&mapping->list, &vm->freed); |
9c4153b1 | 1205 | spin_unlock(&vm->freed_lock); |
7fc11959 CK |
1206 | } |
1207 | list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { | |
1208 | list_del(&mapping->list); | |
c25867df | 1209 | spin_lock(&vm->it_lock); |
7fc11959 | 1210 | interval_tree_remove(&mapping->it, &vm->va); |
c25867df | 1211 | spin_unlock(&vm->it_lock); |
7fc11959 | 1212 | kfree(mapping); |
d38ceaf9 | 1213 | } |
bb1e38a4 | 1214 | fence_put(bo_va->last_pt_update); |
69b576a1 | 1215 | mutex_destroy(&bo_va->mutex); |
d38ceaf9 | 1216 | kfree(bo_va); |
d38ceaf9 AD |
1217 | } |
1218 | ||
1219 | /** | |
1220 | * amdgpu_vm_bo_invalidate - mark the bo as invalid | |
1221 | * | |
1222 | * @adev: amdgpu_device pointer | |
1223 | * @vm: requested vm | |
1224 | * @bo: amdgpu buffer object | |
1225 | * | |
1226 | * Mark @bo as invalid (cayman+). | |
1227 | */ | |
1228 | void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, | |
1229 | struct amdgpu_bo *bo) | |
1230 | { | |
1231 | struct amdgpu_bo_va *bo_va; | |
1232 | ||
1233 | list_for_each_entry(bo_va, &bo->va, bo_list) { | |
7fc11959 CK |
1234 | spin_lock(&bo_va->vm->status_lock); |
1235 | if (list_empty(&bo_va->vm_status)) | |
d38ceaf9 | 1236 | list_add(&bo_va->vm_status, &bo_va->vm->invalidated); |
7fc11959 | 1237 | spin_unlock(&bo_va->vm->status_lock); |
d38ceaf9 AD |
1238 | } |
1239 | } | |
1240 | ||
1241 | /** | |
1242 | * amdgpu_vm_init - initialize a vm instance | |
1243 | * | |
1244 | * @adev: amdgpu_device pointer | |
1245 | * @vm: requested vm | |
1246 | * | |
1247 | * Init @vm fields (cayman+). | |
1248 | */ | |
1249 | int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |
1250 | { | |
1251 | const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, | |
1252 | AMDGPU_VM_PTE_COUNT * 8); | |
1253 | unsigned pd_size, pd_entries, pts_size; | |
1254 | int i, r; | |
1255 | ||
1256 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | |
1257 | vm->ids[i].id = 0; | |
1258 | vm->ids[i].flushed_updates = NULL; | |
d38ceaf9 | 1259 | } |
d38ceaf9 AD |
1260 | vm->va = RB_ROOT; |
1261 | spin_lock_init(&vm->status_lock); | |
1262 | INIT_LIST_HEAD(&vm->invalidated); | |
7fc11959 | 1263 | INIT_LIST_HEAD(&vm->cleared); |
d38ceaf9 | 1264 | INIT_LIST_HEAD(&vm->freed); |
c25867df | 1265 | spin_lock_init(&vm->it_lock); |
9c4153b1 | 1266 | spin_lock_init(&vm->freed_lock); |
d38ceaf9 AD |
1267 | pd_size = amdgpu_vm_directory_size(adev); |
1268 | pd_entries = amdgpu_vm_num_pdes(adev); | |
1269 | ||
1270 | /* allocate page table array */ | |
1271 | pts_size = pd_entries * sizeof(struct amdgpu_vm_pt); | |
1272 | vm->page_tables = kzalloc(pts_size, GFP_KERNEL); | |
1273 | if (vm->page_tables == NULL) { | |
1274 | DRM_ERROR("Cannot allocate memory for page table array\n"); | |
1275 | return -ENOMEM; | |
1276 | } | |
1277 | ||
05906dec BN |
1278 | vm->page_directory_fence = NULL; |
1279 | ||
d38ceaf9 | 1280 | r = amdgpu_bo_create(adev, pd_size, align, true, |
857d913d AD |
1281 | AMDGPU_GEM_DOMAIN_VRAM, |
1282 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS, | |
72d7668b | 1283 | NULL, NULL, &vm->page_directory); |
d38ceaf9 AD |
1284 | if (r) |
1285 | return r; | |
ef9f0a83 CZ |
1286 | r = amdgpu_bo_reserve(vm->page_directory, false); |
1287 | if (r) { | |
1288 | amdgpu_bo_unref(&vm->page_directory); | |
1289 | vm->page_directory = NULL; | |
1290 | return r; | |
1291 | } | |
d38ceaf9 | 1292 | r = amdgpu_vm_clear_bo(adev, vm->page_directory); |
ef9f0a83 | 1293 | amdgpu_bo_unreserve(vm->page_directory); |
d38ceaf9 AD |
1294 | if (r) { |
1295 | amdgpu_bo_unref(&vm->page_directory); | |
1296 | vm->page_directory = NULL; | |
1297 | return r; | |
1298 | } | |
1299 | ||
1300 | return 0; | |
1301 | } | |
1302 | ||
1303 | /** | |
1304 | * amdgpu_vm_fini - tear down a vm instance | |
1305 | * | |
1306 | * @adev: amdgpu_device pointer | |
1307 | * @vm: requested vm | |
1308 | * | |
1309 | * Tear down @vm (cayman+). | |
1310 | * Unbind the VM and remove all bos from the vm bo list | |
1311 | */ | |
1312 | void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |
1313 | { | |
1314 | struct amdgpu_bo_va_mapping *mapping, *tmp; | |
1315 | int i; | |
1316 | ||
1317 | if (!RB_EMPTY_ROOT(&vm->va)) { | |
1318 | dev_err(adev->dev, "still active bo inside vm\n"); | |
1319 | } | |
1320 | rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) { | |
1321 | list_del(&mapping->list); | |
1322 | interval_tree_remove(&mapping->it, &vm->va); | |
1323 | kfree(mapping); | |
1324 | } | |
1325 | list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { | |
1326 | list_del(&mapping->list); | |
1327 | kfree(mapping); | |
1328 | } | |
1329 | ||
1330 | for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) | |
ee1782c3 | 1331 | amdgpu_bo_unref(&vm->page_tables[i].entry.robj); |
d38ceaf9 AD |
1332 | kfree(vm->page_tables); |
1333 | ||
1334 | amdgpu_bo_unref(&vm->page_directory); | |
05906dec | 1335 | fence_put(vm->page_directory_fence); |
d38ceaf9 | 1336 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { |
1c16c0a7 CK |
1337 | unsigned id = vm->ids[i].id; |
1338 | ||
1339 | atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner, | |
1340 | (long)vm, 0); | |
3c62338c | 1341 | fence_put(vm->ids[i].flushed_updates); |
d38ceaf9 AD |
1342 | } |
1343 | ||
d38ceaf9 | 1344 | } |
ea89f8c9 CK |
1345 | |
1346 | /** | |
1347 | * amdgpu_vm_manager_fini - cleanup VM manager | |
1348 | * | |
1349 | * @adev: amdgpu_device pointer | |
1350 | * | |
1351 | * Cleanup the VM manager and free resources. | |
1352 | */ | |
1353 | void amdgpu_vm_manager_fini(struct amdgpu_device *adev) | |
1354 | { | |
1355 | unsigned i; | |
1356 | ||
1357 | for (i = 0; i < AMDGPU_NUM_VM; ++i) | |
1c16c0a7 | 1358 | fence_put(adev->vm_manager.ids[i].active); |
ea89f8c9 | 1359 | } |