/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/iommu.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "bif/bif_4_1_d.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr);

static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);

/*
 * Global memory.
 */
static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

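/*
 * amdgpu_ttm_global_init - take references on the global TTM memory
 * accounting and BO objects and set up the kernel scheduler entity used
 * for buffer moves.
 */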
static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
	struct drm_global_reference *global_ref;
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	int r;

	adev->mman.mem_global_referenced = false;
	global_ref = &adev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &amdgpu_ttm_mem_global_init;
	global_ref->release = &amdgpu_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r) {
		DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
		goto error_mem;
	}

	adev->mman.bo_global_ref.mem_glob =
		adev->mman.mem_global_ref.object;
	global_ref = &adev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		goto error_bo;
	}

	mutex_init(&adev->mman.gtt_window_lock);

	ring = adev->mman.buffer_funcs_ring;
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
	r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
				  rq, amdgpu_sched_jobs);
	if (r) {
		DRM_ERROR("Failed setting up TTM BO move run queue.\n");
		goto error_entity;
	}

	adev->mman.mem_global_referenced = true;

	return 0;

error_entity:
	drm_global_item_unref(&adev->mman.bo_global_ref.ref);
error_bo:
	drm_global_item_unref(&adev->mman.mem_global_ref);
error_mem:
	return r;
}

static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
{
	if (adev->mman.mem_global_referenced) {
		amd_sched_entity_fini(adev->mman.entity.sched,
				      &adev->mman.entity);
		mutex_destroy(&adev->mman.gtt_window_lock);
		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
		drm_global_item_unref(&adev->mman.mem_global_ref);
		adev->mman.mem_global_referenced = false;
	}
}

static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

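/*
 * amdgpu_init_mem_type - describe each memory domain (system, GTT, VRAM,
 * GDS/GWS/OA) to TTM: which manager runs it, its GPU offset, and its
 * caching and mapping flags.
 */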
static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct amdgpu_device *adev;

	adev = amdgpu_ttm_adev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &amdgpu_gtt_mgr_func;
		man->gpu_offset = adev->mc.gart_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &amdgpu_vram_mgr_func;
		man->gpu_offset = adev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		/* On-chip GDS memory */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
		man->available_caching = TTM_PL_FLAG_UNCACHED;
		man->default_caching = TTM_PL_FLAG_UNCACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

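/*
 * amdgpu_evict_flags - pick the placement an evicted BO should move to.
 * VRAM BOs that don't require CPU access are first pushed into the CPU
 * inaccessible part of VRAM, with GTT only as the busy fallback.
 */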
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	abo = container_of(bo, struct amdgpu_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (adev->mman.buffer_funcs &&
		    adev->mman.buffer_funcs_ring &&
		    adev->mman.buffer_funcs_ring->ready == false) {
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			unsigned fpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
			struct drm_mm_node *node = bo->mem.mm_node;
			unsigned long pages_left;

			for (pages_left = bo->mem.num_pages;
			     pages_left;
			     pages_left -= node->size, node++) {
				if (node->start < fpfn)
					break;
			}

			if (!pages_left)
				goto gtt;

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							 AMDGPU_GEM_DOMAIN_GTT);
			abo->placements[0].fpfn = fpfn;
			abo->placements[0].lpfn = 0;
			abo->placement.busy_placement = &abo->placements[1];
			abo->placement.num_busy_placement = 1;
		} else {
gtt:
			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		}
		break;
	case TTM_PL_TT:
	default:
		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
	}
	*placement = abo->placement;
}

static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);

	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&abo->gem_base.vma_node,
					  filp->private_data);
}

static void amdgpu_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

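/*
 * amdgpu_mm_node_addr - compute the GPU address of a drm_mm node, or 0
 * for GTT memory that has no real GART address assigned yet.
 */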
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
				    struct drm_mm_node *mm_node,
				    struct ttm_mem_reg *mem)
{
	uint64_t addr = 0;

	if (mem->mem_type != TTM_PL_TT ||
	    amdgpu_gtt_mgr_is_allocated(mem)) {
		addr = mm_node->start << PAGE_SHIFT;
		addr += bo->bdev->man[mem->mem_type].gpu_offset;
	}
	return addr;
}

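/*
 * amdgpu_move_blit - copy a BO with the DMA engine, walking both the old
 * and the new drm_mm node lists in AMDGPU_GTT_MAX_TRANSFER_SIZE chunks.
 * GTT nodes without a real GART address are mapped through a temporary
 * GART window first via amdgpu_map_buffer().
 */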
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict, bool no_wait_gpu,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

	struct drm_mm_node *old_mm, *new_mm;
	uint64_t old_start, old_size, new_start, new_size;
	unsigned long num_pages;
	struct dma_fence *fence = NULL;
	int r;

	BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);

	if (!ring->ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	old_mm = old_mem->mm_node;
	old_size = old_mm->size;
	old_start = amdgpu_mm_node_addr(bo, old_mm, old_mem);

	new_mm = new_mem->mm_node;
	new_size = new_mm->size;
	new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem);

	num_pages = new_mem->num_pages;
	mutex_lock(&adev->mman.gtt_window_lock);
	while (num_pages) {
		unsigned long cur_pages = min(min(old_size, new_size),
					      (u64)AMDGPU_GTT_MAX_TRANSFER_SIZE);
		uint64_t from = old_start, to = new_start;
		struct dma_fence *next;

		if (old_mem->mem_type == TTM_PL_TT &&
		    !amdgpu_gtt_mgr_is_allocated(old_mem)) {
			r = amdgpu_map_buffer(bo, old_mem, cur_pages,
					      old_start, 0, ring, &from);
			if (r)
				goto error;
		}

		if (new_mem->mem_type == TTM_PL_TT &&
		    !amdgpu_gtt_mgr_is_allocated(new_mem)) {
			r = amdgpu_map_buffer(bo, new_mem, cur_pages,
					      new_start, 1, ring, &to);
			if (r)
				goto error;
		}

		r = amdgpu_copy_buffer(ring, from, to,
				       cur_pages * PAGE_SIZE,
				       bo->resv, &next, false, true);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		num_pages -= cur_pages;
		if (!num_pages)
			break;

		old_size -= cur_pages;
		if (!old_size) {
			old_start = amdgpu_mm_node_addr(bo, ++old_mm, old_mem);
			old_size = old_mm->size;
		} else {
			old_start += cur_pages * PAGE_SIZE;
		}

		new_size -= cur_pages;
		if (!new_size) {
			new_start = amdgpu_mm_node_addr(bo, ++new_mm, new_mem);
			new_size = new_mm->size;
		} else {
			new_start += cur_pages * PAGE_SIZE;
		}
	}
	mutex_unlock(&adev->mman.gtt_window_lock);

	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
	dma_fence_put(fence);
	return r;

error:
	mutex_unlock(&adev->mman.gtt_window_lock);

	if (fence)
		dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

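/*
 * amdgpu_move_vram_ram - move a BO from VRAM to system memory by
 * blitting it into a temporary GTT placement and then unbinding it.
 */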
static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	adev = amdgpu_ttm_adev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r))
		return r;

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r))
		goto out_cleanup;

	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r))
		goto out_cleanup;

	r = amdgpu_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r))
		goto out_cleanup;

	r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	adev = amdgpu_ttm_adev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r))
		return r;

	r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
	if (unlikely(r))
		goto out_cleanup;

	r = amdgpu_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
	if (unlikely(r))
		goto out_cleanup;

out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

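/*
 * amdgpu_bo_move - TTM move callback; picks between a null move, a DMA
 * engine blit, or a CPU memcpy fallback depending on the source and
 * destination domains and whether the copy ring is available.
 */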
static int amdgpu_bo_move(struct ttm_buffer_object *bo,
			  bool evict, bool interruptible,
			  bool no_wait_gpu,
			  struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	/* Can't move a pinned BO */
	abo = container_of(bo, struct amdgpu_bo, tbo);
	if (WARN_ON_ONCE(abo->pin_count > 0))
		return -EINVAL;

	adev = amdgpu_ttm_adev(bo->bdev);

	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if (adev->mman.buffer_funcs == NULL ||
	    adev->mman.buffer_funcs_ring == NULL ||
	    !adev->mman.buffer_funcs_ring->ready) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = amdgpu_move_vram_ram(bo, evict, interruptible,
					 no_wait_gpu, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = amdgpu_move_ram_vram(bo, evict, interruptible,
					 no_wait_gpu, new_mem);
	} else {
		r = amdgpu_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
		if (r)
			return r;
	}

	if (bo->type == ttm_bo_type_device &&
	    new_mem->mem_type == TTM_PL_VRAM &&
	    old_mem->mem_type != TTM_PL_VRAM) {
		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
		 * accesses the BO after it's moved.
		 */
		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
	return 0;
}

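/*
 * amdgpu_ttm_io_mem_reserve - fill in the bus placement so CPU-visible
 * VRAM can be mapped through the PCI aperture; system and GTT memory
 * need no io reservation.
 */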
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.base = adev->mc.aper_base;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

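/*
 * amdgpu_ttm_io_mem_pfn - translate a CPU fault page offset into an
 * aperture PFN. Note the do_div() walk only works if the BO's drm_mm
 * nodes are all the size of the first node.
 */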
static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
{
	struct drm_mm_node *mm = bo->mem.mm_node;
	uint64_t size = mm->size;
	uint64_t offset = page_offset;

	page_offset = do_div(offset, size);
	mm += offset;
	return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + page_offset;
}

/*
 * TTM backend functions.
 */
struct amdgpu_ttm_gup_task_list {
	struct list_head	list;
	struct task_struct	*task;
};

struct amdgpu_ttm_tt {
	struct ttm_dma_tt	ttm;
	struct amdgpu_device	*adev;
	u64			offset;
	uint64_t		userptr;
	struct mm_struct	*usermm;
	uint32_t		userflags;
	spinlock_t		guptasklock;
	struct list_head	guptasks;
	atomic_t		mmu_invalidations;
	uint32_t		last_set_pages;
	struct list_head	list;
};

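/*
 * amdgpu_ttm_tt_get_user_pages - pin the pages backing a userptr BO.
 * The task is added to gtt->guptasks so concurrent MMU notifier
 * invalidations can tell they raced with this get_user_pages() call.
 */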
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned int flags = 0;
	unsigned pinned = 0;
	int r;

	if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
		flags |= FOLL_WRITE;

	down_read(&current->mm->mmap_sem);

	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
		/* check that we only use anonymous memory
		   to prevent problems with writeback */
		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
		struct vm_area_struct *vma;

		vma = find_vma(gtt->usermm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end) {
			up_read(&current->mm->mmap_sem);
			return -EPERM;
		}
	}

	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct page **p = pages + pinned;
		struct amdgpu_ttm_gup_task_list guptask;

		guptask.task = current;
		spin_lock(&gtt->guptasklock);
		list_add(&guptask.list, &gtt->guptasks);
		spin_unlock(&gtt->guptasklock);

		r = get_user_pages(userptr, num_pages, flags, p, NULL);

		spin_lock(&gtt->guptasklock);
		list_del(&guptask.list);
		spin_unlock(&gtt->guptasklock);

		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	up_read(&current->mm->mmap_sem);
	return 0;

release_pages:
	release_pages(pages, pinned, 0);
	up_read(&current->mm->mmap_sem);
	return r;
}

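/*
 * amdgpu_ttm_tt_set_user_pages - replace the page array of a userptr TT,
 * dropping the references held on any previously set pages.
 */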
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;

	gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i])
			put_page(ttm->pages[i]);

		ttm->pages[i] = pages ? pages[i] : NULL;
	}
}

void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		struct page *page = ttm->pages[i];

		if (!page)
			continue;

		if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
	}
}

/* prepare the sg table with the user pages */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned nents;
	int r;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	r = -ENOMEM;
	nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents != ttm->sg->nents)
		goto release_sg;

	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);
	return r;
}

static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* free the sg table and pages again */
	dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	amdgpu_ttm_tt_mark_user_pages(ttm);

	sg_free_table(ttm->sg);
}

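/*
 * amdgpu_ttm_backend_bind - bind the TT pages into the GART once the GTT
 * manager has assigned a real offset; otherwise binding is deferred
 * until amdgpu_ttm_bind().
 */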
static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	uint64_t flags;
	int r = 0;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(ttm);
		if (r) {
			DRM_ERROR("failed to pin userptr\n");
			return r;
		}
	}
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type == AMDGPU_PL_GDS ||
	    bo_mem->mem_type == AMDGPU_PL_GWS ||
	    bo_mem->mem_type == AMDGPU_PL_OA)
		return -EINVAL;

	if (!amdgpu_gtt_mgr_is_allocated(bo_mem))
		return 0;

	spin_lock(&gtt->adev->gtt_list_lock);
	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);

	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);
		goto error_gart_bind;
	}

	list_add_tail(&gtt->list, &gtt->adev->gtt_list);
error_gart_bind:
	spin_unlock(&gtt->adev->gtt_list_lock);
	return r;
}

bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	return gtt && !list_empty(&gtt->list);
}

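/*
 * amdgpu_ttm_bind - allocate a real GART address for a BO and move it
 * there, updating bo->offset to the final GPU address.
 */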
int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg tmp;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	if (!ttm || amdgpu_ttm_is_bound(ttm))
		return 0;

	tmp = bo->mem;
	tmp.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
	placements.flags = bo->mem.placement | TTM_PL_FLAG_TT;

	r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
	if (unlikely(r))
		return r;

	r = ttm_bo_move_ttm(bo, true, false, &tmp);
	if (unlikely(r))
		ttm_bo_mem_put(bo, &tmp);
	else
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
			bo->bdev->man[bo->mem.mem_type].gpu_offset;

	return r;
}

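/*
 * amdgpu_ttm_recover_gart - rebind every TT on the gtt_list into the
 * GART; used to restore the mappings when the GART table contents were
 * lost, e.g. across a GPU reset.
 */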
int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
{
	struct amdgpu_ttm_tt *gtt, *tmp;
	struct ttm_mem_reg bo_mem;
	uint64_t flags;
	int r;

	bo_mem.mem_type = TTM_PL_TT;
	spin_lock(&adev->gtt_list_lock);
	list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
		flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
		r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
				     gtt->ttm.ttm.pages, gtt->ttm.dma_address,
				     flags);
		if (r) {
			spin_unlock(&adev->gtt_list_lock);
			DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
				  gtt->ttm.ttm.num_pages, gtt->offset);
			return r;
		}
	}
	spin_unlock(&adev->gtt_list_lock);
	return 0;
}

static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	if (gtt->userptr)
		amdgpu_ttm_tt_unpin_userptr(ttm);

	if (!amdgpu_ttm_is_bound(ttm))
		return 0;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	spin_lock(&gtt->adev->gtt_list_lock);
	r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
	if (r) {
		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
			  gtt->ttm.ttm.num_pages, gtt->offset);
		goto error_unbind;
	}
	list_del_init(&gtt->list);
error_unbind:
	spin_unlock(&gtt->adev->gtt_list_lock);
	return r;
}

static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func amdgpu_backend_func = {
	.bind = &amdgpu_ttm_backend_bind,
	.unbind = &amdgpu_ttm_backend_unbind,
	.destroy = &amdgpu_ttm_backend_destroy,
};

static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
					   unsigned long size, uint32_t page_flags,
					   struct page *dummy_read_page)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt;

	adev = amdgpu_ttm_adev(bdev);

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL)
		return NULL;

	gtt->ttm.ttm.func = &amdgpu_backend_func;
	gtt->adev = adev;
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(gtt);
		return NULL;
	}
	INIT_LIST_HEAD(&gtt->list);
	return &gtt->ttm.ttm;
}

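/*
 * amdgpu_ttm_tt_populate - allocate and DMA-map the backing pages;
 * userptr and dma-buf (SG) TTs get their pages from elsewhere and only
 * need the sg table or address arrays set up.
 */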
static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl())
		return ttm_dma_populate(&gtt->ttm, adev->dev);
#endif

	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm);
}

static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (slave)
		return;

	adev = amdgpu_ttm_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
		return;
	}
#endif

	ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
}

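/*
 * amdgpu_ttm_tt_set_userptr - initialize a TT for a userptr mapping and
 * reset the MMU invalidation bookkeeping.
 */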
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->usermm = current->mm;
	gtt->userflags = flags;
	spin_lock_init(&gtt->guptasklock);
	INIT_LIST_HEAD(&gtt->guptasks);
	atomic_set(&gtt->mmu_invalidations, 0);
	gtt->last_set_pages = 0;

	return 0;
}

struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return NULL;

	return gtt->usermm;
}

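/*
 * amdgpu_ttm_tt_affect_userptr - called from the MMU notifier to check
 * whether an invalidated range overlaps this userptr; bumps the
 * invalidation counter unless the current task is itself in the middle
 * of get_user_pages() on this TT.
 */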
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct amdgpu_ttm_gup_task_list *entry;
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	spin_lock(&gtt->guptasklock);
	list_for_each_entry(entry, &gtt->guptasks, list) {
		if (entry->task == current) {
			spin_unlock(&gtt->guptasklock);
			return false;
		}
	}
	spin_unlock(&gtt->guptasklock);

	atomic_inc(&gtt->mmu_invalidations);

	return true;
}

bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
				       int *last_invalidated)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int prev_invalidated = *last_invalidated;

	*last_invalidated = atomic_read(&gtt->mmu_invalidations);
	return prev_invalidated != *last_invalidated;
}

bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL || !gtt->userptr)
		return false;

	return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
}

bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}

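/*
 * amdgpu_ttm_tt_pte_flags - compute the GART PTE flags (valid, system,
 * snooped, read/write) for a TT in the given placement.
 */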
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem)
{
	uint64_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && mem->mem_type == TTM_PL_TT) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm->caching_state == tt_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	flags |= adev->gart.gart_pte_flags;
	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}

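/*
 * amdgpu_ttm_bo_eviction_valuable - skip evicting BOs that cannot help
 * the allocation: VRAM BOs are only worth evicting if one of their nodes
 * intersects the requested fpfn/lpfn window.
 */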
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
{
	unsigned long num_pages = bo->mem.num_pages;
	struct drm_mm_node *node = bo->mem.mm_node;

	if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
		return ttm_bo_eviction_valuable(bo, place);

	switch (bo->mem.mem_type) {
	case TTM_PL_TT:
		return true;

	case TTM_PL_VRAM:
		/* Check each drm MM node individually */
		while (num_pages) {
			if (place->fpfn < (node->start + node->size) &&
			    !(place->lpfn && place->lpfn <= node->start))
				return true;

			num_pages -= node->size;
			++node;
		}
		break;

	default:
		break;
	}

	return ttm_bo_eviction_valuable(bo, place);
}

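/*
 * amdgpu_ttm_access_memory - read or write BO contents in VRAM through
 * the MM_INDEX/MM_DATA register window, four aligned bytes at a time
 * (the TTM access_memory callback, e.g. for ptrace poking a mapped BO).
 */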
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
				    unsigned long offset,
				    void *buf, int len, int write)
{
	struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct drm_mm_node *nodes = abo->tbo.mem.mm_node;
	uint32_t value = 0;
	int ret = 0;
	uint64_t pos;
	unsigned long flags;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return -EIO;

	while (offset >= (nodes->size << PAGE_SHIFT)) {
		offset -= nodes->size << PAGE_SHIFT;
		++nodes;
	}
	pos = (nodes->start << PAGE_SHIFT) + offset;

	while (len && pos < adev->mc.mc_vram_size) {
		uint64_t aligned_pos = pos & ~(uint64_t)3;
		uint32_t bytes = 4 - (pos & 3);
		uint32_t shift = (pos & 3) * 8;
		uint32_t mask = 0xffffffff << shift;

		if (len < bytes) {
			mask &= 0xffffffff >> (bytes - len) * 8;
			bytes = len;
		}

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
		if (!write || mask != 0xffffffff)
			value = RREG32_NO_KIQ(mmMM_DATA);
		if (write) {
			value &= ~mask;
			value |= (*(uint32_t *)buf << shift) & mask;
			WREG32_NO_KIQ(mmMM_DATA, value);
		}
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
		if (!write) {
			value = (value & mask) >> shift;
			memcpy(buf, &value, bytes);
		}

		ret += bytes;
		buf = (uint8_t *)buf + bytes;
		pos += bytes;
		len -= bytes;
		if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
			++nodes;
			pos = (nodes->start << PAGE_SHIFT);
		}
	}

	return ret;
}

static struct ttm_bo_driver amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.invalidate_caches = &amdgpu_invalidate_caches,
	.init_mem_type = &amdgpu_init_mem_type,
	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.verify_access = &amdgpu_verify_access,
	.move_notify = &amdgpu_bo_move_notify,
	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_free = &amdgpu_ttm_io_mem_free,
	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
	.access_memory = &amdgpu_ttm_access_memory
};

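/*
 * amdgpu_ttm_init - create the TTM device and all memory pools: VRAM,
 * GTT and the GDS/GWS/OA heaps, plus the stolen VGA memory BO and the
 * debugfs entries.
 */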
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
	uint64_t gtt_size;
	int r;
	u64 vis_vram_limit;

	r = amdgpu_ttm_global_init(adev);
	if (r)
		return r;

	/* No other users of address space, so set it to 0 */
	r = ttm_bo_device_init(&adev->mman.bdev,
			       adev->mman.bo_global_ref.ref.object,
			       &amdgpu_bo_driver,
			       adev->ddev->anon_inode->i_mapping,
			       DRM_FILE_PAGE_OFFSET,
			       adev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	adev->mman.initialized = true;
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
			   adev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}

	/* Reduce size of CPU-visible VRAM if requested */
	vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
	if (amdgpu_vis_vram_limit > 0 &&
	    vis_vram_limit <= adev->mc.visible_vram_size)
		adev->mc.visible_vram_size = vis_vram_limit;

	/* Change the size here instead of the init above so only lpfn is affected */
	amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

	r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->stolen_vga_memory,
				    NULL, NULL);
	if (r)
		return r;
	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
		 (unsigned)(adev->mc.real_vram_size / (1024 * 1024)));

	if (amdgpu_gtt_size == -1)
		gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
			       adev->mc.mc_vram_size);
	else
		gtt_size = (uint64_t)amdgpu_gtt_size << 20;
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
		 (unsigned)(gtt_size / (1024 * 1024)));

	adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
	adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
	adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
	adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
	adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
	adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
	adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
	adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
	adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
	/* GDS Memory */
	if (adev->gds.mem.total_size) {
		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
				   adev->gds.mem.total_size >> PAGE_SHIFT);
		if (r) {
			DRM_ERROR("Failed initializing GDS heap.\n");
			return r;
		}
	}

	/* GWS */
	if (adev->gds.gws.total_size) {
		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
				   adev->gds.gws.total_size >> PAGE_SHIFT);
		if (r) {
			DRM_ERROR("Failed initializing gws heap.\n");
			return r;
		}
	}

	/* OA */
	if (adev->gds.oa.total_size) {
		r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
				   adev->gds.oa.total_size >> PAGE_SHIFT);
		if (r) {
			DRM_ERROR("Failed initializing oa heap.\n");
			return r;
		}
	}

	r = amdgpu_ttm_debugfs_init(adev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
	int r;

	if (!adev->mman.initialized)
		return;
	amdgpu_ttm_debugfs_fini(adev);
	if (adev->stolen_vga_memory) {
		r = amdgpu_bo_reserve(adev->stolen_vga_memory, true);
		if (r == 0) {
			amdgpu_bo_unpin(adev->stolen_vga_memory);
			amdgpu_bo_unreserve(adev->stolen_vga_memory);
		}
		amdgpu_bo_unref(&adev->stolen_vga_memory);
	}
	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
	if (adev->gds.mem.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
	if (adev->gds.gws.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
	if (adev->gds.oa.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
	ttm_bo_device_release(&adev->mman.bdev);
	amdgpu_gart_fini(adev);
	amdgpu_ttm_global_fini(adev);
	adev->mman.initialized = false;
	DRM_INFO("amdgpu: ttm finalized\n");
}

/* this should only be called at bootup or when userspace
 * isn't running */
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size)
{
	struct ttm_mem_type_manager *man;

	if (!adev->mman.initialized)
		return;

	man = &adev->mman.bdev.man[TTM_PL_VRAM];
	/* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */
	man->size = size >> PAGE_SHIFT;
}

int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct amdgpu_device *adev;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return -EINVAL;

	file_priv = filp->private_data;
	adev = file_priv->minor->dev->dev_private;
	if (adev == NULL)
		return -EINVAL;

	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}

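/*
 * amdgpu_map_buffer - map num_pages of a BO into one of the GART windows
 * reserved for buffer moves: the PTEs are built in the IB and copied
 * into the GART table by the DMA engine, and the resulting GPU address
 * is returned in @addr.
 */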
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr)
{
	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
	struct amdgpu_device *adev = ring->adev;
	struct ttm_tt *ttm = bo->ttm;
	struct amdgpu_job *job;
	unsigned num_dw, num_bytes;
	dma_addr_t *dma_address;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t flags;
	int r;

	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

	*addr = adev->mc.gart_start;
	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;

	num_dw = adev->mman.buffer_funcs->copy_num_dw;
	while (num_dw & 0x7)
		num_dw++;

	num_bytes = num_pages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = adev->gart.table_addr;
	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
			    &job->ibs[0].ptr[num_dw]);
	if (r)
		goto error_free;

	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}

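/*
 * amdgpu_copy_buffer - schedule a DMA engine copy of byte_count bytes
 * between two GPU addresses, optionally syncing to a reservation object
 * first; returns the fence of the copy in @fence.
 */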
fc9c8f54 CK |
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct reservation_object *resv,
		       struct dma_fence **fence, bool direct_submit,
		       bool vm_needs_flush)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;

	uint32_t max_bytes;
	unsigned num_loops, num_dw;
	unsigned i;
	int r;

	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
	num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;

	/* for IB padding */
	while (num_dw & 0x7)
		num_dw++;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	job->vm_needs_flush = vm_needs_flush;
	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
					dst_offset, cur_size_in_bytes);

		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
		byte_count -= cur_size_in_bytes;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	if (direct_submit) {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
				       NULL, fence);
		job->fence = dma_fence_get(*fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &adev->mman.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
		if (r)
			goto error_free;
	}

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}

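/*
 * A minimal usage sketch, not taken from this file: src_gpu_addr,
 * dst_gpu_addr, num_bytes and bo are placeholders the caller must
 * provide; the real callers live elsewhere in the driver (e.g. the
 * benchmark and test code).
 *
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 *	r = amdgpu_copy_buffer(adev->mman.buffer_funcs_ring,
 *			       src_gpu_addr, dst_gpu_addr, num_bytes,
 *			       bo->tbo.resv, &fence, false, false);
 *	if (!r)
 *		dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 */

/*
 * amdgpu_fill_buffer - fill a buffer object with a 64-bit pattern
 *
 * VRAM BOs may be scattered over several drm_mm nodes, so the fill is
 * emitted node by node; the SDMA PTE-update packet (SDMA_OP_PTEPDE) is
 * reused to write @src_data eight bytes at a time, hence the
 * multiple-of-8 size warning below.  GTT BOs are bound first so the
 * destination has a valid GPU address.
 */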
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		       uint64_t src_data,
		       struct reservation_object *resv,
		       struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint32_t max_bytes = 8 *
		adev->vm_manager.vm_pte_funcs->set_max_nums_pte_pde;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

	struct drm_mm_node *mm_node;
	unsigned long num_pages;
	unsigned int num_loops, num_dw;

	struct amdgpu_job *job;
	int r;

	if (!ring->ready) {
		DRM_ERROR("Trying to clear memory with ring turned off.\n");
		return -EINVAL;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
		if (r)
			return r;
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;
	num_loops = 0;
	while (num_pages) {
		uint32_t byte_count = mm_node->size << PAGE_SHIFT;

		num_loops += DIV_ROUND_UP(byte_count, max_bytes);
		num_pages -= mm_node->size;
		++mm_node;
	}

	/* num of dwords for each SDMA_OP_PTEPDE cmd */
	num_dw = num_loops * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw;

	/* for IB padding */
	num_dw += 64;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;

	while (num_pages) {
		uint32_t byte_count = mm_node->size << PAGE_SHIFT;
		uint64_t dst_addr;

		WARN_ONCE(byte_count & 0x7, "size should be a multiple of 8");

		dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
		while (byte_count) {
			uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

			amdgpu_vm_set_pte_pde(adev, &job->ibs[0],
					      dst_addr, 0,
					      cur_size_in_bytes >> 3, 0,
					      src_data);

			dst_addr += cur_size_in_bytes;
			byte_count -= cur_size_in_bytes;
		}

		num_pages -= mm_node->size;
		++mm_node;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

#if defined(CONFIG_DEBUG_FS)

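/*
 * amdgpu_mm_dump_table - dump the drm_mm allocator state of a TTM pool
 *
 * The pool (VRAM or GTT) is selected through the data pointer of the
 * debugfs entry; output goes through a drm_printer bound to the
 * seq_file.
 */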
static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = *(int *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
	struct drm_printer p = drm_seq_file_printer(m);

	man->func->debug(man, &p);
	return 0;
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

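/*
 * amdgpu_ttm_vram_read - debugfs read of VRAM through the MMIO window
 *
 * Accesses VRAM one dword at a time via the MM_INDEX/MM_DATA register
 * pair: bit 31 of MM_INDEX enables the high aperture and MM_INDEX_HI
 * carries the address bits from bit 31 upwards.  Offset and size must
 * be dword aligned.
 */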
static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->mc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->mc.mc_vram_size)
			return result;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		value = RREG32_NO_KIQ(mmMM_DATA);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

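/*
 * Write counterpart of amdgpu_ttm_vram_read(): same MM_INDEX/MM_DATA
 * protocol, one dword per iteration.
 */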
static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
				     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->mc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->mc.mc_vram_size)
			return result;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		WREG32_NO_KIQ(mmMM_DATA, value);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_vram_read,
	.write = amdgpu_ttm_vram_write,
	.llseek = default_llseek,
};

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS

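/*
 * amdgpu_ttm_gtt_read - debugfs read of the GART-backed system pages
 *
 * Walks the CPU page array behind the GART, kmap()ing each bound page;
 * unbound pages read back as zeros via clear_user().
 */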
static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= adev->gart.num_cpu_pages)
			return result;

		page = adev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(adev->gart.pages[p]);
		} else {
			r = clear_user(buf, cur_size);
		}

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

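/*
 * amdgpu_iova_to_phys_read - translate a DMA (IO virtual) address
 *
 * The file offset is interpreted as an IOVA and resolved through the
 * device's IOMMU domain; without an IOMMU the address is returned
 * unchanged.  Reads are exactly 8 bytes at page-aligned offsets.
 */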
static ssize_t amdgpu_iova_to_phys_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int r;
	uint64_t phys;
	struct iommu_domain *dom;

	/* always return 8 bytes */
	if (size != 8)
		return -EINVAL;

	/* only accept page addresses */
	if (*pos & 0xFFF)
		return -EINVAL;

	dom = iommu_get_domain_for_dev(adev->dev);
	if (dom)
		phys = iommu_iova_to_phys(dom, *pos);
	else
		phys = *pos;

	r = copy_to_user(buf, &phys, 8);
	if (r)
		return -EFAULT;

	return 8;
}

static const struct file_operations amdgpu_ttm_iova_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_iova_to_phys_read,
	.llseek = default_llseek
};

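/*
 * Raw-access debugfs files; the VRAM and GTT entries get their inode
 * size set in amdgpu_ttm_debugfs_init().
 */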
static const struct {
	char *name;
	const struct file_operations *fops;
	int domain;
} ttm_debugfs_entries[] = {
	{ "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
#endif
	{ "amdgpu_iova", &amdgpu_ttm_iova_fops, TTM_PL_SYSTEM },
};

#endif

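/*
 * amdgpu_ttm_debugfs_init - register the TTM debugfs files
 *
 * Creates the raw-access files from ttm_debugfs_entries (sizing the
 * VRAM and GTT files to the respective aperture) and then registers
 * the allocator and page-pool dump files; the SWIOTLB entry is
 * dropped when no bounce buffers are in use.
 */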
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
		ent = debugfs_create_file(
				ttm_debugfs_entries[count].name,
				S_IFREG | S_IRUGO, root,
				adev,
				ttm_debugfs_entries[count].fops);
		if (IS_ERR(ent))
			return PTR_ERR(ent);
		if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
			i_size_write(ent->d_inode, adev->mc.mc_vram_size);
		else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
			i_size_write(ent->d_inode, adev->mc.gart_size);
		adev->mman.debugfs_entries[count] = ent;
	}

	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
		--count;
#endif

	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
	return 0;
#endif
}

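/* Remove the raw-access files created in amdgpu_ttm_debugfs_init(). */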
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
		debugfs_remove(adev->mman.debugfs_entries[i]);
#endif
}