/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/hmm.h>
#include <linux/pagemap.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/dma-buf.h>
#include <linux/sizes.h>

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>

#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "bif/bif_4_1_d.h"

#define AMDGPU_TTM_VRAM_MAX_DW_READ	(size_t)128

/**
 * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
 * memory request.
 *
 * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
 * @type: The type of memory requested
 * @man: The memory type manager for each domain
 *
 * This is called by ttm_bo_init_mm() when a buffer object is being
 * initialized.
 */
static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct amdgpu_device *adev;

	adev = amdgpu_ttm_adev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		/* GTT memory */
		man->func = &amdgpu_gtt_mgr_func;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &amdgpu_vram_mgr_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		/* On-chip GDS memory */
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
		man->available_caching = TTM_PL_FLAG_UNCACHED;
		man->default_caching = TTM_PL_FLAG_UNCACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

/**
 * amdgpu_evict_flags - Compute placement flags
 *
 * @bo: The buffer object to evict
 * @placement: Possible destination(s) for evicted BO
 *
 * Fill in placement data when ttm_bo_evict() is called
 */
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	/* Don't handle scatter gather BOs */
	if (bo->type == ttm_bo_type_sg) {
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;
	}

	/* Object isn't an AMDGPU object so ignore */
	if (!amdgpu_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}

	abo = ttm_to_amdgpu_bo(bo);
	switch (bo->mem.mem_type) {
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;

	case TTM_PL_VRAM:
		if (!adev->mman.buffer_funcs_enabled) {
			/* Move to system memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
			   amdgpu_bo_in_cpu_visible_vram(abo)) {

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT);
			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
			abo->placements[0].lpfn = 0;
			abo->placement.busy_placement = &abo->placements[1];
			abo->placement.num_busy_placement = 1;
		} else {
			/* Move to GTT memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		}
		break;
	case TTM_PL_TT:
	default:
		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		break;
	}
	*placement = abo->placement;
}

/**
 * amdgpu_verify_access - Verify access for a mmap call
 *
 * @bo: The buffer object to map
 * @filp: The file pointer from the process performing the mmap
 *
 * This is called by ttm_bo_mmap() to verify whether a process
 * has the right to mmap a BO to their process space.
 */
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);

	/*
	 * Don't verify access for KFD BOs. They don't have a GEM
	 * object associated with them.
	 */
	if (abo->kfd_bo)
		return 0;

	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&abo->tbo.base.vma_node,
					  filp->private_data);
}

/**
 * amdgpu_move_null - Register memory for a buffer object
 *
 * @bo: The bo to assign the memory to
 * @new_mem: The memory to be assigned.
 *
 * Assign the memory from new_mem to the memory of the buffer object bo.
 */
static void amdgpu_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

/**
 * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
 *
 * @bo: The bo to assign the memory to.
 * @mm_node: Memory manager node for drm allocator.
 * @mem: The region where the bo resides.
 */
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
				    struct drm_mm_node *mm_node,
				    struct ttm_mem_reg *mem)
{
	uint64_t addr = 0;

	if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
		addr = mm_node->start << PAGE_SHIFT;
		addr += amdgpu_ttm_domain_start(amdgpu_ttm_adev(bo->bdev),
						mem->mem_type);
	}
	return addr;
}
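
/*
 * Worked example (editor's illustration, made-up numbers): for a VRAM node
 * with mm_node->start == 0x100 and a VRAM domain starting at
 * adev->gmc.vram_start == 0xF400000000, amdgpu_mm_node_addr() returns
 *
 *	(0x100 << PAGE_SHIFT) + 0xF400000000 == 0xF400100000
 *
 * with 4 KiB pages: the node's page offset scaled to bytes, plus the GPU
 * start address of the memory domain as reported by
 * amdgpu_ttm_domain_start().
 */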

/**
 * amdgpu_find_mm_node - Helper function that finds the drm_mm_node
 * corresponding to @offset. It also modifies @offset to be relative to
 * the start of the returned drm_mm_node.
 *
 * @mem: The region where the bo resides.
 * @offset: The offset to look up; updated to be relative to the
 * returned node.
 */
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
					       uint64_t *offset)
{
	struct drm_mm_node *mm_node = mem->mm_node;

	while (*offset >= (mm_node->size << PAGE_SHIFT)) {
		*offset -= (mm_node->size << PAGE_SHIFT);
		++mm_node;
	}
	return mm_node;
}

/**
 * amdgpu_ttm_map_buffer - Map memory into the GART windows
 * @bo: buffer object to map
 * @mem: memory object to map
 * @mm_node: drm_mm node object to map
 * @num_pages: number of pages to map
 * @offset: offset into @mm_node where to start
 * @window: which GART window to use
 * @ring: DMA ring to use for the copy
 * @tmz: if we should setup a TMZ enabled mapping
 * @addr: resulting address inside the MC address space
 *
 * Setup one of the GART windows to access a specific piece of memory or return
 * the physical address for local memory.
 */
static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
				 struct ttm_mem_reg *mem,
				 struct drm_mm_node *mm_node,
				 unsigned num_pages, uint64_t offset,
				 unsigned window, struct amdgpu_ring *ring,
				 bool tmz, uint64_t *addr)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	unsigned num_dw, num_bytes;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	void *cpu_addr;
	uint64_t flags;
	unsigned int i;
	int r;

	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

	/* Map only what can't be accessed directly */
	if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
		*addr = amdgpu_mm_node_addr(bo, mm_node, mem) + offset;
		return 0;
	}

	*addr = adev->gmc.gart_start;
	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;
	*addr += offset & ~PAGE_MASK;

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = num_pages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, false);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
	if (tmz)
		flags |= AMDGPU_PTE_TMZ;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	if (mem->mem_type == TTM_PL_TT) {
		struct ttm_dma_tt *dma;
		dma_addr_t *dma_address;

		dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
		dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
		r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
				    cpu_addr);
		if (r)
			goto error_free;
	} else {
		dma_addr_t dma_address;

		dma_address = (mm_node->start << PAGE_SHIFT) + offset;
		dma_address += adev->vm_manager.vram_base_offset;

		for (i = 0; i < num_pages; ++i) {
			r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
					    &dma_address, flags, cpu_addr);
			if (r)
				goto error_free;

			dma_address += PAGE_SIZE;
		}
	}

	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}
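
/*
 * Layout note (editor's illustration): the windows used above carve the start
 * of the GART space into slots of AMDGPU_GTT_MAX_TRANSFER_SIZE GPU pages, so
 * window @n begins at GPU address
 *
 *	adev->gmc.gart_start + n * AMDGPU_GTT_MAX_TRANSFER_SIZE *
 *	AMDGPU_GPU_PAGE_SIZE
 *
 * amdgpu_ttm_copy_mem_to_mem() below uses window 0 for the source and
 * window 1 for the destination of each copy chunk, serialized by
 * adev->mman.gtt_window_lock.
 */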

/**
 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
 * @adev: amdgpu device
 * @src: buffer/address where to read from
 * @dst: buffer/address where to write to
 * @size: number of bytes to copy
 * @tmz: if a secure copy should be used
 * @resv: resv object to sync to
 * @f: Returns the last fence if multiple jobs are submitted.
 *
 * The function copies @size bytes from {src->mem + src->offset} to
 * {dst->mem + dst->offset}. src->bo and dst->bo could be the same BO for a
 * move and different for a BO to BO copy.
 */
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       const struct amdgpu_copy_mem *src,
			       const struct amdgpu_copy_mem *dst,
			       uint64_t size, bool tmz,
			       struct dma_resv *resv,
			       struct dma_fence **f)
{
	const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
					AMDGPU_GPU_PAGE_SIZE);

	uint64_t src_node_size, dst_node_size, src_offset, dst_offset;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct drm_mm_node *src_mm, *dst_mm;
	struct dma_fence *fence = NULL;
	int r = 0;

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	src_offset = src->offset;
	src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
	src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;

	dst_offset = dst->offset;
	dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
	dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;

	mutex_lock(&adev->mman.gtt_window_lock);

	while (size) {
		uint32_t src_page_offset = src_offset & ~PAGE_MASK;
		uint32_t dst_page_offset = dst_offset & ~PAGE_MASK;
		struct dma_fence *next;
		uint32_t cur_size;
		uint64_t from, to;

		/* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
		 * begins at an offset, then adjust the size accordingly
		 */
		cur_size = max(src_page_offset, dst_page_offset);
		cur_size = min(min3(src_node_size, dst_node_size, size),
			       (uint64_t)(GTT_MAX_BYTES - cur_size));

		/* Map src to window 0 and dst to window 1. */
		r = amdgpu_ttm_map_buffer(src->bo, src->mem, src_mm,
					  PFN_UP(cur_size + src_page_offset),
					  src_offset, 0, ring, tmz, &from);
		if (r)
			goto error;

		r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, dst_mm,
					  PFN_UP(cur_size + dst_page_offset),
					  dst_offset, 1, ring, tmz, &to);
		if (r)
			goto error;

		r = amdgpu_copy_buffer(ring, from, to, cur_size,
				       resv, &next, false, true, tmz);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		size -= cur_size;
		if (!size)
			break;

		src_node_size -= cur_size;
		if (!src_node_size) {
			++src_mm;
			src_node_size = src_mm->size << PAGE_SHIFT;
			src_offset = 0;
		} else {
			src_offset += cur_size;
		}

		dst_node_size -= cur_size;
		if (!dst_node_size) {
			++dst_mm;
			dst_node_size = dst_mm->size << PAGE_SHIFT;
			dst_offset = 0;
		} else {
			dst_offset += cur_size;
		}
	}
error:
	mutex_unlock(&adev->mman.gtt_window_lock);
	if (f)
		*f = dma_fence_get(fence);
	dma_fence_put(fence);
	return r;
}
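
/*
 * Sizing note (editor's illustration, assuming the usual 512-page transfer
 * window and 4 KiB GPU pages): GTT_MAX_BYTES then evaluates to 2 MiB, so a
 * page-aligned 20 MiB copy is split by the loop above into ten
 * amdgpu_copy_buffer() submissions; each chunk replaces @fence with its own
 * fence and only the last one is handed back through @f.
 */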

/**
 * amdgpu_move_blit - Copy an entire buffer to another buffer
 *
 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
 * help move buffers to and from VRAM.
 */
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict, bool no_wait_gpu,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_copy_mem src, dst;
	struct dma_fence *fence = NULL;
	int r;

	src.bo = bo;
	dst.bo = bo;
	src.mem = old_mem;
	dst.mem = new_mem;
	src.offset = 0;
	dst.offset = 0;

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
				       new_mem->num_pages << PAGE_SHIFT,
				       amdgpu_bo_encrypted(abo),
				       bo->base.resv, &fence);
	if (r)
		goto error;

	/* clear the space being freed */
	if (old_mem->mem_type == TTM_PL_VRAM &&
	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
		struct dma_fence *wipe_fence = NULL;

		r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
				       NULL, &wipe_fence);
		if (r) {
			goto error;
		} else if (wipe_fence) {
			dma_fence_put(fence);
			fence = wipe_fence;
		}
	}

	/* Always block for VM page tables before committing the new location */
	if (bo->type == ttm_bo_type_kernel)
		r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
	else
		r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
	dma_fence_put(fence);
	return r;

error:
	if (fence)
		dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer
 *
 * Called by amdgpu_bo_move().
 */
static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	/* create space/pages for new_mem in GTT space */
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		pr_err("Failed to find GTT space for blit from VRAM\n");
		return r;
	}

	/* set caching flags */
	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	/* Bind the memory to the GTT space */
	r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	/* blit VRAM to GTT */
	r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	/* move BO (in tmp_mem) to new_mem */
	r = ttm_bo_move_ttm(bo, ctx, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

/**
 * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM
 *
 * Called by amdgpu_bo_move().
 */
static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	/* make space in GTT for old_mem buffer */
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		pr_err("Failed to find GTT space for blit to VRAM\n");
		return r;
	}

	/* move/bind old memory to GTT space */
	r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	/* copy to VRAM */
	r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

/**
 * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
 *
 * Called by amdgpu_bo_move()
 */
static bool amdgpu_mem_visible(struct amdgpu_device *adev,
			       struct ttm_mem_reg *mem)
{
	struct drm_mm_node *nodes = mem->mm_node;

	if (mem->mem_type == TTM_PL_SYSTEM ||
	    mem->mem_type == TTM_PL_TT)
		return true;
	if (mem->mem_type != TTM_PL_VRAM)
		return false;

	/* ttm_mem_reg_ioremap only supports contiguous memory */
	if (nodes->size != mem->num_pages)
		return false;

	return ((nodes->start + nodes->size) << PAGE_SHIFT)
		<= adev->gmc.visible_vram_size;
}
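
/*
 * Example (editor's illustration, made-up numbers): a contiguous VRAM
 * allocation with nodes->start == 0x1000 and nodes->size == 0x100 pages ends
 * at byte (0x1100 << PAGE_SHIFT) == 0x1100000. With a 256 MiB CPU visible
 * aperture (visible_vram_size == 0x10000000) the check above passes, so the
 * ttm_bo_move_memcpy() fallback in amdgpu_bo_move() is allowed to touch it.
 */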

/**
 * amdgpu_bo_move - Move a buffer object to a new memory location
 *
 * Called by ttm_bo_handle_move_mem()
 */
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	/* Can't move a pinned BO */
	abo = ttm_to_amdgpu_bo(bo);
	if (WARN_ON_ONCE(abo->pin_count > 0))
		return -EINVAL;

	adev = amdgpu_ttm_adev(bo->bdev);

	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if (old_mem->mem_type == AMDGPU_PL_GDS ||
	    old_mem->mem_type == AMDGPU_PL_GWS ||
	    old_mem->mem_type == AMDGPU_PL_OA ||
	    new_mem->mem_type == AMDGPU_PL_GDS ||
	    new_mem->mem_type == AMDGPU_PL_GWS ||
	    new_mem->mem_type == AMDGPU_PL_OA) {
		/* Nothing to save here */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}

	if (!adev->mman.buffer_funcs_enabled) {
		r = -ENODEV;
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
	} else {
		r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
				     new_mem, old_mem);
	}

	if (r) {
memcpy:
		/* Check that all memory is CPU accessible */
		if (!amdgpu_mem_visible(adev, old_mem) ||
		    !amdgpu_mem_visible(adev, new_mem)) {
			pr_err("Move buffer fallback to memcpy unavailable\n");
			return r;
		}

		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r)
			return r;
	}

	if (bo->type == ttm_bo_type_device &&
	    new_mem->mem_type == TTM_PL_VRAM &&
	    old_mem->mem_type != TTM_PL_VRAM) {
		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
		 * accesses the BO after it's moved.
		 */
		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
	return 0;
}

/**
 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
 *
 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
 */
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct drm_mm_node *mm_node = mem->mm_node;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
			return -EINVAL;
		/* Only physically contiguous buffers apply. In a contiguous
		 * buffer, size of the first mm_node would match the number of
		 * pages in ttm_mem_reg.
		 */
		if (adev->mman.aper_base_kaddr &&
		    (mm_node->size == mem->num_pages))
			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
					mem->bus.offset;

		mem->bus.base = adev->gmc.aper_base;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
{
	uint64_t offset = (page_offset << PAGE_SHIFT);
	struct drm_mm_node *mm;

	mm = amdgpu_find_mm_node(&bo->mem, &offset);
	return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
		(offset >> PAGE_SHIFT);
}

/**
 * amdgpu_ttm_domain_start - Returns GPU start address
 * @adev: amdgpu device object
 * @type: type of the memory
 *
 * Returns:
 * GPU start address of a memory domain
 */
uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
{
	switch (type) {
	case TTM_PL_TT:
		return adev->gmc.gart_start;
	case TTM_PL_VRAM:
		return adev->gmc.vram_start;
	}

	return 0;
}

/*
 * TTM backend functions.
 */
struct amdgpu_ttm_tt {
	struct ttm_dma_tt	ttm;
	struct drm_gem_object	*gobj;
	u64			offset;
	uint64_t		userptr;
	struct task_struct	*usertask;
	uint32_t		userflags;
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
	struct hmm_range	*range;
#endif
};

#ifdef CONFIG_DRM_AMDGPU_USERPTR
/**
 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
 * memory and start HMM tracking of CPU page table updates
 *
 * The calling function must call amdgpu_ttm_tt_get_user_pages_done() once
 * and only once afterwards to stop HMM tracking
 */
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
{
	struct ttm_tt *ttm = bo->tbo.ttm;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned long start = gtt->userptr;
	struct vm_area_struct *vma;
	struct hmm_range *range;
	unsigned long timeout;
	struct mm_struct *mm;
	unsigned long i;
	int r = 0;

	mm = bo->notifier.mm;
	if (unlikely(!mm)) {
		DRM_DEBUG_DRIVER("BO is not registered?\n");
		return -EFAULT;
	}

	/* Another get_user_pages is running at the same time?? */
	if (WARN_ON(gtt->range))
		return -EFAULT;

	if (!mmget_not_zero(mm)) /* Happens during process shutdown */
		return -ESRCH;

	range = kzalloc(sizeof(*range), GFP_KERNEL);
	if (unlikely(!range)) {
		r = -ENOMEM;
		goto out;
	}
	range->notifier = &bo->notifier;
	range->start = bo->notifier.interval_tree.start;
	range->end = bo->notifier.interval_tree.last + 1;
	range->default_flags = HMM_PFN_REQ_FAULT;
	if (!amdgpu_ttm_tt_is_readonly(ttm))
		range->default_flags |= HMM_PFN_REQ_WRITE;

	range->hmm_pfns = kvmalloc_array(ttm->num_pages,
					 sizeof(*range->hmm_pfns), GFP_KERNEL);
	if (unlikely(!range->hmm_pfns)) {
		r = -ENOMEM;
		goto out_free_ranges;
	}

	mmap_read_lock(mm);
	vma = find_vma(mm, start);
	if (unlikely(!vma || start < vma->vm_start)) {
		r = -EFAULT;
		goto out_unlock;
	}
	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
		     vma->vm_file)) {
		r = -EPERM;
		goto out_unlock;
	}
	mmap_read_unlock(mm);
	timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);

retry:
	range->notifier_seq = mmu_interval_read_begin(&bo->notifier);

	mmap_read_lock(mm);
	r = hmm_range_fault(range);
	mmap_read_unlock(mm);
	if (unlikely(r)) {
		/*
		 * FIXME: This timeout should encompass the retry from
		 * mmu_interval_read_retry() as well.
		 */
		if (r == -EBUSY && !time_after(jiffies, timeout))
			goto retry;
		goto out_free_pfns;
	}

	/*
	 * Due to default_flags, all pages are HMM_PFN_VALID or
	 * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
	 * the notifier_lock, and mmu_interval_read_retry() must be done first.
	 */
	for (i = 0; i < ttm->num_pages; i++)
		pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);

	gtt->range = range;
	mmput(mm);

	return 0;

out_unlock:
	mmap_read_unlock(mm);
out_free_pfns:
	kvfree(range->hmm_pfns);
out_free_ranges:
	kfree(range);
out:
	mmput(mm);
	return r;
}

/**
 * amdgpu_ttm_tt_get_user_pages_done - stop HMM tracking of CPU page table
 * updates and check if the pages backing this ttm range have been invalidated
 *
 * Returns: true if pages are still valid
 */
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool r = false;

	if (!gtt || !gtt->userptr)
		return false;

	DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
			 gtt->userptr, ttm->num_pages);

	WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
		  "No user pages to check\n");

	if (gtt->range) {
		/*
		 * FIXME: Must always hold notifier_lock for this, and must
		 * not ignore the return code.
		 */
		r = mmu_interval_read_retry(gtt->range->notifier,
					    gtt->range->notifier_seq);
		kvfree(gtt->range->hmm_pfns);
		kfree(gtt->range);
		gtt->range = NULL;
	}

	return !r;
}
#endif
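
/*
 * Usage sketch (editor's illustration, simplified; the real sequencing lives
 * in the userptr and command submission paths, not in this file): pages
 * returned by amdgpu_ttm_tt_get_user_pages() may be invalidated at any time,
 * so a caller validates its work against the notifier and retries when
 * amdgpu_ttm_tt_get_user_pages_done() reports an invalidation:
 *
 *	do {
 *		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
 *		if (r)
 *			return r;
 *		... bind/validate the BO using the pages ...
 *	} while (!amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm));
 */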

/**
 * amdgpu_ttm_tt_set_user_pages - Copy new pages in, or clear the page list
 * when @pages is NULL.
 *
 * Called by amdgpu_cs_list_validate(). This creates the page list
 * that backs user memory and will ultimately be mapped into the device
 * address space.
 */
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	unsigned long i;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i] = pages ? pages[i] : NULL;
}

/**
 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
 *
 * Called by amdgpu_ttm_backend_bind()
 **/
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned nents;
	int r;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* Allocate an SG array and squash pages into it */
	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	/* Map SG to device */
	r = -ENOMEM;
	nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents == 0)
		goto release_sg;

	/* convert SG to linear array of pages and dma addresses */
	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);
	return r;
}

/**
 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
 */
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* unmap the pages mapped to the device */
	dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	sg_free_table(ttm->sg);

#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
	if (gtt->range) {
		unsigned long i;

		for (i = 0; i < ttm->num_pages; i++) {
			if (ttm->pages[i] !=
			    hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
				break;
		}

		WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
	}
#endif
}

static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
				struct ttm_buffer_object *tbo,
				uint64_t flags)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
	struct ttm_tt *ttm = tbo->ttm;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	if (amdgpu_bo_encrypted(abo))
		flags |= AMDGPU_PTE_TMZ;

	if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
		uint64_t page_idx = 1;

		r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
				     ttm->pages, gtt->ttm.dma_address, flags);
		if (r)
			goto gart_bind_fail;

		/* The memory type of the first page defaults to UC. Now
		 * modify the memory type to NC from the second page of
		 * the BO onward.
		 */
		flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
		flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);

		r = amdgpu_gart_bind(adev,
				     gtt->offset + (page_idx << PAGE_SHIFT),
				     ttm->num_pages - page_idx,
				     &ttm->pages[page_idx],
				     &(gtt->ttm.dma_address[page_idx]), flags);
	} else {
		r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
				     ttm->pages, gtt->ttm.dma_address, flags);
	}

gart_bind_fail:
	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);

	return r;
}

/**
 * amdgpu_ttm_backend_bind - Bind GTT memory
 *
 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
 * This handles binding GTT memory to the device address space.
 */
static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	uint64_t flags;
	int r = 0;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(ttm);
		if (r) {
			DRM_ERROR("failed to pin userptr\n");
			return r;
		}
	}
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type == AMDGPU_PL_GDS ||
	    bo_mem->mem_type == AMDGPU_PL_GWS ||
	    bo_mem->mem_type == AMDGPU_PL_OA)
		return -EINVAL;

	if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
		return 0;
	}

	/* compute PTE flags relevant to this BO memory */
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);

	/* bind pages into GART page tables */
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);

	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);
	return r;
}

/**
 * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object
 */
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
	struct ttm_mem_reg tmp;
	struct ttm_placement placement;
	struct ttm_place placements;
	uint64_t addr, flags;
	int r;

	if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
		return 0;

	addr = amdgpu_gmc_agp_addr(bo);
	if (addr != AMDGPU_BO_INVALID_OFFSET) {
		bo->mem.start = addr >> PAGE_SHIFT;
	} else {

		/* allocate GART space */
		tmp = bo->mem;
		tmp.mm_node = NULL;
		placement.num_placement = 1;
		placement.placement = &placements;
		placement.num_busy_placement = 1;
		placement.busy_placement = &placements;
		placements.fpfn = 0;
		placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
		placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
			TTM_PL_FLAG_TT;

		r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
		if (unlikely(r))
			return r;

		/* compute PTE flags for this buffer object */
		flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);

		/* Bind pages */
		gtt->offset = (u64)tmp.start << PAGE_SHIFT;
		r = amdgpu_ttm_gart_bind(adev, bo, flags);
		if (unlikely(r)) {
			ttm_bo_mem_put(bo, &tmp);
			return r;
		}

		ttm_bo_mem_put(bo, &bo->mem);
		bo->mem = tmp;
	}

	return 0;
}

/**
 * amdgpu_ttm_recover_gart - Rebind GTT pages
 *
 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
 * rebind GTT pages during a GPU reset.
 */
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	uint64_t flags;
	int r;

	if (!tbo->ttm)
		return 0;

	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
	r = amdgpu_ttm_gart_bind(adev, tbo, flags);

	return r;
}

/**
 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
 *
 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
 * ttm_tt_destroy().
 */
static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	/* if the pages have userptr pinning then clear that first */
	if (gtt->userptr)
		amdgpu_ttm_tt_unpin_userptr(ttm);

	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
		return 0;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
	if (r)
		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
			  gtt->ttm.ttm.num_pages, gtt->offset);
	return r;
}

static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt->usertask)
		put_task_struct(gtt->usertask);

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func amdgpu_backend_func = {
	.bind = &amdgpu_ttm_backend_bind,
	.unbind = &amdgpu_ttm_backend_unbind,
	.destroy = &amdgpu_ttm_backend_destroy,
};

/**
 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
 *
 * @bo: The buffer object to create a GTT ttm_tt object around
 *
 * Called by ttm_tt_create().
 */
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct amdgpu_ttm_tt *gtt;

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &amdgpu_backend_func;
	gtt->gobj = &bo->base;

	/* allocate space for the uninitialized page entries */
	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

/**
 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
 *
 * Map the pages of a ttm_tt object to an address space visible
 * to the underlying device.
 */
static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
		if (!ttm->sg) {
			struct dma_buf_attachment *attach;
			struct sg_table *sgt;

			attach = gtt->gobj->import_attach;
			sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
			if (IS_ERR(sgt))
				return PTR_ERR(sgt);

			ttm->sg = sgt;
		}

		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address,
						 ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
	}
#endif

	/* fall back to generic helper to populate the page array
	 * and map them to the device */
	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
}

/**
 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
 *
 * Unmaps pages of a ttm_tt object from the device address space and
 * unpopulates the page array backing it.
 */
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct amdgpu_device *adev;

	if (gtt && gtt->userptr) {
		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (ttm->sg && gtt->gobj->import_attach) {
		struct dma_buf_attachment *attach;

		attach = gtt->gobj->import_attach;
		dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
		ttm->sg = NULL;
		return;
	}

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	adev = amdgpu_ttm_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
		return;
	}
#endif

	/* fall back to generic helper to unmap and unpopulate array */
	ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
}
50da5174 | 1454 | /** |
2e603d04 HR |
1455 | * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current |
1456 | * task | |
50da5174 TSD |
1457 | * |
1458 | * @ttm: The ttm_tt object to bind this userptr object to | |
1459 | * @addr: The address in the current task's VM space to use |
1460 | * @flags: Requirements of userptr object. | |
1461 | * | |
1462 | * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages |
1463 | * to the current task. |
1464 | */ | |
d38ceaf9 AD |
1465 | int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, |
1466 | uint32_t flags) | |
1467 | { | |
1468 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | |
1469 | ||
1470 | if (gtt == NULL) | |
1471 | return -EINVAL; | |
1472 | ||
1473 | gtt->userptr = addr; | |
d38ceaf9 | 1474 | gtt->userflags = flags; |
0919195f FK |
1475 | |
1476 | if (gtt->usertask) | |
1477 | put_task_struct(gtt->usertask); | |
1478 | gtt->usertask = current->group_leader; | |
1479 | get_task_struct(gtt->usertask); | |
1480 | ||
d38ceaf9 AD |
1481 | return 0; |
1482 | } | |
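/*
 * Illustrative caller sketch (assumed shape of the ioctl path, not code
 * from this file): amdgpu_gem_userptr_ioctl() is expected to hand the
 * user range to this helper right after creating the BO, roughly:
 *
 *	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
 *	if (r)
 *		goto release_object;
 */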
1483 | ||
50da5174 TSD |
1484 | /** |
1485 | * amdgpu_ttm_tt_get_usermm - Return the mm_struct backing a userptr ttm_tt object |
1486 | */ | |
cc325d19 | 1487 | struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm) |
d38ceaf9 AD |
1488 | { |
1489 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | |
1490 | ||
1491 | if (gtt == NULL) | |
cc325d19 | 1492 | return NULL; |
d38ceaf9 | 1493 | |
0919195f FK |
1494 | if (gtt->usertask == NULL) |
1495 | return NULL; | |
1496 | ||
1497 | return gtt->usertask->mm; | |
d38ceaf9 AD |
1498 | } |
1499 | ||
50da5174 | 1500 | /** |
2e603d04 HR |
1501 | * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an |
1502 | * address range for the current task. | |
50da5174 TSD |
1503 | * |
1504 | */ | |
cc1de6e8 CK |
1505 | bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, |
1506 | unsigned long end) | |
1507 | { | |
1508 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | |
1509 | unsigned long size; | |
1510 | ||
637dd3b5 | 1511 | if (gtt == NULL || !gtt->userptr) |
cc1de6e8 CK |
1512 | return false; |
1513 | ||
50da5174 TSD |
1514 | /* Return false if no part of the ttm_tt object lies within |
1515 | * the range | |
1516 | */ | |
cc1de6e8 CK |
1517 | size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE; |
1518 | if (gtt->userptr > end || gtt->userptr + size <= start) | |
1519 | return false; | |
1520 | ||
1521 | return true; | |
1522 | } | |
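/*
 * Worked example with hypothetical numbers: for gtt->userptr == 0x10000
 * and num_pages == 4 (4 KiB pages), the object covers [0x10000, 0x14000).
 * An invalidation of [0x13000, 0x20000) overlaps and returns true, while
 * [0x14000, 0x20000) starts exactly at the object's end and returns false.
 */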
1523 | ||
50da5174 | 1524 | /** |
899fbde1 | 1525 | * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr? |
318c3f4b | 1526 | */ |
899fbde1 | 1527 | bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm) |
ca666a3c CK |
1528 | { |
1529 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | |
1530 | ||
1531 | if (gtt == NULL || !gtt->userptr) | |
1532 | return false; | |
1533 | ||
899fbde1 | 1534 | return true; |
ca666a3c CK |
1535 | } |
1536 | ||
50da5174 TSD |
1537 | /** |
1538 | * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only? | |
1539 | */ | |
d38ceaf9 AD |
1540 | bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) |
1541 | { | |
1542 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | |
1543 | ||
1544 | if (gtt == NULL) | |
1545 | return false; | |
1546 | ||
1547 | return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); | |
1548 | } | |
1549 | ||
50da5174 | 1550 | /** |
24a8d289 | 1551 | * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object |
50da5174 TSD |
1552 | * |
1553 | * @ttm: The ttm_tt object to compute the flags for | |
1554 | * @mem: The memory region backing this ttm_tt object |
24a8d289 CK |
1555 | * |
1556 | * Figure out the flags to use for a VM PDE (Page Directory Entry). | |
50da5174 | 1557 | */ |
24a8d289 | 1558 | uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem) |
d38ceaf9 | 1559 | { |
6b777607 | 1560 | uint64_t flags = 0; |
d38ceaf9 AD |
1561 | |
1562 | if (mem && mem->mem_type != TTM_PL_SYSTEM) | |
1563 | flags |= AMDGPU_PTE_VALID; | |
1564 | ||
6d99905a | 1565 | if (mem && mem->mem_type == TTM_PL_TT) { |
d38ceaf9 AD |
1566 | flags |= AMDGPU_PTE_SYSTEM; |
1567 | ||
6d99905a CK |
1568 | if (ttm->caching_state == tt_cached) |
1569 | flags |= AMDGPU_PTE_SNOOPED; | |
1570 | } | |
d38ceaf9 | 1571 | |
24a8d289 CK |
1572 | return flags; |
1573 | } | |
1574 | ||
1575 | /** | |
1576 | * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object | |
1577 | * | |
1578 | * @ttm: The ttm_tt object to compute the flags for | |
1579 | * @mem: The memory region backing this ttm_tt object |
1580 | * |
1581 | * Figure out the flags to use for a VM PTE (Page Table Entry). | |
1582 | */ | |
1583 | uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, | |
1584 | struct ttm_mem_reg *mem) | |
1585 | { | |
1586 | uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem); | |
1587 | ||
4b98e0c4 | 1588 | flags |= adev->gart.gart_pte_flags; |
d38ceaf9 AD |
1589 | flags |= AMDGPU_PTE_READABLE; |
1590 | ||
1591 | if (!amdgpu_ttm_tt_is_readonly(ttm)) | |
1592 | flags |= AMDGPU_PTE_WRITEABLE; | |
1593 | ||
1594 | return flags; | |
1595 | } | |
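/*
 * Worked example (illustrative): for a cached GTT placement
 * (mem->mem_type == TTM_PL_TT, ttm->caching_state == tt_cached) on a
 * writable, non-userptr ttm_tt, the two helpers above compose to
 *
 *	AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
 *	AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE
 *
 * plus whatever ASIC-specific bits the GMC code stored in gart_pte_flags.
 */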
1596 | ||
50da5174 | 1597 | /** |
2e603d04 HR |
1598 | * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer |
1599 | * object. | |
50da5174 | 1600 | * |
2e603d04 HR |
1601 | * Return true if eviction is sensible. Called by ttm_mem_evict_first() on |
1602 | * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until | |
1603 | * it can find space for a new object and by ttm_bo_force_list_clean() which is | |
50da5174 TSD |
1604 | * used to clean out a memory space. |
1605 | */ | |
9982ca68 CK |
1606 | static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, |
1607 | const struct ttm_place *place) | |
1608 | { | |
4fcae787 CK |
1609 | unsigned long num_pages = bo->mem.num_pages; |
1610 | struct drm_mm_node *node = bo->mem.mm_node; | |
52791eee | 1611 | struct dma_resv_list *flist; |
d8d019cc FK |
1612 | struct dma_fence *f; |
1613 | int i; | |
1614 | ||
1bd4e4ca | 1615 | if (bo->type == ttm_bo_type_kernel && |
6ceeb144 | 1616 | !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo))) |
1bd4e4ca CK |
1617 | return false; |
1618 | ||
d8d019cc FK |
1619 | /* If bo is a KFD BO, check if the bo belongs to the current process. |
1620 | * If true, then return false as any KFD process needs all its BOs to | |
1621 | * be resident to run successfully | |
1622 | */ | |
52791eee | 1623 | flist = dma_resv_get_list(bo->base.resv); |
d8d019cc FK |
1624 | if (flist) { |
1625 | for (i = 0; i < flist->shared_count; ++i) { | |
1626 | f = rcu_dereference_protected(flist->shared[i], | |
52791eee | 1627 | dma_resv_held(bo->base.resv)); |
d8d019cc FK |
1628 | if (amdkfd_fence_check_mm(f, current->mm)) |
1629 | return false; | |
1630 | } | |
1631 | } | |
9982ca68 | 1632 | |
4fcae787 CK |
1633 | switch (bo->mem.mem_type) { |
1634 | case TTM_PL_TT: | |
218c0b7f CK |
1635 | if (amdgpu_bo_is_amdgpu_bo(bo) && |
1636 | amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo))) | |
1637 | return false; | |
4fcae787 | 1638 | return true; |
9982ca68 | 1639 | |
4fcae787 | 1640 | case TTM_PL_VRAM: |
9982ca68 CK |
1641 | /* Check each drm MM node individually */ |
1642 | while (num_pages) { | |
1643 | if (place->fpfn < (node->start + node->size) && | |
1644 | !(place->lpfn && place->lpfn <= node->start)) | |
1645 | return true; | |
1646 | ||
1647 | num_pages -= node->size; | |
1648 | ++node; | |
1649 | } | |
7da2e3e0 | 1650 | return false; |
9982ca68 | 1651 | |
4fcae787 CK |
1652 | default: |
1653 | break; | |
9982ca68 CK |
1654 | } |
1655 | ||
1656 | return ttm_bo_eviction_valuable(bo, place); | |
1657 | } | |
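/*
 * Worked example with hypothetical numbers: a VRAM BO backed by two
 * drm_mm nodes, [0, 256) and [1024, 1280) pages, is valuable to evict
 * for a placement with fpfn == 1100 and lpfn == 0: the first node fails
 * place->fpfn < node->start + node->size, but the second passes it, so
 * the per-node loop above returns true.
 */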
1658 | ||
50da5174 | 1659 | /** |
2e603d04 | 1660 | * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object. |
50da5174 TSD |
1661 | * |
1662 | * @bo: The buffer object to read/write | |
1663 | * @offset: Offset into buffer object | |
1664 | * @buf: Secondary buffer to write/read from | |
1665 | * @len: Length in bytes of access | |
1666 | * @write: true if writing | |
1667 | * | |
1668 | * This is used to access VRAM that backs a buffer object via MMIO | |
1669 | * access for debugging purposes. | |
1670 | */ | |
e342610c FK |
1671 | static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, |
1672 | unsigned long offset, | |
1673 | void *buf, int len, int write) | |
1674 | { | |
b82485fd | 1675 | struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); |
e342610c | 1676 | struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); |
e1d51505 | 1677 | struct drm_mm_node *nodes; |
e342610c FK |
1678 | uint32_t value = 0; |
1679 | int ret = 0; | |
1680 | uint64_t pos; | |
1681 | unsigned long flags; | |
1682 | ||
1683 | if (bo->mem.mem_type != TTM_PL_VRAM) | |
1684 | return -EIO; | |
1685 | ||
f0ee63cb CK |
1686 | pos = offset; |
1687 | nodes = amdgpu_find_mm_node(&abo->tbo.mem, &pos); | |
1688 | pos += (nodes->start << PAGE_SHIFT); | |
e342610c | 1689 | |
770d13b1 | 1690 | while (len && pos < adev->gmc.mc_vram_size) { |
e342610c | 1691 | uint64_t aligned_pos = pos & ~(uint64_t)3; |
dd1ab799 | 1692 | uint64_t bytes = 4 - (pos & 3); |
e342610c FK |
1693 | uint32_t shift = (pos & 3) * 8; |
1694 | uint32_t mask = 0xffffffff << shift; | |
1695 | ||
1696 | if (len < bytes) { | |
1697 | mask &= 0xffffffff >> (bytes - len) * 8; | |
1698 | bytes = len; | |
1699 | } | |
1700 | ||
dd1ab799 CK |
1701 | if (mask != 0xffffffff) { |
1702 | spin_lock_irqsave(&adev->mmio_idx_lock, flags); | |
1703 | WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000); | |
1704 | WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31); | |
1705 | if (!write || mask != 0xffffffff) | |
1706 | value = RREG32_NO_KIQ(mmMM_DATA); | |
1707 | if (write) { | |
1708 | value &= ~mask; | |
1709 | value |= (*(uint32_t *)buf << shift) & mask; | |
1710 | WREG32_NO_KIQ(mmMM_DATA, value); | |
1711 | } | |
1712 | spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); | |
1713 | if (!write) { | |
1714 | value = (value & mask) >> shift; | |
1715 | memcpy(buf, &value, bytes); | |
1716 | } | |
1717 | } else { | |
1718 | bytes = (nodes->start + nodes->size) << PAGE_SHIFT; | |
1719 | bytes = min(bytes - pos, (uint64_t)len & ~0x3ull); | |
1720 | ||
1721 | amdgpu_device_vram_access(adev, pos, (uint32_t *)buf, | |
1722 | bytes, write); | |
e342610c FK |
1723 | } |
1724 | ||
1725 | ret += bytes; | |
1726 | buf = (uint8_t *)buf + bytes; | |
1727 | pos += bytes; | |
1728 | len -= bytes; | |
1729 | if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) { | |
1730 | ++nodes; | |
1731 | pos = (nodes->start << PAGE_SHIFT); | |
1732 | } | |
1733 | } | |
1734 | ||
1735 | return ret; | |
1736 | } | |
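/*
 * Worked example with hypothetical numbers: a 10-byte access at offset
 * 0x1001e is split by the loop above into a 2-byte masked
 * MM_INDEX/MM_DATA access of the dword at 0x1001c (a read-modify-write
 * when writing), followed by 8 aligned bytes through the faster
 * amdgpu_device_vram_access() path.
 */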
1737 | ||
d38ceaf9 AD |
1738 | static struct ttm_bo_driver amdgpu_bo_driver = { |
1739 | .ttm_tt_create = &amdgpu_ttm_tt_create, | |
1740 | .ttm_tt_populate = &amdgpu_ttm_tt_populate, | |
1741 | .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate, | |
d38ceaf9 | 1742 | .init_mem_type = &amdgpu_init_mem_type, |
9982ca68 | 1743 | .eviction_valuable = amdgpu_ttm_bo_eviction_valuable, |
d38ceaf9 AD |
1744 | .evict_flags = &amdgpu_evict_flags, |
1745 | .move = &amdgpu_bo_move, | |
1746 | .verify_access = &amdgpu_verify_access, | |
1747 | .move_notify = &amdgpu_bo_move_notify, | |
ab2f7a5c | 1748 | .release_notify = &amdgpu_bo_release_notify, |
d38ceaf9 AD |
1749 | .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify, |
1750 | .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, | |
1751 | .io_mem_free = &amdgpu_ttm_io_mem_free, | |
9bbdcc0f | 1752 | .io_mem_pfn = amdgpu_ttm_io_mem_pfn, |
b61857b5 CZ |
1753 | .access_memory = &amdgpu_ttm_access_memory, |
1754 | .del_from_lru_notify = &amdgpu_vm_del_from_lru_notify | |
d38ceaf9 AD |
1755 | }; |
1756 | ||
f5ec697e AD |
1757 | /* |
1758 | * Firmware Reservation functions | |
1759 | */ | |
1760 | /** | |
1761 | * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram | |
1762 | * | |
1763 | * @adev: amdgpu_device pointer | |
1764 | * | |
1765 | * free fw reserved vram if it has been reserved. | |
1766 | */ | |
1767 | static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev) | |
1768 | { | |
1769 | amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo, | |
1770 | NULL, &adev->fw_vram_usage.va); | |
1771 | } | |
1772 | ||
1773 | /** | |
1774 | * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw | |
1775 | * | |
1776 | * @adev: amdgpu_device pointer | |
1777 | * | |
1778 | * create bo vram reservation from fw. | |
1779 | */ | |
1780 | static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) | |
1781 | { | |
de7b45ba | 1782 | uint64_t vram_size = adev->gmc.visible_vram_size; |
de7b45ba | 1783 | |
f5ec697e AD |
1784 | adev->fw_vram_usage.va = NULL; |
1785 | adev->fw_vram_usage.reserved_bo = NULL; | |
1786 | ||
de7b45ba CK |
1787 | if (adev->fw_vram_usage.size == 0 || |
1788 | adev->fw_vram_usage.size > vram_size) | |
1789 | return 0; | |
f5ec697e | 1790 | |
de7b45ba CK |
1791 | return amdgpu_bo_create_kernel_at(adev, |
1792 | adev->fw_vram_usage.start_offset, | |
1793 | adev->fw_vram_usage.size, | |
1794 | AMDGPU_GEM_DOMAIN_VRAM, | |
1795 | &adev->fw_vram_usage.reserved_bo, | |
1796 | &adev->fw_vram_usage.va); | |
f5ec697e | 1797 | } |
de7b45ba | 1798 | |
778e8c42 TY |
1799 | /* |
1800 | * Memory training reservation functions |
1801 | */ | |
1802 | ||
1803 | /** | |
1804 | * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram | |
1805 | * | |
1806 | * @adev: amdgpu_device pointer | |
1807 | * | |
1808 | * free memory training reserved vram if it has been reserved. | |
1809 | */ | |
1810 | static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev) | |
1811 | { | |
1812 | struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx; | |
1813 | ||
1814 | ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT; | |
1815 | amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL); | |
1816 | ctx->c2p_bo = NULL; | |
1817 | ||
778e8c42 TY |
1818 | return 0; |
1819 | } | |
1820 | ||
83d7f66a | 1821 | static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev) |
8d40002f | 1822 | { |
83d7f66a LG |
1823 | struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx; |
1824 | ||
1825 | memset(ctx, 0, sizeof(*ctx)); | |
8d40002f | 1826 | |
83d7f66a | 1827 | ctx->c2p_train_data_offset = |
2c6e83a1 | 1828 | ALIGN((adev->gmc.mc_vram_size - adev->discovery_tmr_size - SZ_1M), SZ_1M); |
83d7f66a LG |
1829 | ctx->p2c_train_data_offset = |
1830 | (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET); | |
1831 | ctx->train_data_size = | |
1832 | GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES; | |
1833 | ||
1834 | DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", | |
1835 | ctx->train_data_size, | |
1836 | ctx->p2c_train_data_offset, | |
1837 | ctx->c2p_train_data_offset); | |
8d40002f TY |
1838 | } |
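/*
 * Worked example with hypothetical numbers: with an 8 GiB mc_vram_size
 * and a 4 MiB discovery_tmr_size, the computation above yields
 *
 *	c2p_train_data_offset = ALIGN(0x200000000 - 0x400000 - SZ_1M, SZ_1M)
 *	                      = 0x1ffb00000
 *
 * i.e. the C2P training buffer occupies the MiB-aligned block directly
 * below the TMR at the top of VRAM.
 */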
1839 | ||
83d7f66a LG |
1840 | /* |
1841 | * reserve TMR memory at the top of VRAM which holds | |
1842 | * IP Discovery data and is protected by PSP. | |
778e8c42 | 1843 | */ |
83d7f66a | 1844 | static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) |
778e8c42 TY |
1845 | { |
1846 | int ret; | |
1847 | struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx; | |
83d7f66a | 1848 | bool mem_train_support = false; |
778e8c42 | 1849 | |
83d7f66a | 1850 | if (!amdgpu_sriov_vf(adev)) { |
72d208c2 | 1851 | ret = amdgpu_mem_train_support(adev); |
2c6e83a1 | 1852 | if (ret == 1) |
83d7f66a | 1853 | mem_train_support = true; |
2c6e83a1 | 1854 | else if (ret == -1) |
72d208c2 LG |
1855 | return -EINVAL; |
1856 | else | |
83d7f66a | 1857 | DRM_DEBUG("memory training is not supported!\n"); |
778e8c42 TY |
1858 | } |
1859 | ||
83d7f66a LG |
1860 | /* |
1861 | * Query reserved tmr size through atom firmwareinfo for Sienna_Cichlid and onwards for all | |
1862 | * the use cases (IP discovery/G6 memory training/profiling/diagnostic data, etc.) |
1863 | * | |
1864 | * Otherwise, fall back to the legacy approach to check and reserve tmr block for IP |
1865 | * discovery data and G6 memory training data respectively | |
1866 | */ | |
1867 | adev->discovery_tmr_size = | |
1868 | amdgpu_atomfirmware_get_fw_reserved_fb_size(adev); | |
2c6e83a1 | 1869 | if (!adev->discovery_tmr_size) |
f56071d4 | 1870 | adev->discovery_tmr_size = DISCOVERY_TMR_OFFSET; |
2c6e83a1 LG |
1871 | |
1872 | if (mem_train_support) { | |
1873 | /* reserve vram for mem train according to TMR location */ | |
1874 | amdgpu_ttm_training_data_block_init(adev); | |
1875 | ret = amdgpu_bo_create_kernel_at(adev, | |
778e8c42 TY |
1876 | ctx->c2p_train_data_offset, |
1877 | ctx->train_data_size, | |
1878 | AMDGPU_GEM_DOMAIN_VRAM, | |
1879 | &ctx->c2p_bo, | |
1880 | NULL); | |
2c6e83a1 LG |
1881 | if (ret) { |
1882 | DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret); | |
1883 | amdgpu_ttm_training_reserve_vram_fini(adev); | |
1884 | return ret; | |
83d7f66a | 1885 | } |
2c6e83a1 | 1886 | ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS; |
83d7f66a | 1887 | } |
778e8c42 | 1888 | |
778e8c42 | 1889 | ret = amdgpu_bo_create_kernel_at(adev, |
83d7f66a LG |
1890 | adev->gmc.real_vram_size - adev->discovery_tmr_size, |
1891 | adev->discovery_tmr_size, | |
1892 | AMDGPU_GEM_DOMAIN_VRAM, | |
1893 | &adev->discovery_memory, | |
1894 | NULL); | |
778e8c42 | 1895 | if (ret) { |
83d7f66a LG |
1896 | DRM_ERROR("alloc tmr failed(%d)!\n", ret); |
1897 | amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL); | |
33a9a5ab | 1898 | return ret; |
778e8c42 TY |
1899 | } |
1900 | ||
778e8c42 | 1901 | return 0; |
778e8c42 TY |
1902 | } |
1903 | ||
50da5174 | 1904 | /** |
2e603d04 HR |
1905 | * amdgpu_ttm_init - Init the memory management (ttm) as well as various |
1906 | * gtt/vram related fields. | |
50da5174 TSD |
1907 | * |
1908 | * This initializes all of the memory space pools that the TTM layer | |
1909 | * will need such as the GTT space (system memory mapped to the device), | |
1910 | * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which | |
1911 | * can be mapped per VMID. | |
1912 | */ | |
d38ceaf9 AD |
1913 | int amdgpu_ttm_init(struct amdgpu_device *adev) |
1914 | { | |
36d38372 | 1915 | uint64_t gtt_size; |
d38ceaf9 | 1916 | int r; |
218b5dcd | 1917 | u64 vis_vram_limit; |
994dcfaa | 1918 | void *stolen_vga_buf; |
d38ceaf9 | 1919 | |
a64f784b CK |
1920 | mutex_init(&adev->mman.gtt_window_lock); |
1921 | ||
d38ceaf9 AD |
1922 | /* No others user of address space so set it to 0 */ |
1923 | r = ttm_bo_device_init(&adev->mman.bdev, | |
d38ceaf9 AD |
1924 | &amdgpu_bo_driver, |
1925 | adev->ddev->anon_inode->i_mapping, | |
e7bf74d0 | 1926 | adev->ddev->vma_offset_manager, |
90489ce1 | 1927 | dma_addressing_limited(adev->dev)); |
d38ceaf9 AD |
1928 | if (r) { |
1929 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); | |
1930 | return r; | |
1931 | } | |
1932 | adev->mman.initialized = true; | |
7cce9584 AG |
1933 | |
1934 | /* We opt to avoid OOM on system page allocations */ |
1935 | adev->mman.bdev.no_retry = true; | |
1936 | ||
50da5174 | 1937 | /* Initialize VRAM pool with all of VRAM divided into pages */ |
d38ceaf9 | 1938 | r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM, |
770d13b1 | 1939 | adev->gmc.real_vram_size >> PAGE_SHIFT); |
d38ceaf9 AD |
1940 | if (r) { |
1941 | DRM_ERROR("Failed initializing VRAM heap.\n"); | |
1942 | return r; | |
1943 | } | |
218b5dcd JB |
1944 | |
1945 | /* Reduce size of CPU-visible VRAM if requested */ | |
1946 | vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024; | |
1947 | if (amdgpu_vis_vram_limit > 0 && | |
770d13b1 CK |
1948 | vis_vram_limit <= adev->gmc.visible_vram_size) |
1949 | adev->gmc.visible_vram_size = vis_vram_limit; | |
218b5dcd | 1950 | |
d38ceaf9 | 1951 | /* Change the size here instead of the init above so only lpfn is affected */ |
57adc4ce | 1952 | amdgpu_ttm_set_buffer_funcs_status(adev, false); |
f8f4b9a6 AL |
1953 | #ifdef CONFIG_64BIT |
1954 | adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base, | |
1955 | adev->gmc.visible_vram_size); | |
1956 | #endif | |
d38ceaf9 | 1957 | |
a05502e5 HC |
1958 | /* |
1959 | * The reserved vram for firmware must be pinned to the specified |
1960 | * place on the VRAM, so reserve it early. |
1961 | */ | |
f5ec697e | 1962 | r = amdgpu_ttm_fw_reserve_vram_init(adev); |
a05502e5 HC |
1963 | if (r) { |
1964 | return r; | |
1965 | } | |
1966 | ||
778e8c42 | 1967 | /* |
83d7f66a LG |
1968 | * only NAVI10 and onward ASICs support IP discovery. |
1969 | * If IP discovery is enabled, a block of memory should be |
1970 | * reserved for IP discovery data. |
778e8c42 | 1971 | */ |
6a8987a8 | 1972 | if (adev->discovery_bin) { |
83d7f66a | 1973 | r = amdgpu_ttm_reserve_tmr(adev); |
e862b08b ML |
1974 | if (r) |
1975 | return r; | |
1976 | } | |
778e8c42 | 1977 | |
50da5174 TSD |
1978 | /* allocate memory as required for VGA |
1979 | * This is used for VGA emulation and pre-OS scanout buffers to | |
1980 | * avoid display artifacts while transitioning between pre-OS | |
1981 | * and driver. */ | |
fcbc92e2 | 1982 | r = amdgpu_bo_create_kernel_at(adev, 0, adev->gmc.stolen_vga_size, |
adb5be81 | 1983 | AMDGPU_GEM_DOMAIN_VRAM, |
fcbc92e2 | 1984 | &adev->gmc.stolen_vga_memory, |
adb5be81 | 1985 | &stolen_vga_buf); |
52975728 CK |
1986 | if (r) |
1987 | return r; | |
5f6a556f | 1988 | |
d38ceaf9 | 1989 | DRM_INFO("amdgpu: %uM of VRAM memory ready\n", |
770d13b1 | 1990 | (unsigned) (adev->gmc.real_vram_size / (1024 * 1024))); |
36d38372 | 1991 | |
50da5174 TSD |
1992 | /* Compute GTT size, either based on 3/4 of the RAM size |
1993 | * or whatever the user passed on module init */ |
424e2c85 RH |
1994 | if (amdgpu_gtt_size == -1) { |
1995 | struct sysinfo si; | |
1996 | ||
1997 | si_meminfo(&si); | |
24562523 | 1998 | gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), |
770d13b1 | 1999 | adev->gmc.mc_vram_size), |
24562523 AG |
2000 | ((uint64_t)si.totalram * si.mem_unit * 3/4)); |
2001 | } | |
2002 | else | |
36d38372 | 2003 | gtt_size = (uint64_t)amdgpu_gtt_size << 20; |
50da5174 TSD |
2004 | |
2005 | /* Initialize GTT memory pool */ | |
36d38372 | 2006 | r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT); |
d38ceaf9 AD |
2007 | if (r) { |
2008 | DRM_ERROR("Failed initializing GTT heap.\n"); | |
2009 | return r; | |
2010 | } | |
2011 | DRM_INFO("amdgpu: %uM of GTT memory ready.\n", | |
36d38372 | 2012 | (unsigned)(gtt_size / (1024 * 1024))); |
d38ceaf9 | 2013 | |
50da5174 | 2014 | /* Initialize various on-chip memory pools */ |
c832c346 | 2015 | r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS, |
dca29491 | 2016 | adev->gds.gds_size); |
c832c346 CK |
2017 | if (r) { |
2018 | DRM_ERROR("Failed initializing GDS heap.\n"); | |
2019 | return r; | |
d38ceaf9 AD |
2020 | } |
2021 | ||
c832c346 | 2022 | r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS, |
dca29491 | 2023 | adev->gds.gws_size); |
c832c346 CK |
2024 | if (r) { |
2025 | DRM_ERROR("Failed initializing gws heap.\n"); | |
2026 | return r; | |
d38ceaf9 AD |
2027 | } |
2028 | ||
c832c346 | 2029 | r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA, |
dca29491 | 2030 | adev->gds.oa_size); |
c832c346 CK |
2031 | if (r) { |
2032 | DRM_ERROR("Failed initializing oa heap.\n"); | |
2033 | return r; | |
d38ceaf9 AD |
2034 | } |
2035 | ||
d38ceaf9 AD |
2036 | return 0; |
2037 | } | |
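/*
 * Worked example with hypothetical numbers for the default GTT sizing
 * above: on a 16 GiB host with a 4 GiB VRAM card and amdgpu_gtt_size
 * left at -1 (and assuming the usual 3 GiB AMDGPU_DEFAULT_GTT_SIZE_MB),
 *
 *	gtt_size = min(max(3 GiB, 4 GiB), 16 GiB * 3/4) = 4 GiB
 */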
2038 | ||
50da5174 | 2039 | /** |
2e603d04 | 2040 | * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm |
50da5174 | 2041 | */ |
6f752ec2 AG |
2042 | void amdgpu_ttm_late_init(struct amdgpu_device *adev) |
2043 | { | |
994dcfaa | 2044 | void *stolen_vga_buf; |
5db62dc8 | 2045 | |
50da5174 | 2046 | /* return the VGA stolen memory (if any) back to VRAM */ |
5db62dc8 AD |
2047 | if (!adev->gmc.keep_stolen_vga_memory) |
2048 | amdgpu_bo_free_kernel(&adev->gmc.stolen_vga_memory, NULL, &stolen_vga_buf); | |
6f752ec2 AG |
2049 | } |
2050 | ||
50da5174 TSD |
2051 | /** |
2052 | * amdgpu_ttm_fini - De-initialize the TTM memory pools | |
2053 | */ | |
d38ceaf9 AD |
2054 | void amdgpu_ttm_fini(struct amdgpu_device *adev) |
2055 | { | |
5db62dc8 AD |
2056 | void *stolen_vga_buf; |
2057 | ||
d38ceaf9 AD |
2058 | if (!adev->mman.initialized) |
2059 | return; | |
11c6b82a | 2060 | |
778e8c42 | 2061 | amdgpu_ttm_training_reserve_vram_fini(adev); |
5db62dc8 AD |
2062 | /* return the stolen vga memory back to VRAM */ |
2063 | if (adev->gmc.keep_stolen_vga_memory) | |
2064 | amdgpu_bo_free_kernel(&adev->gmc.stolen_vga_memory, NULL, &stolen_vga_buf); | |
224f82e5 ED |
2065 | /* return the IP Discovery TMR memory back to VRAM */ |
2066 | amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL); | |
f5ec697e | 2067 | amdgpu_ttm_fw_reserve_vram_fini(adev); |
224f82e5 | 2068 | |
f8f4b9a6 AL |
2069 | if (adev->mman.aper_base_kaddr) |
2070 | iounmap(adev->mman.aper_base_kaddr); | |
2071 | adev->mman.aper_base_kaddr = NULL; | |
11c6b82a | 2072 | |
d38ceaf9 AD |
2073 | ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM); |
2074 | ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT); | |
c832c346 CK |
2075 | ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS); |
2076 | ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS); | |
2077 | ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA); | |
d38ceaf9 | 2078 | ttm_bo_device_release(&adev->mman.bdev); |
d38ceaf9 AD |
2079 | adev->mman.initialized = false; |
2080 | DRM_INFO("amdgpu: ttm finalized\n"); | |
2081 | } | |
2082 | ||
57adc4ce CK |
2083 | /** |
2084 | * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions | |
2085 | * | |
2086 | * @adev: amdgpu_device pointer | |
2087 | * @enable: true when we can use buffer functions. | |
2088 | * | |
2089 | * Enable/disable use of buffer functions during suspend/resume. This should | |
2090 | * only be called at bootup or when userspace isn't running. | |
2091 | */ | |
2092 | void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) | |
d38ceaf9 | 2093 | { |
57adc4ce CK |
2094 | struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM]; |
2095 | uint64_t size; | |
b7d85e1d | 2096 | int r; |
d38ceaf9 | 2097 | |
df9c8d1a | 2098 | if (!adev->mman.initialized || amdgpu_in_reset(adev) || |
b7d85e1d | 2099 | adev->mman.buffer_funcs_enabled == enable) |
d38ceaf9 AD |
2100 | return; |
2101 | ||
b7d85e1d CK |
2102 | if (enable) { |
2103 | struct amdgpu_ring *ring; | |
b3ac1766 | 2104 | struct drm_gpu_scheduler *sched; |
b7d85e1d CK |
2105 | |
2106 | ring = adev->mman.buffer_funcs_ring; | |
b3ac1766 ND |
2107 | sched = &ring->sched; |
2108 | r = drm_sched_entity_init(&adev->mman.entity, | |
2109 | DRM_SCHED_PRIORITY_KERNEL, &sched, | |
2110 | 1, NULL); | |
b7d85e1d CK |
2111 | if (r) { |
2112 | DRM_ERROR("Failed setting up TTM BO move entity (%d)\n", | |
2113 | r); | |
2114 | return; | |
2115 | } | |
2116 | } else { | |
cdc50176 | 2117 | drm_sched_entity_destroy(&adev->mman.entity); |
7766484b AG |
2118 | dma_fence_put(man->move); |
2119 | man->move = NULL; | |
b7d85e1d CK |
2120 | } |
2121 | ||
d38ceaf9 | 2122 | /* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */ |
57adc4ce CK |
2123 | if (enable) |
2124 | size = adev->gmc.real_vram_size; | |
2125 | else | |
2126 | size = adev->gmc.visible_vram_size; | |
d38ceaf9 | 2127 | man->size = size >> PAGE_SHIFT; |
81988f9c | 2128 | adev->mman.buffer_funcs_enabled = enable; |
d38ceaf9 AD |
2129 | } |
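/*
 * Illustrative caller sketch (assumed shape, not code from this file):
 * the SDMA IP blocks are expected to flip this switch around hardware
 * init/fini, e.g.
 *
 *	amdgpu_ttm_set_buffer_funcs_status(adev, true);  - after hw_init
 *	amdgpu_ttm_set_buffer_funcs_status(adev, false); - before hw_fini
 */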
2130 | ||
d38ceaf9 AD |
2131 | int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) |
2132 | { | |
bed2dd84 TZ |
2133 | struct drm_file *file_priv = filp->private_data; |
2134 | struct amdgpu_device *adev = file_priv->minor->dev->dev_private; | |
d38ceaf9 | 2135 | |
e176fe17 | 2136 | if (adev == NULL) |
d38ceaf9 | 2137 | return -EINVAL; |
e176fe17 CK |
2138 | |
2139 | return ttm_bo_mmap(filp, vma, &adev->mman.bdev); | |
d38ceaf9 AD |
2140 | } |
2141 | ||
fc9c8f54 CK |
2142 | int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, |
2143 | uint64_t dst_offset, uint32_t byte_count, | |
52791eee | 2144 | struct dma_resv *resv, |
fc9c8f54 | 2145 | struct dma_fence **fence, bool direct_submit, |
c9dc9cfe | 2146 | bool vm_needs_flush, bool tmz) |
d38ceaf9 | 2147 | { |
9ecefb19 CK |
2148 | enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT : |
2149 | AMDGPU_IB_POOL_DELAYED; | |
d38ceaf9 | 2150 | struct amdgpu_device *adev = ring->adev; |
d71518b5 CK |
2151 | struct amdgpu_job *job; |
2152 | ||
d38ceaf9 AD |
2153 | uint32_t max_bytes; |
2154 | unsigned num_loops, num_dw; | |
2155 | unsigned i; | |
2156 | int r; | |
2157 | ||
c66ed765 | 2158 | if (direct_submit && !ring->sched.ready) { |
81988f9c CK |
2159 | DRM_ERROR("Trying to move memory with ring turned off.\n"); |
2160 | return -EINVAL; | |
2161 | } | |
2162 | ||
d38ceaf9 AD |
2163 | max_bytes = adev->mman.buffer_funcs->copy_max_bytes; |
2164 | num_loops = DIV_ROUND_UP(byte_count, max_bytes); | |
4e930d96 | 2165 | num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8); |
c7ae72c0 | 2166 | |
9ecefb19 | 2167 | r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job); |
d71518b5 | 2168 | if (r) |
9066b0c3 | 2169 | return r; |
c7ae72c0 | 2170 | |
cbd52851 | 2171 | if (vm_needs_flush) { |
11c3a249 | 2172 | job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo); |
cbd52851 CK |
2173 | job->vm_needs_flush = true; |
2174 | } | |
c7ae72c0 | 2175 | if (resv) { |
e86f9cee | 2176 | r = amdgpu_sync_resv(adev, &job->sync, resv, |
5d319660 CK |
2177 | AMDGPU_SYNC_ALWAYS, |
2178 | AMDGPU_FENCE_OWNER_UNDEFINED); | |
c7ae72c0 CZ |
2179 | if (r) { |
2180 | DRM_ERROR("sync failed (%d).\n", r); | |
2181 | goto error_free; | |
2182 | } | |
d38ceaf9 | 2183 | } |
d38ceaf9 AD |
2184 | |
2185 | for (i = 0; i < num_loops; i++) { | |
2186 | uint32_t cur_size_in_bytes = min(byte_count, max_bytes); | |
2187 | ||
d71518b5 | 2188 | amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset, |
c9dc9cfe | 2189 | dst_offset, cur_size_in_bytes, tmz); |
d38ceaf9 AD |
2190 | |
2191 | src_offset += cur_size_in_bytes; | |
2192 | dst_offset += cur_size_in_bytes; | |
2193 | byte_count -= cur_size_in_bytes; | |
2194 | } | |
2195 | ||
d71518b5 CK |
2196 | amdgpu_ring_pad_ib(ring, &job->ibs[0]); |
2197 | WARN_ON(job->ibs[0].length_dw > num_dw); | |
ee913fd9 CK |
2198 | if (direct_submit) |
2199 | r = amdgpu_job_submit_direct(job, ring, fence); | |
2200 | else | |
0e28b10f | 2201 | r = amdgpu_job_submit(job, &adev->mman.entity, |
e24db985 | 2202 | AMDGPU_FENCE_OWNER_UNDEFINED, fence); |
ee913fd9 CK |
2203 | if (r) |
2204 | goto error_free; | |
d38ceaf9 | 2205 | |
e24db985 | 2206 | return r; |
d71518b5 | 2207 | |
c7ae72c0 | 2208 | error_free: |
d71518b5 | 2209 | amdgpu_job_free(job); |
ee913fd9 | 2210 | DRM_ERROR("Error scheduling IBs (%d)\n", r); |
c7ae72c0 | 2211 | return r; |
d38ceaf9 AD |
2212 | } |
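/*
 * Worked example with hypothetical numbers: copying 10 MiB with an SDMA
 * copy_max_bytes of 4 MiB gives num_loops = DIV_ROUND_UP(10M, 4M) = 3
 * and, with e.g. copy_num_dw == 7, num_dw = ALIGN(3 * 7, 8) = 24 dwords
 * of IB space before padding and submission.
 */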
2213 | ||
59b4a977 | 2214 | int amdgpu_fill_buffer(struct amdgpu_bo *bo, |
44e1baeb | 2215 | uint32_t src_data, |
52791eee | 2216 | struct dma_resv *resv, |
f29224a6 | 2217 | struct dma_fence **fence) |
59b4a977 | 2218 | { |
a7d64de6 | 2219 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
44e1baeb | 2220 | uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes; |
59b4a977 FC |
2221 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; |
2222 | ||
f29224a6 CK |
2223 | struct drm_mm_node *mm_node; |
2224 | unsigned long num_pages; | |
59b4a977 | 2225 | unsigned int num_loops, num_dw; |
f29224a6 CK |
2226 | |
2227 | struct amdgpu_job *job; | |
59b4a977 FC |
2228 | int r; |
2229 | ||
81988f9c | 2230 | if (!adev->mman.buffer_funcs_enabled) { |
f29224a6 CK |
2231 | DRM_ERROR("Trying to clear memory with ring turned off.\n"); |
2232 | return -EINVAL; | |
2233 | } | |
2234 | ||
92c60d9c | 2235 | if (bo->tbo.mem.mem_type == TTM_PL_TT) { |
c5835bbb | 2236 | r = amdgpu_ttm_alloc_gart(&bo->tbo); |
92c60d9c CK |
2237 | if (r) |
2238 | return r; | |
2239 | } | |
2240 | ||
f29224a6 CK |
2241 | num_pages = bo->tbo.num_pages; |
2242 | mm_node = bo->tbo.mem.mm_node; | |
2243 | num_loops = 0; | |
2244 | while (num_pages) { | |
7e4dec58 | 2245 | uint64_t byte_count = mm_node->size << PAGE_SHIFT; |
f29224a6 | 2246 | |
7e4dec58 | 2247 | num_loops += DIV_ROUND_UP_ULL(byte_count, max_bytes); |
f29224a6 CK |
2248 | num_pages -= mm_node->size; |
2249 | ++mm_node; | |
2250 | } | |
44e1baeb | 2251 | num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw; |
59b4a977 FC |
2252 | |
2253 | /* for IB padding */ | |
f29224a6 | 2254 | num_dw += 64; |
59b4a977 | 2255 | |
9ecefb19 CK |
2256 | r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, |
2257 | &job); | |
59b4a977 FC |
2258 | if (r) |
2259 | return r; | |
2260 | ||
2261 | if (resv) { | |
2262 | r = amdgpu_sync_resv(adev, &job->sync, resv, | |
5d319660 CK |
2263 | AMDGPU_SYNC_ALWAYS, |
2264 | AMDGPU_FENCE_OWNER_UNDEFINED); | |
59b4a977 FC |
2265 | if (r) { |
2266 | DRM_ERROR("sync failed (%d).\n", r); | |
2267 | goto error_free; | |
2268 | } | |
2269 | } | |
2270 | ||
f29224a6 CK |
2271 | num_pages = bo->tbo.num_pages; |
2272 | mm_node = bo->tbo.mem.mm_node; | |
59b4a977 | 2273 | |
f29224a6 | 2274 | while (num_pages) { |
7e4dec58 | 2275 | uint64_t byte_count = mm_node->size << PAGE_SHIFT; |
f29224a6 | 2276 | uint64_t dst_addr; |
59b4a977 | 2277 | |
92c60d9c | 2278 | dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem); |
f29224a6 | 2279 | while (byte_count) { |
7e4dec58 FK |
2280 | uint32_t cur_size_in_bytes = min_t(uint64_t, byte_count, |
2281 | max_bytes); | |
f29224a6 | 2282 | |
44e1baeb CK |
2283 | amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, |
2284 | dst_addr, cur_size_in_bytes); | |
f29224a6 CK |
2285 | |
2286 | dst_addr += cur_size_in_bytes; | |
2287 | byte_count -= cur_size_in_bytes; | |
2288 | } | |
2289 | ||
2290 | num_pages -= mm_node->size; | |
2291 | ++mm_node; | |
59b4a977 FC |
2292 | } |
2293 | ||
2294 | amdgpu_ring_pad_ib(ring, &job->ibs[0]); | |
2295 | WARN_ON(job->ibs[0].length_dw > num_dw); | |
0e28b10f | 2296 | r = amdgpu_job_submit(job, &adev->mman.entity, |
f29224a6 | 2297 | AMDGPU_FENCE_OWNER_UNDEFINED, fence); |
59b4a977 FC |
2298 | if (r) |
2299 | goto error_free; | |
2300 | ||
2301 | return 0; | |
2302 | ||
2303 | error_free: | |
2304 | amdgpu_job_free(job); | |
2305 | return r; | |
2306 | } | |
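/*
 * Worked example with hypothetical numbers: a 6 MiB BO split across two
 * drm_mm nodes of 4 MiB and 2 MiB, with fill_max_bytes == 2 MiB, needs
 * num_loops = DIV_ROUND_UP(4M, 2M) + DIV_ROUND_UP(2M, 2M) = 3 fill
 * packets, emitted per node so no packet crosses a node boundary.
 */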
2307 | ||
d38ceaf9 AD |
2308 | #if defined(CONFIG_DEBUG_FS) |
2309 | ||
2310 | static int amdgpu_mm_dump_table(struct seq_file *m, void *data) | |
2311 | { | |
2312 | struct drm_info_node *node = (struct drm_info_node *)m->private; | |
0ee86853 | 2313 | unsigned ttm_pl = (uintptr_t)node->info_ent->data; |
d38ceaf9 AD |
2314 | struct drm_device *dev = node->minor->dev; |
2315 | struct amdgpu_device *adev = dev->dev_private; | |
12d4ac58 | 2316 | struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl]; |
b5c3714f | 2317 | struct drm_printer p = drm_seq_file_printer(m); |
d38ceaf9 | 2318 | |
12d4ac58 | 2319 | man->func->debug(man, &p); |
b5c3714f | 2320 | return 0; |
d38ceaf9 AD |
2321 | } |
2322 | ||
06ab6832 | 2323 | static const struct drm_info_list amdgpu_ttm_debugfs_list[] = { |
0ee86853 CK |
2324 | {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM}, |
2325 | {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT}, | |
2326 | {"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS}, | |
2327 | {"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS}, | |
2328 | {"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA}, | |
d38ceaf9 AD |
2329 | {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL}, |
2330 | #ifdef CONFIG_SWIOTLB | |
2331 | {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL} | |
2332 | #endif | |
2333 | }; | |
2334 | ||
50da5174 TSD |
2335 | /** |
2336 | * amdgpu_ttm_vram_read - Linear read access to VRAM | |
2337 | * | |
2338 | * Accesses VRAM via MMIO for debugging purposes. | |
2339 | */ | |
d38ceaf9 AD |
2340 | static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, |
2341 | size_t size, loff_t *pos) | |
2342 | { | |
45063097 | 2343 | struct amdgpu_device *adev = file_inode(f)->i_private; |
d38ceaf9 | 2344 | ssize_t result = 0; |
d38ceaf9 AD |
2345 | |
2346 | if (size & 0x3 || *pos & 0x3) | |
2347 | return -EINVAL; | |
2348 | ||
770d13b1 | 2349 | if (*pos >= adev->gmc.mc_vram_size) |
9156e723 TSD |
2350 | return -ENXIO; |
2351 | ||
030d5b97 | 2352 | size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos)); |
d38ceaf9 | 2353 | while (size) { |
030d5b97 CK |
2354 | size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4); |
2355 | uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ]; | |
d38ceaf9 | 2356 | |
030d5b97 | 2357 | amdgpu_device_vram_access(adev, *pos, value, bytes, false); |
434cbcb1 DC |
2358 | if (copy_to_user(buf, value, bytes)) |
2359 | return -EFAULT; | |
d38ceaf9 | 2360 | |
030d5b97 CK |
2361 | result += bytes; |
2362 | buf += bytes; | |
2363 | *pos += bytes; | |
2364 | size -= bytes; | |
d38ceaf9 AD |
2365 | } |
2366 | ||
2367 | return result; | |
2368 | } | |
2369 | ||
50da5174 TSD |
2370 | /** |
2371 | * amdgpu_ttm_vram_write - Linear write access to VRAM | |
2372 | * | |
2373 | * Accesses VRAM via MMIO for debugging purposes. | |
2374 | */ | |
08cab989 TSD |
2375 | static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf, |
2376 | size_t size, loff_t *pos) | |
2377 | { | |
2378 | struct amdgpu_device *adev = file_inode(f)->i_private; | |
2379 | ssize_t result = 0; | |
2380 | int r; | |
2381 | ||
2382 | if (size & 0x3 || *pos & 0x3) | |
2383 | return -EINVAL; | |
2384 | ||
770d13b1 | 2385 | if (*pos >= adev->gmc.mc_vram_size) |
08cab989 TSD |
2386 | return -ENXIO; |
2387 | ||
2388 | while (size) { | |
2389 | unsigned long flags; | |
2390 | uint32_t value; | |
2391 | ||
770d13b1 | 2392 | if (*pos >= adev->gmc.mc_vram_size) |
08cab989 TSD |
2393 | return result; |
2394 | ||
2395 | r = get_user(value, (uint32_t *)buf); | |
2396 | if (r) | |
2397 | return r; | |
2398 | ||
2399 | spin_lock_irqsave(&adev->mmio_idx_lock, flags); | |
c3057281 TSD |
2400 | WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000); |
2401 | WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31); | |
2402 | WREG32_NO_KIQ(mmMM_DATA, value); | |
08cab989 TSD |
2403 | spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); |
2404 | ||
2405 | result += 4; | |
2406 | buf += 4; | |
2407 | *pos += 4; | |
2408 | size -= 4; | |
2409 | } | |
2410 | ||
2411 | return result; | |
2412 | } | |
2413 | ||
d38ceaf9 AD |
2414 | static const struct file_operations amdgpu_ttm_vram_fops = { |
2415 | .owner = THIS_MODULE, | |
2416 | .read = amdgpu_ttm_vram_read, | |
08cab989 TSD |
2417 | .write = amdgpu_ttm_vram_write, |
2418 | .llseek = default_llseek, | |
d38ceaf9 AD |
2419 | }; |
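/*
 * Userspace usage sketch (illustrative): with debugfs mounted, the file
 * registered below as /sys/kernel/debug/dri/<minor>/amdgpu_vram accepts
 * dword-aligned reads and writes, e.g.
 *
 *	dd if=/sys/kernel/debug/dri/0/amdgpu_vram bs=4 count=1 skip=256
 *
 * reads the dword at VRAM offset 0x400.
 */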
2420 | ||
a1d29476 CK |
2421 | #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS |
2422 | ||
50da5174 TSD |
2423 | /** |
2424 | * amdgpu_ttm_gtt_read - Linear read access to GTT memory | |
2425 | */ | |
d38ceaf9 AD |
2426 | static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf, |
2427 | size_t size, loff_t *pos) | |
2428 | { | |
45063097 | 2429 | struct amdgpu_device *adev = file_inode(f)->i_private; |
d38ceaf9 AD |
2430 | ssize_t result = 0; |
2431 | int r; | |
2432 | ||
2433 | while (size) { | |
2434 | loff_t p = *pos / PAGE_SIZE; | |
2435 | unsigned off = *pos & ~PAGE_MASK; | |
2436 | size_t cur_size = min_t(size_t, size, PAGE_SIZE - off); | |
2437 | struct page *page; | |
2438 | void *ptr; | |
2439 | ||
2440 | if (p >= adev->gart.num_cpu_pages) | |
2441 | return result; | |
2442 | ||
2443 | page = adev->gart.pages[p]; | |
2444 | if (page) { | |
2445 | ptr = kmap(page); | |
2446 | ptr += off; | |
2447 | ||
2448 | r = copy_to_user(buf, ptr, cur_size); | |
2449 | kunmap(adev->gart.pages[p]); | |
2450 | } else | |
2451 | r = clear_user(buf, cur_size); | |
2452 | ||
2453 | if (r) | |
2454 | return -EFAULT; | |
2455 | ||
2456 | result += cur_size; | |
2457 | buf += cur_size; | |
2458 | *pos += cur_size; | |
2459 | size -= cur_size; | |
2460 | } | |
2461 | ||
2462 | return result; | |
2463 | } | |
2464 | ||
2465 | static const struct file_operations amdgpu_ttm_gtt_fops = { | |
2466 | .owner = THIS_MODULE, | |
2467 | .read = amdgpu_ttm_gtt_read, | |
2468 | .llseek = default_llseek | |
2469 | }; | |
2470 | ||
2471 | #endif | |
2472 | ||
50da5174 TSD |
2473 | /** |
2474 | * amdgpu_iomem_read - Virtual read access to GPU mapped memory | |
2475 | * | |
2476 | * This function is used to read memory that has been mapped to the | |
2477 | * GPU and the known addresses are not physical addresses but instead | |
2478 | * bus addresses (e.g., what you'd put in an IB or ring buffer). | |
2479 | */ | |
ebb043f2 TSD |
2480 | static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf, |
2481 | size_t size, loff_t *pos) | |
38290b2c TSD |
2482 | { |
2483 | struct amdgpu_device *adev = file_inode(f)->i_private; | |
38290b2c | 2484 | struct iommu_domain *dom; |
ebb043f2 TSD |
2485 | ssize_t result = 0; |
2486 | int r; | |
38290b2c | 2487 | |
50da5174 | 2488 | /* retrieve the IOMMU domain if any for this device */ |
ebb043f2 | 2489 | dom = iommu_get_domain_for_dev(adev->dev); |
38290b2c | 2490 | |
ebb043f2 TSD |
2491 | while (size) { |
2492 | phys_addr_t addr = *pos & PAGE_MASK; | |
2493 | loff_t off = *pos & ~PAGE_MASK; | |
2494 | size_t bytes = PAGE_SIZE - off; | |
2495 | unsigned long pfn; | |
2496 | struct page *p; | |
2497 | void *ptr; | |
2498 | ||
2499 | bytes = bytes < size ? bytes : size; | |
2500 | ||
50da5174 TSD |
2501 | /* Translate the bus address to a physical address. If |
2502 | * the domain is NULL it means there is no IOMMU active | |
2503 | * and the address translation is the identity | |
2504 | */ | |
ebb043f2 TSD |
2505 | addr = dom ? iommu_iova_to_phys(dom, addr) : addr; |
2506 | ||
2507 | pfn = addr >> PAGE_SHIFT; | |
2508 | if (!pfn_valid(pfn)) | |
2509 | return -EPERM; | |
2510 | ||
2511 | p = pfn_to_page(pfn); | |
2512 | if (p->mapping != adev->mman.bdev.dev_mapping) | |
2513 | return -EPERM; | |
2514 | ||
2515 | ptr = kmap(p); | |
864917a3 | 2516 | r = copy_to_user(buf, ptr + off, bytes); |
ebb043f2 TSD |
2517 | kunmap(p); |
2518 | if (r) | |
2519 | return -EFAULT; | |
2520 | ||
2521 | size -= bytes; | |
2522 | *pos += bytes; | |
2523 | result += bytes; | |
2524 | } | |
2525 | ||
2526 | return result; | |
2527 | } | |
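/*
 * Note on the translation above: with no IOMMU, dom is NULL and the bus
 * address is used as the physical address directly; with an IOMMU it is
 * first run through iommu_iova_to_phys(). Either way, only pages that
 * still belong to this device's mapping (the p->mapping check) may be
 * touched, so stale or foreign addresses fail with -EPERM.
 */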
2528 | ||
50da5174 TSD |
2529 | /** |
2530 | * amdgpu_iomem_write - Virtual write access to GPU mapped memory | |
2531 | * | |
2532 | * This function is used to write memory that has been mapped to the | |
2533 | * GPU and the known addresses are not physical addresses but instead | |
2534 | * bus addresses (e.g., what you'd put in an IB or ring buffer). | |
2535 | */ | |
ebb043f2 TSD |
2536 | static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf, |
2537 | size_t size, loff_t *pos) | |
2538 | { | |
2539 | struct amdgpu_device *adev = file_inode(f)->i_private; | |
2540 | struct iommu_domain *dom; | |
2541 | ssize_t result = 0; | |
2542 | int r; | |
38290b2c TSD |
2543 | |
2544 | dom = iommu_get_domain_for_dev(adev->dev); | |
a40cfa0b | 2545 | |
ebb043f2 TSD |
2546 | while (size) { |
2547 | phys_addr_t addr = *pos & PAGE_MASK; | |
2548 | loff_t off = *pos & ~PAGE_MASK; | |
2549 | size_t bytes = PAGE_SIZE - off; | |
2550 | unsigned long pfn; | |
2551 | struct page *p; | |
2552 | void *ptr; | |
2553 | ||
2554 | bytes = bytes < size ? bytes : size; | |
38290b2c | 2555 | |
ebb043f2 TSD |
2556 | addr = dom ? iommu_iova_to_phys(dom, addr) : addr; |
2557 | ||
2558 | pfn = addr >> PAGE_SHIFT; | |
2559 | if (!pfn_valid(pfn)) | |
2560 | return -EPERM; | |
2561 | ||
2562 | p = pfn_to_page(pfn); | |
2563 | if (p->mapping != adev->mman.bdev.dev_mapping) | |
2564 | return -EPERM; | |
2565 | ||
2566 | ptr = kmap(p); | |
864917a3 | 2567 | r = copy_from_user(ptr + off, buf, bytes); |
ebb043f2 TSD |
2568 | kunmap(p); |
2569 | if (r) | |
2570 | return -EFAULT; | |
2571 | ||
2572 | size -= bytes; | |
2573 | *pos += bytes; | |
2574 | result += bytes; | |
2575 | } | |
2576 | ||
2577 | return result; | |
38290b2c TSD |
2578 | } |
2579 | ||
ebb043f2 | 2580 | static const struct file_operations amdgpu_ttm_iomem_fops = { |
38290b2c | 2581 | .owner = THIS_MODULE, |
ebb043f2 TSD |
2582 | .read = amdgpu_iomem_read, |
2583 | .write = amdgpu_iomem_write, | |
38290b2c TSD |
2584 | .llseek = default_llseek |
2585 | }; | |
a40cfa0b TSD |
2586 | |
2587 | static const struct { | |
2588 | char *name; | |
2589 | const struct file_operations *fops; | |
2590 | int domain; | |
2591 | } ttm_debugfs_entries[] = { | |
2592 | { "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM }, | |
2593 | #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS | |
2594 | { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT }, | |
2595 | #endif | |
ebb043f2 | 2596 | { "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM }, |
a40cfa0b TSD |
2597 | }; |
2598 | ||
a1d29476 CK |
2599 | #endif |
2600 | ||
c5820361 | 2601 | int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) |
d38ceaf9 AD |
2602 | { |
2603 | #if defined(CONFIG_DEBUG_FS) | |
2604 | unsigned count; | |
2605 | ||
2606 | struct drm_minor *minor = adev->ddev->primary; | |
2607 | struct dentry *ent, *root = minor->debugfs_root; | |
2608 | ||
a40cfa0b TSD |
2609 | for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) { |
2610 | ent = debugfs_create_file( | |
2611 | ttm_debugfs_entries[count].name, | |
2612 | S_IFREG | S_IRUGO, root, | |
2613 | adev, | |
2614 | ttm_debugfs_entries[count].fops); | |
2615 | if (IS_ERR(ent)) | |
2616 | return PTR_ERR(ent); | |
2617 | if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM) | |
770d13b1 | 2618 | i_size_write(ent->d_inode, adev->gmc.mc_vram_size); |
a40cfa0b | 2619 | else if (ttm_debugfs_entries[count].domain == TTM_PL_TT) |
770d13b1 | 2620 | i_size_write(ent->d_inode, adev->gmc.gart_size); |
a40cfa0b TSD |
2621 | adev->mman.debugfs_entries[count] = ent; |
2622 | } | |
d38ceaf9 AD |
2623 | |
2624 | count = ARRAY_SIZE(amdgpu_ttm_debugfs_list); | |
2625 | ||
2626 | #ifdef CONFIG_SWIOTLB | |
fd5fd480 | 2627 | if (!(adev->need_swiotlb && swiotlb_nr_tbl())) |
d38ceaf9 AD |
2628 | --count; |
2629 | #endif | |
2630 | ||
2631 | return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count); | |
2632 | #else | |
d38ceaf9 AD |
2633 | return 0; |
2634 | #endif | |
2635 | } |