/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/hmm.h>
#include <linux/pagemap.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/dma-buf.h>
#include <linux/sizes.h>

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>

#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "bif/bif_4_1_d.h"

#define AMDGPU_TTM_VRAM_MAX_DW_READ	(size_t)128

/**
 * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
 * memory request.
 *
 * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
 * @type: The type of memory requested
 * @man: The memory type manager for each domain
 *
 * This is called by ttm_bo_init_mm() when a buffer object is being
 * initialized.
 */
static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct amdgpu_device *adev;

	adev = amdgpu_ttm_adev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		/* GTT memory */
		man->func = &amdgpu_gtt_mgr_func;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &amdgpu_vram_mgr_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		/* On-chip GDS memory */
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
		man->available_caching = TTM_PL_FLAG_UNCACHED;
		man->default_caching = TTM_PL_FLAG_UNCACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

/**
 * amdgpu_evict_flags - Compute placement flags
 *
 * @bo: The buffer object to evict
 * @placement: Possible destination(s) for evicted BO
 *
 * Fill in placement data when ttm_bo_evict() is called
 */
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	/* Don't handle scatter gather BOs */
	if (bo->type == ttm_bo_type_sg) {
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;
	}

	/* Object isn't an AMDGPU object so ignore */
	if (!amdgpu_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}

	abo = ttm_to_amdgpu_bo(bo);
	switch (bo->mem.mem_type) {
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;

	case TTM_PL_VRAM:
		if (!adev->mman.buffer_funcs_enabled) {
			/* Move to system memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
			   amdgpu_bo_in_cpu_visible_vram(abo)) {

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT);
			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
			abo->placements[0].lpfn = 0;
			abo->placement.busy_placement = &abo->placements[1];
			abo->placement.num_busy_placement = 1;
		} else {
			/* Move to GTT memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		}
		break;
	case TTM_PL_TT:
	default:
		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		break;
	}
	*placement = abo->placement;
}

/**
 * amdgpu_verify_access - Verify access for a mmap call
 *
 * @bo: The buffer object to map
 * @filp: The file pointer from the process performing the mmap
 *
 * This is called by ttm_bo_mmap() to verify whether a process
 * has the right to mmap a BO to their process space.
 */
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);

	/*
	 * Don't verify access for KFD BOs. They don't have a GEM
	 * object associated with them.
	 */
	if (abo->kfd_bo)
		return 0;

	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&abo->tbo.base.vma_node,
					  filp->private_data);
}

/**
 * amdgpu_move_null - Register memory for a buffer object
 *
 * @bo: The bo to assign the memory to
 * @new_mem: The memory to be assigned.
 *
 * Assign the memory from new_mem to the memory of the buffer object bo.
 */
static void amdgpu_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

/**
 * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
 *
 * @bo: The bo to assign the memory to.
 * @mm_node: Memory manager node for drm allocator.
 * @mem: The region where the bo resides.
 *
 */
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
				    struct drm_mm_node *mm_node,
				    struct ttm_mem_reg *mem)
{
	uint64_t addr = 0;

	if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
		addr = mm_node->start << PAGE_SHIFT;
		addr += amdgpu_ttm_domain_start(amdgpu_ttm_adev(bo->bdev),
						mem->mem_type);
	}
	return addr;
}

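/*
 * Worked example for the address computation above (illustrative numbers
 * only, assuming 4 KiB pages): a GTT node with mm_node->start == 0x10
 * contributes 0x10 << 12 == 0x10000, and amdgpu_ttm_domain_start() adds
 * the start of the domain aperture, e.g. adev->gmc.gart_start ==
 * 0xff00000000, giving the MC address 0xff00010000 the GPU uses to reach
 * the buffer.
 */
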
/**
 * amdgpu_find_mm_node - Helper function that finds the drm_mm_node
 * containing @offset. It also adjusts @offset to be relative to the
 * returned drm_mm_node.
 *
 * @mem: The region where the bo resides.
 * @offset: The offset to look up, updated in place.
 *
 */
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
					       uint64_t *offset)
{
	struct drm_mm_node *mm_node = mem->mm_node;

	while (*offset >= (mm_node->size << PAGE_SHIFT)) {
		*offset -= (mm_node->size << PAGE_SHIFT);
		++mm_node;
	}
	return mm_node;
}

/**
 * amdgpu_ttm_map_buffer - Map memory into the GART windows
 * @bo: buffer object to map
 * @mem: memory object to map
 * @mm_node: drm_mm node object to map
 * @num_pages: number of pages to map
 * @offset: offset into @mm_node where to start
 * @window: which GART window to use
 * @ring: DMA ring to use for the copy
 * @tmz: if we should setup a TMZ enabled mapping
 * @addr: resulting address inside the MC address space
 *
 * Setup one of the GART windows to access a specific piece of memory or return
 * the physical address for local memory.
 */
static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
				 struct ttm_mem_reg *mem,
				 struct drm_mm_node *mm_node,
				 unsigned num_pages, uint64_t offset,
				 unsigned window, struct amdgpu_ring *ring,
				 bool tmz, uint64_t *addr)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	unsigned num_dw, num_bytes;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	void *cpu_addr;
	uint64_t flags;
	unsigned int i;
	int r;

	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

	/* Map only what can't be accessed directly */
	if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
		*addr = amdgpu_mm_node_addr(bo, mm_node, mem) + offset;
		return 0;
	}

	*addr = adev->gmc.gart_start;
	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;
	*addr += offset & ~PAGE_MASK;

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = num_pages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, false);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
	if (tmz)
		flags |= AMDGPU_PTE_TMZ;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	if (mem->mem_type == TTM_PL_TT) {
		struct ttm_dma_tt *dma;
		dma_addr_t *dma_address;

		dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
		dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
		r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
				    cpu_addr);
		if (r)
			goto error_free;
	} else {
		dma_addr_t dma_address;

		dma_address = (mm_node->start << PAGE_SHIFT) + offset;
		dma_address += adev->vm_manager.vram_base_offset;

		for (i = 0; i < num_pages; ++i) {
			r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
					    &dma_address, flags, cpu_addr);
			if (r)
				goto error_free;

			dma_address += PAGE_SIZE;
		}
	}

	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}

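/*
 * Sketch of the window addressing above (illustrative numbers, assuming
 * AMDGPU_GTT_MAX_TRANSFER_SIZE == 512 pages and 4 KiB GPU pages; the real
 * constants live in the headers): window N starts at gart_start +
 * N * 512 * 0x1000, so with gart_start == 0xff00000000, window 1 begins
 * at 0xff00200000. The low bits of @offset are then added back so that
 * data which is unaligned in the source stays equally unaligned in the
 * window.
 */
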
/**
 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
 * @adev: amdgpu device
 * @src: buffer/address where to read from
 * @dst: buffer/address where to write to
 * @size: number of bytes to copy
 * @tmz: if a secure copy should be used
 * @resv: resv object to sync to
 * @f: Returns the last fence if multiple jobs are submitted.
 *
 * The function copies @size bytes from {src->mem + src->offset} to
 * {dst->mem + dst->offset}. src->bo and dst->bo could be the same BO for
 * a move, and different for a BO to BO copy.
 *
 */
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       const struct amdgpu_copy_mem *src,
			       const struct amdgpu_copy_mem *dst,
			       uint64_t size, bool tmz,
			       struct dma_resv *resv,
			       struct dma_fence **f)
{
	const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
					AMDGPU_GPU_PAGE_SIZE);

	uint64_t src_node_size, dst_node_size, src_offset, dst_offset;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct drm_mm_node *src_mm, *dst_mm;
	struct dma_fence *fence = NULL;
	int r = 0;

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	src_offset = src->offset;
	src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
	src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;

	dst_offset = dst->offset;
	dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
	dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;

	mutex_lock(&adev->mman.gtt_window_lock);

	while (size) {
		uint32_t src_page_offset = src_offset & ~PAGE_MASK;
		uint32_t dst_page_offset = dst_offset & ~PAGE_MASK;
		struct dma_fence *next;
		uint32_t cur_size;
		uint64_t from, to;

		/* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
		 * begins at an offset, then adjust the size accordingly
		 */
		cur_size = max(src_page_offset, dst_page_offset);
		cur_size = min(min3(src_node_size, dst_node_size, size),
			       (uint64_t)(GTT_MAX_BYTES - cur_size));

		/* Map src to window 0 and dst to window 1. */
		r = amdgpu_ttm_map_buffer(src->bo, src->mem, src_mm,
					  PFN_UP(cur_size + src_page_offset),
					  src_offset, 0, ring, tmz, &from);
		if (r)
			goto error;

		r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, dst_mm,
					  PFN_UP(cur_size + dst_page_offset),
					  dst_offset, 1, ring, tmz, &to);
		if (r)
			goto error;

		r = amdgpu_copy_buffer(ring, from, to, cur_size,
				       resv, &next, false, true, tmz);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		size -= cur_size;
		if (!size)
			break;

		src_node_size -= cur_size;
		if (!src_node_size) {
			++src_mm;
			src_node_size = src_mm->size << PAGE_SHIFT;
			src_offset = 0;
		} else {
			src_offset += cur_size;
		}

		dst_node_size -= cur_size;
		if (!dst_node_size) {
			++dst_mm;
			dst_node_size = dst_mm->size << PAGE_SHIFT;
			dst_offset = 0;
		} else {
			dst_offset += cur_size;
		}
	}
error:
	mutex_unlock(&adev->mman.gtt_window_lock);
	if (f)
		*f = dma_fence_get(fence);
	dma_fence_put(fence);
	return r;
}

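/*
 * Illustrative caller sketch (not part of the driver; BO pointers are
 * hypothetical): copying the first @size bytes of one reserved BO into
 * another. Both objects must stay reserved for the duration.
 *
 *	struct amdgpu_copy_mem src = {
 *		.bo = &src_abo->tbo, .mem = &src_abo->tbo.mem, .offset = 0,
 *	};
 *	struct amdgpu_copy_mem dst = {
 *		.bo = &dst_abo->tbo, .mem = &dst_abo->tbo.mem, .offset = 0,
 *	};
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 *	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst, size, false,
 *				       src_abo->tbo.base.resv, &fence);
 *	if (!r && fence)
 *		dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 */
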
/**
 * amdgpu_move_blit - Copy an entire buffer to another buffer
 *
 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
 * help move buffers to and from VRAM.
 */
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict, bool no_wait_gpu,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_copy_mem src, dst;
	struct dma_fence *fence = NULL;
	int r;

	src.bo = bo;
	dst.bo = bo;
	src.mem = old_mem;
	dst.mem = new_mem;
	src.offset = 0;
	dst.offset = 0;

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
				       new_mem->num_pages << PAGE_SHIFT,
				       amdgpu_bo_encrypted(abo),
				       bo->base.resv, &fence);
	if (r)
		goto error;

	/* clear the space being freed */
	if (old_mem->mem_type == TTM_PL_VRAM &&
	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
		struct dma_fence *wipe_fence = NULL;

		r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
				       NULL, &wipe_fence);
		if (r) {
			goto error;
		} else if (wipe_fence) {
			dma_fence_put(fence);
			fence = wipe_fence;
		}
	}

	/* Always block for VM page tables before committing the new location */
	if (bo->type == ttm_bo_type_kernel)
		r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
	else
		r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
	dma_fence_put(fence);
	return r;

error:
	if (fence)
		dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer
 *
 * Called by amdgpu_bo_move().
 */
static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	/* create space/pages for new_mem in GTT space */
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		pr_err("Failed to find GTT space for blit from VRAM\n");
		return r;
	}

	/* set caching flags */
	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	/* Bind the memory to the GTT space */
	r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	/* blit VRAM to GTT */
	r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	/* move BO (in tmp_mem) to new_mem */
	r = ttm_bo_move_ttm(bo, ctx, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

/**
 * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM
 *
 * Called by amdgpu_bo_move().
 */
static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	/* make space in GTT for old_mem buffer */
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		pr_err("Failed to find GTT space for blit to VRAM\n");
		return r;
	}

	/* move/bind old memory to GTT space */
	r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	/* copy to VRAM */
	r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

/**
 * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
 *
 * Called by amdgpu_bo_move()
 */
static bool amdgpu_mem_visible(struct amdgpu_device *adev,
			       struct ttm_mem_reg *mem)
{
	struct drm_mm_node *nodes = mem->mm_node;

	if (mem->mem_type == TTM_PL_SYSTEM ||
	    mem->mem_type == TTM_PL_TT)
		return true;
	if (mem->mem_type != TTM_PL_VRAM)
		return false;

	/* ttm_mem_reg_ioremap only supports contiguous memory */
	if (nodes->size != mem->num_pages)
		return false;

	return ((nodes->start + nodes->size) << PAGE_SHIFT)
		<= adev->gmc.visible_vram_size;
}

/**
 * amdgpu_bo_move - Move a buffer object to a new memory location
 *
 * Called by ttm_bo_handle_move_mem()
 */
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	/* Can't move a pinned BO */
	abo = ttm_to_amdgpu_bo(bo);
	if (WARN_ON_ONCE(abo->pin_count > 0))
		return -EINVAL;

	adev = amdgpu_ttm_adev(bo->bdev);

	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if (old_mem->mem_type == AMDGPU_PL_GDS ||
	    old_mem->mem_type == AMDGPU_PL_GWS ||
	    old_mem->mem_type == AMDGPU_PL_OA ||
	    new_mem->mem_type == AMDGPU_PL_GDS ||
	    new_mem->mem_type == AMDGPU_PL_GWS ||
	    new_mem->mem_type == AMDGPU_PL_OA) {
		/* Nothing to save here */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}

	if (!adev->mman.buffer_funcs_enabled) {
		r = -ENODEV;
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
	} else {
		r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
				     new_mem, old_mem);
	}

	if (r) {
memcpy:
		/* Check that all memory is CPU accessible */
		if (!amdgpu_mem_visible(adev, old_mem) ||
		    !amdgpu_mem_visible(adev, new_mem)) {
			pr_err("Move buffer fallback to memcpy unavailable\n");
			return r;
		}

		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r)
			return r;
	}

	if (bo->type == ttm_bo_type_device &&
	    new_mem->mem_type == TTM_PL_VRAM &&
	    old_mem->mem_type != TTM_PL_VRAM) {
		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
		 * accesses the BO after it's moved.
		 */
		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
	return 0;
}

/**
 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
 *
 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
 */
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct drm_mm_node *mm_node = mem->mm_node;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
			return -EINVAL;
		/* Only physically contiguous buffers apply. In a contiguous
		 * buffer, size of the first mm_node would match the number of
		 * pages in ttm_mem_reg.
		 */
		if (adev->mman.aper_base_kaddr &&
		    (mm_node->size == mem->num_pages))
			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
					mem->bus.offset;

		mem->bus.base = adev->gmc.aper_base;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
{
	uint64_t offset = (page_offset << PAGE_SHIFT);
	struct drm_mm_node *mm;

	mm = amdgpu_find_mm_node(&bo->mem, &offset);
	return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
		(offset >> PAGE_SHIFT);
}

/**
 * amdgpu_ttm_domain_start - Returns GPU start address
 * @adev: amdgpu device object
 * @type: type of the memory
 *
 * Returns:
 * GPU start address of a memory domain
 */

uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
{
	switch (type) {
	case TTM_PL_TT:
		return adev->gmc.gart_start;
	case TTM_PL_VRAM:
		return adev->gmc.vram_start;
	}

	return 0;
}

/*
 * TTM backend functions.
 */
struct amdgpu_ttm_tt {
	struct ttm_dma_tt	ttm;
	struct drm_gem_object	*gobj;
	u64			offset;
	uint64_t		userptr;
	struct task_struct	*usertask;
	uint32_t		userflags;
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
	struct hmm_range	*range;
#endif
};

#ifdef CONFIG_DRM_AMDGPU_USERPTR
/**
 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
 * memory and start HMM tracking CPU page table update
 *
 * Calling function must call amdgpu_ttm_tt_get_user_pages_done() once and
 * only once afterwards to stop HMM tracking
 */
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
{
	struct ttm_tt *ttm = bo->tbo.ttm;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned long start = gtt->userptr;
	struct vm_area_struct *vma;
	struct hmm_range *range;
	unsigned long timeout;
	struct mm_struct *mm;
	unsigned long i;
	int r = 0;

	mm = bo->notifier.mm;
	if (unlikely(!mm)) {
		DRM_DEBUG_DRIVER("BO is not registered?\n");
		return -EFAULT;
	}

	/* Another get_user_pages is running at the same time?? */
	if (WARN_ON(gtt->range))
		return -EFAULT;

	if (!mmget_not_zero(mm)) /* Happens during process shutdown */
		return -ESRCH;

	range = kzalloc(sizeof(*range), GFP_KERNEL);
	if (unlikely(!range)) {
		r = -ENOMEM;
		goto out;
	}
	range->notifier = &bo->notifier;
	range->start = bo->notifier.interval_tree.start;
	range->end = bo->notifier.interval_tree.last + 1;
	range->default_flags = HMM_PFN_REQ_FAULT;
	if (!amdgpu_ttm_tt_is_readonly(ttm))
		range->default_flags |= HMM_PFN_REQ_WRITE;

	range->hmm_pfns = kvmalloc_array(ttm->num_pages,
					 sizeof(*range->hmm_pfns), GFP_KERNEL);
	if (unlikely(!range->hmm_pfns)) {
		r = -ENOMEM;
		goto out_free_ranges;
	}

	mmap_read_lock(mm);
	vma = find_vma(mm, start);
	if (unlikely(!vma || start < vma->vm_start)) {
		r = -EFAULT;
		goto out_unlock;
	}
	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
		vma->vm_file)) {
		r = -EPERM;
		goto out_unlock;
	}
	mmap_read_unlock(mm);
	timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);

retry:
	range->notifier_seq = mmu_interval_read_begin(&bo->notifier);

	mmap_read_lock(mm);
	r = hmm_range_fault(range);
	mmap_read_unlock(mm);
	if (unlikely(r)) {
		/*
		 * FIXME: This timeout should encompass the retry from
		 * mmu_interval_read_retry() as well.
		 */
		if (r == -EBUSY && !time_after(jiffies, timeout))
			goto retry;
		goto out_free_pfns;
	}

	/*
	 * Due to default_flags, all pages are HMM_PFN_VALID or
	 * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
	 * the notifier_lock, and mmu_interval_read_retry() must be done first.
	 */
	for (i = 0; i < ttm->num_pages; i++)
		pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);

	gtt->range = range;
	mmput(mm);

	return 0;

out_unlock:
	mmap_read_unlock(mm);
out_free_pfns:
	kvfree(range->hmm_pfns);
out_free_ranges:
	kfree(range);
out:
	mmput(mm);
	return r;
}

/**
 * amdgpu_ttm_tt_get_user_pages_done - stop HMM tracking of CPU page table
 * changes and check if the pages backing this ttm range have been invalidated
 *
 * Returns: true if pages are still valid
 */
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool r = false;

	if (!gtt || !gtt->userptr)
		return false;

	DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
		gtt->userptr, ttm->num_pages);

	WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
		"No user pages to check\n");

	if (gtt->range) {
		/*
		 * FIXME: Must always hold notifier_lock for this, and must
		 * not ignore the return code.
		 */
		r = mmu_interval_read_retry(gtt->range->notifier,
					    gtt->range->notifier_seq);
		kvfree(gtt->range->hmm_pfns);
		kfree(gtt->range);
		gtt->range = NULL;
	}

	return !r;
}
#endif

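/*
 * Usage pattern sketch (caller code hypothetical, simplified from the
 * amdgpu CS path): the two functions above bracket a snapshot of the user
 * pages.
 *
 *	r = amdgpu_ttm_tt_get_user_pages(bo, pages);
 *	if (r)
 *		return r;
 *	... consume the snapshotted pages ...
 *	if (!amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm))
 *		goto retry_from_the_top;
 *
 * If the mmu interval notifier fired between the two calls, the snapshot
 * is stale and must be redone; this is how userptr BOs stay coherent with
 * the CPU page tables without pinning the pages.
 */
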
/**
 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
 *
 * Called by amdgpu_cs_list_validate(). This creates the page list
 * that backs user memory and will ultimately be mapped into the device
 * address space.
 */
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	unsigned long i;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i] = pages ? pages[i] : NULL;
}

/**
 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
 *
 * Called by amdgpu_ttm_backend_bind()
 */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned nents;
	int r;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* Allocate an SG array and squash pages into it */
	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	/* Map SG to device */
	r = -ENOMEM;
	nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents == 0)
		goto release_sg;

	/* convert SG to linear array of pages and dma addresses */
	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);
	return r;
}

/**
 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
 */
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* unmap the pages mapped to the device */
	dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	sg_free_table(ttm->sg);

#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
	if (gtt->range) {
		unsigned long i;

		for (i = 0; i < ttm->num_pages; i++) {
			if (ttm->pages[i] !=
			    hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
				break;
		}

		WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
	}
#endif
}

static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
				struct ttm_buffer_object *tbo,
				uint64_t flags)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
	struct ttm_tt *ttm = tbo->ttm;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	if (amdgpu_bo_encrypted(abo))
		flags |= AMDGPU_PTE_TMZ;

	if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
		uint64_t page_idx = 1;

		r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
				     ttm->pages, gtt->ttm.dma_address, flags);
		if (r)
			goto gart_bind_fail;

		/* The memory type of the first page defaults to UC. Now
		 * modify the memory type to NC from the second page of
		 * the BO onward.
		 */
		flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
		flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);

		r = amdgpu_gart_bind(adev,
				     gtt->offset + (page_idx << PAGE_SHIFT),
				     ttm->num_pages - page_idx,
				     &ttm->pages[page_idx],
				     &(gtt->ttm.dma_address[page_idx]), flags);
	} else {
		r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
				     ttm->pages, gtt->ttm.dma_address, flags);
	}

gart_bind_fail:
	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);

	return r;
}

/**
 * amdgpu_ttm_backend_bind - Bind GTT memory
 *
 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
 * This handles binding GTT memory to the device address space.
 */
static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	uint64_t flags;
	int r = 0;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(ttm);
		if (r) {
			DRM_ERROR("failed to pin userptr\n");
			return r;
		}
	}
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type == AMDGPU_PL_GDS ||
	    bo_mem->mem_type == AMDGPU_PL_GWS ||
	    bo_mem->mem_type == AMDGPU_PL_OA)
		return -EINVAL;

	if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
		return 0;
	}

	/* compute PTE flags relevant to this BO memory */
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);

	/* bind pages into GART page tables */
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);

	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);
	return r;
}

/**
 * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object
 */
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
	struct ttm_mem_reg tmp;
	struct ttm_placement placement;
	struct ttm_place placements;
	uint64_t addr, flags;
	int r;

	if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
		return 0;

	addr = amdgpu_gmc_agp_addr(bo);
	if (addr != AMDGPU_BO_INVALID_OFFSET) {
		bo->mem.start = addr >> PAGE_SHIFT;
	} else {

		/* allocate GART space */
		tmp = bo->mem;
		tmp.mm_node = NULL;
		placement.num_placement = 1;
		placement.placement = &placements;
		placement.num_busy_placement = 1;
		placement.busy_placement = &placements;
		placements.fpfn = 0;
		placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
		placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
			TTM_PL_FLAG_TT;

		r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
		if (unlikely(r))
			return r;

		/* compute PTE flags for this buffer object */
		flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);

		/* Bind pages */
		gtt->offset = (u64)tmp.start << PAGE_SHIFT;
		r = amdgpu_ttm_gart_bind(adev, bo, flags);
		if (unlikely(r)) {
			ttm_bo_mem_put(bo, &tmp);
			return r;
		}

		ttm_bo_mem_put(bo, &bo->mem);
		bo->mem = tmp;
	}

	return 0;
}

/**
 * amdgpu_ttm_recover_gart - Rebind GTT pages
 *
 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
 * rebind GTT pages during a GPU reset.
 */
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	uint64_t flags;
	int r;

	if (!tbo->ttm)
		return 0;

	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
	r = amdgpu_ttm_gart_bind(adev, tbo, flags);

	return r;
}

/**
 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
 *
 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
 * ttm_tt_destroy().
 */
static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	/* if the pages have userptr pinning then clear that first */
	if (gtt->userptr)
		amdgpu_ttm_tt_unpin_userptr(ttm);

	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
		return 0;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
	if (r)
		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
			  gtt->ttm.ttm.num_pages, gtt->offset);
	return r;
}

static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt->usertask)
		put_task_struct(gtt->usertask);

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func amdgpu_backend_func = {
	.bind = &amdgpu_ttm_backend_bind,
	.unbind = &amdgpu_ttm_backend_unbind,
	.destroy = &amdgpu_ttm_backend_destroy,
};

/**
 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
 *
 * @bo: The buffer object to create a GTT ttm_tt object around
 *
 * Called by ttm_tt_create().
 */
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct amdgpu_ttm_tt *gtt;

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &amdgpu_backend_func;
	gtt->gobj = &bo->base;

	/* allocate space for the uninitialized page entries */
	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

/**
 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
 *
 * Map the pages of a ttm_tt object to an address space visible
 * to the underlying device.
 */
static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
		if (!ttm->sg) {
			struct dma_buf_attachment *attach;
			struct sg_table *sgt;

			attach = gtt->gobj->import_attach;
			sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
			if (IS_ERR(sgt))
				return PTR_ERR(sgt);

			ttm->sg = sgt;
		}

		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address,
						 ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
	}
#endif

	/* fall back to generic helper to populate the page array
	 * and map them to the device */
	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
}

/**
 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
 *
 * Unmaps pages of a ttm_tt object from the device address space and
 * unpopulates the page array backing it.
 */
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct amdgpu_device *adev;

	if (gtt && gtt->userptr) {
		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (ttm->sg && gtt->gobj->import_attach) {
		struct dma_buf_attachment *attach;

		attach = gtt->gobj->import_attach;
		dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
		ttm->sg = NULL;
		return;
	}

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	adev = amdgpu_ttm_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
		return;
	}
#endif

	/* fall back to generic helper to unmap and unpopulate array */
	ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
}

/**
 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
 * task
 *
 * @ttm: The ttm_tt object to bind this userptr object to
 * @addr: The address in the current tasks VM space to use
 * @flags: Requirements of userptr object.
 *
 * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
 * to current task
 */
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->userflags = flags;

	if (gtt->usertask)
		put_task_struct(gtt->usertask);
	gtt->usertask = current->group_leader;
	get_task_struct(gtt->usertask);

	return 0;
}

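/*
 * Illustrative flow (simplified from amdgpu_gem_userptr_ioctl(); error
 * handling and labels are hypothetical): a userptr BO is first tied to
 * the current task, and only then are its pages faulted in:
 *
 *	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
 *	if (r)
 *		goto release_object;
 *	r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
 *
 * Keeping a reference on current->group_leader lets later validation look
 * up the owning mm even when it runs from a worker thread.
 */
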
/**
 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
 */
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return NULL;

	if (gtt->usertask == NULL)
		return NULL;

	return gtt->usertask->mm;
}

/**
 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays inside an
 * address range for the current task.
 *
 */
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	/* Return false if no part of the ttm_tt object lies within
	 * the range
	 */
	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	return true;
}

/**
 * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
 */
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL || !gtt->userptr)
		return false;

	return true;
}

/**
 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
 */
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}

/**
 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
 *
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PDE (Page Directory Entry).
 */
uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	uint64_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && mem->mem_type == TTM_PL_TT) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm->caching_state == tt_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	return flags;
}

/**
 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
 *
 * @adev: amdgpu device
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PTE (Page Table Entry).
 */
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem)
{
	uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);

	flags |= adev->gart.gart_pte_flags;
	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}

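/*
 * Worked example of the flag composition above: for a writable,
 * cache-snooped GTT binding, amdgpu_ttm_tt_pde_flags() yields
 * AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED, and
 * amdgpu_ttm_tt_pte_flags() then ORs in the per-ASIC
 * adev->gart.gart_pte_flags plus AMDGPU_PTE_READABLE and
 * AMDGPU_PTE_WRITEABLE. A readonly userptr mapping differs only in the
 * missing AMDGPU_PTE_WRITEABLE bit.
 */
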
/**
 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
 * object.
 *
 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
 * it can find space for a new object and by ttm_bo_force_list_clean() which is
 * used to clean out a memory space.
 */
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
{
	unsigned long num_pages = bo->mem.num_pages;
	struct drm_mm_node *node = bo->mem.mm_node;
	struct dma_resv_list *flist;
	struct dma_fence *f;
	int i;

	if (bo->type == ttm_bo_type_kernel &&
	    !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
		return false;

	/* If bo is a KFD BO, check if the bo belongs to the current process.
	 * If true, then return false as any KFD process needs all its BOs to
	 * be resident to run successfully
	 */
	flist = dma_resv_get_list(bo->base.resv);
	if (flist) {
		for (i = 0; i < flist->shared_count; ++i) {
			f = rcu_dereference_protected(flist->shared[i],
				dma_resv_held(bo->base.resv));
			if (amdkfd_fence_check_mm(f, current->mm))
				return false;
		}
	}

	switch (bo->mem.mem_type) {
	case TTM_PL_TT:
		if (amdgpu_bo_is_amdgpu_bo(bo) &&
		    amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
			return false;
		return true;

	case TTM_PL_VRAM:
		/* Check each drm MM node individually */
		while (num_pages) {
			if (place->fpfn < (node->start + node->size) &&
			    !(place->lpfn && place->lpfn <= node->start))
				return true;

			num_pages -= node->size;
			++node;
		}
		return false;

	default:
		break;
	}

	return ttm_bo_eviction_valuable(bo, place);
}

50da5174 1659/**
2e603d04 1660 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
50da5174
TSD
1661 *
1662 * @bo: The buffer object to read/write
1663 * @offset: Offset into buffer object
1664 * @buf: Secondary buffer to write/read from
1665 * @len: Length in bytes of access
1666 * @write: true if writing
1667 *
1668 * This is used to access VRAM that backs a buffer object via MMIO
1669 * access for debugging purposes.
1670 */
e342610c
FK
1671static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1672 unsigned long offset,
1673 void *buf, int len, int write)
1674{
b82485fd 1675 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
e342610c 1676 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
e1d51505 1677 struct drm_mm_node *nodes;
e342610c
FK
1678 uint32_t value = 0;
1679 int ret = 0;
1680 uint64_t pos;
1681 unsigned long flags;
1682
1683 if (bo->mem.mem_type != TTM_PL_VRAM)
1684 return -EIO;
1685
f0ee63cb
CK
1686 pos = offset;
1687 nodes = amdgpu_find_mm_node(&abo->tbo.mem, &pos);
1688 pos += (nodes->start << PAGE_SHIFT);
e342610c 1689
770d13b1 1690 while (len && pos < adev->gmc.mc_vram_size) {
e342610c 1691 uint64_t aligned_pos = pos & ~(uint64_t)3;
dd1ab799 1692 uint64_t bytes = 4 - (pos & 3);
e342610c
FK
1693 uint32_t shift = (pos & 3) * 8;
1694 uint32_t mask = 0xffffffff << shift;
1695
1696 if (len < bytes) {
1697 mask &= 0xffffffff >> (bytes - len) * 8;
1698 bytes = len;
1699 }
1700
dd1ab799
CK
1701 if (mask != 0xffffffff) {
1702 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1703 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
1704 WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
1705 if (!write || mask != 0xffffffff)
1706 value = RREG32_NO_KIQ(mmMM_DATA);
1707 if (write) {
1708 value &= ~mask;
1709 value |= (*(uint32_t *)buf << shift) & mask;
1710 WREG32_NO_KIQ(mmMM_DATA, value);
1711 }
1712 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1713 if (!write) {
1714 value = (value & mask) >> shift;
1715 memcpy(buf, &value, bytes);
1716 }
1717 } else {
1718 bytes = (nodes->start + nodes->size) << PAGE_SHIFT;
1719 bytes = min(bytes - pos, (uint64_t)len & ~0x3ull);
1720
1721 amdgpu_device_vram_access(adev, pos, (uint32_t *)buf,
1722 bytes, write);
e342610c
FK
1723 }
1724
1725 ret += bytes;
1726 buf = (uint8_t *)buf + bytes;
1727 pos += bytes;
1728 len -= bytes;
1729 if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
1730 ++nodes;
1731 pos = (nodes->start << PAGE_SHIFT);
1732 }
1733 }
1734
1735 return ret;
1736}
1737
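/*
 * Illustrative sketch, not driver code: for an unaligned VRAM access,
 * amdgpu_ttm_access_memory() above rounds the position down to a dword
 * and read-modify-writes only the requested bytes through a shifted byte
 * mask.  A stand-alone model of that mask arithmetic; rmw_dword() is a
 * hypothetical name:
 */
#include <stdint.h>
#include <string.h>

/* Merge up to four source bytes at 'pos' into the dword that backs them. */
static uint32_t rmw_dword(uint32_t old, const void *buf,
			  uint64_t pos, uint64_t len)
{
	uint64_t bytes = 4 - (pos & 3);		/* bytes left in this dword */
	uint32_t shift = (pos & 3) * 8;		/* bit offset of first byte */
	uint32_t mask = 0xffffffff << shift;
	uint32_t src = 0;

	if (len < bytes) {			/* trim the mask to 'len' bytes */
		mask &= 0xffffffff >> (bytes - len) * 8;
		bytes = len;
	}
	memcpy(&src, buf, bytes);		/* little-endian, like the MMIO view */
	return (old & ~mask) | ((src << shift) & mask);
}

/*
 * Writing one byte 0xAB at pos % 4 == 2 over an old value of 0x11223344
 * yields 0x11AB3344, matching what the masked WREG32_NO_KIQ(mmMM_DATA)
 * path above stores.
 */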
d38ceaf9
AD
1738static struct ttm_bo_driver amdgpu_bo_driver = {
1739 .ttm_tt_create = &amdgpu_ttm_tt_create,
1740 .ttm_tt_populate = &amdgpu_ttm_tt_populate,
1741 .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
d38ceaf9 1742 .init_mem_type = &amdgpu_init_mem_type,
9982ca68 1743 .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
d38ceaf9
AD
1744 .evict_flags = &amdgpu_evict_flags,
1745 .move = &amdgpu_bo_move,
1746 .verify_access = &amdgpu_verify_access,
1747 .move_notify = &amdgpu_bo_move_notify,
ab2f7a5c 1748 .release_notify = &amdgpu_bo_release_notify,
d38ceaf9
AD
1749 .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
1750 .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1751 .io_mem_free = &amdgpu_ttm_io_mem_free,
9bbdcc0f 1752 .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
b61857b5
CZ
1753 .access_memory = &amdgpu_ttm_access_memory,
1754 .del_from_lru_notify = &amdgpu_vm_del_from_lru_notify
d38ceaf9
AD
1755};
1756
f5ec697e
AD
1757/*
1758 * Firmware Reservation functions
1759 */
1760/**
1761 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
1762 *
1763 * @adev: amdgpu_device pointer
1764 *
1765 * free fw reserved vram if it has been reserved.
1766 */
1767static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
1768{
87ded5ca
AD
1769 amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
1770 NULL, &adev->mman.fw_vram_usage_va);
f5ec697e
AD
1771}
1772
1773/**
1774 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
1775 *
1776 * @adev: amdgpu_device pointer
1777 *
1778 * create bo vram reservation from fw.
1779 */
1780static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1781{
de7b45ba 1782 uint64_t vram_size = adev->gmc.visible_vram_size;
de7b45ba 1783
87ded5ca
AD
1784 adev->mman.fw_vram_usage_va = NULL;
1785 adev->mman.fw_vram_usage_reserved_bo = NULL;
f5ec697e 1786
87ded5ca
AD
1787 if (adev->mman.fw_vram_usage_size == 0 ||
1788 adev->mman.fw_vram_usage_size > vram_size)
de7b45ba 1789 return 0;
f5ec697e 1790
de7b45ba 1791 return amdgpu_bo_create_kernel_at(adev,
87ded5ca
AD
1792 adev->mman.fw_vram_usage_start_offset,
1793 adev->mman.fw_vram_usage_size,
de7b45ba 1794 AMDGPU_GEM_DOMAIN_VRAM,
87ded5ca
AD
1795 &adev->mman.fw_vram_usage_reserved_bo,
1796 &adev->mman.fw_vram_usage_va);
f5ec697e 1797}
de7b45ba 1798
778e8c42
TY
1799/*
1800 * Memory training reservation functions
1801 */
1802
1803/**
1804 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
1805 *
1806 * @adev: amdgpu_device pointer
1807 *
1808 * free memory training reserved vram if it has been reserved.
1809 */
1810static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
1811{
1812 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1813
1814 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
1815 amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
1816 ctx->c2p_bo = NULL;
1817
778e8c42
TY
1818 return 0;
1819}
1820
83d7f66a 1821static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
8d40002f 1822{
83d7f66a
LG
1823 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1824
1825 memset(ctx, 0, sizeof(*ctx));
8d40002f 1826
83d7f66a 1827 ctx->c2p_train_data_offset =
72de33f8 1828 ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M);
83d7f66a
LG
1829 ctx->p2c_train_data_offset =
1830 (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
1831 ctx->train_data_size =
1832 GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
1833
1834 DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
1835 ctx->train_data_size,
1836 ctx->p2c_train_data_offset,
1837 ctx->c2p_train_data_offset);
8d40002f
TY
1838}
1839
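/*
 * Illustrative arithmetic, not driver code: for a hypothetical 8 GiB
 * board with a 4 MiB discovery TMR, the offsets computed above place
 * both training blocks just below the TMR at the top of VRAM.  A
 * stand-alone check of the c2p offset calculation:
 */
#include <stdint.h>
#include <stdio.h>

#define SZ_1M		0x100000ULL
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t vram_size = 8ULL << 30;	/* assumed 8 GiB of VRAM */
	uint64_t tmr_size = 4 * SZ_1M;		/* assumed discovery TMR size */
	uint64_t c2p = ALIGN_UP(vram_size - tmr_size - SZ_1M, SZ_1M);

	/* prints c2p_train_data_offset = 0x1ffb00000 */
	printf("c2p_train_data_offset = %#llx\n", (unsigned long long)c2p);
	return 0;
}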
83d7f66a
LG
1840/*
1841 * reserve TMR memory at the top of VRAM which holds
1842 * IP Discovery data and is protected by PSP.
778e8c42 1843 */
83d7f66a 1844static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
778e8c42
TY
1845{
1846 int ret;
1847 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
83d7f66a 1848 bool mem_train_support = false;
778e8c42 1849
83d7f66a 1850 if (!amdgpu_sriov_vf(adev)) {
72d208c2 1851 ret = amdgpu_mem_train_support(adev);
2c6e83a1 1852 if (ret == 1)
83d7f66a 1853 mem_train_support = true;
2c6e83a1 1854 else if (ret == -1)
72d208c2
LG
1855 return -EINVAL;
1856 else
83d7f66a 1857 DRM_DEBUG("memory training is not supported!\n");
778e8c42
TY
1858 }
1859
83d7f66a
LG
1860 /*
1861 * Query the reserved TMR size through the atom firmwareinfo table for
1862 * Sienna_Cichlid and onwards, covering all use cases (IP discovery,
1863 * G6 memory training, profiling, diagnostic data, etc.).
1864 * Otherwise, fall back to the legacy approach of reserving separate
1865 * TMR blocks for IP discovery data and G6 memory training data.
1866 */
72de33f8 1867 adev->mman.discovery_tmr_size =
83d7f66a 1868 amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
72de33f8
AD
1869 if (!adev->mman.discovery_tmr_size)
1870 adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET;
2c6e83a1
LG
1871
1872 if (mem_train_support) {
1873 /* reserve vram for mem train according to TMR location */
1874 amdgpu_ttm_training_data_block_init(adev);
1875 ret = amdgpu_bo_create_kernel_at(adev,
778e8c42
TY
1876 ctx->c2p_train_data_offset,
1877 ctx->train_data_size,
1878 AMDGPU_GEM_DOMAIN_VRAM,
1879 &ctx->c2p_bo,
1880 NULL);
2c6e83a1
LG
1881 if (ret) {
1882 DRM_ERROR("alloc c2p_bo failed (%d)!\n", ret);
1883 amdgpu_ttm_training_reserve_vram_fini(adev);
1884 return ret;
83d7f66a 1885 }
2c6e83a1 1886 ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
83d7f66a 1887 }
778e8c42 1888
778e8c42 1889 ret = amdgpu_bo_create_kernel_at(adev,
72de33f8
AD
1890 adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
1891 adev->mman.discovery_tmr_size,
83d7f66a 1892 AMDGPU_GEM_DOMAIN_VRAM,
72de33f8 1893 &adev->mman.discovery_memory,
83d7f66a 1894 NULL);
778e8c42 1895 if (ret) {
83d7f66a 1896 DRM_ERROR("alloc tmr failed (%d)!\n", ret);
72de33f8 1897 amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
33a9a5ab 1898 return ret;
778e8c42
TY
1899 }
1900
778e8c42 1901 return 0;
778e8c42
TY
1902}
1903
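/*
 * Hedged usage sketch, not driver code: the reservation primitive used
 * throughout this function is amdgpu_bo_create_kernel_at(), which pins a
 * kernel BO at a fixed byte offset from the start of VRAM.  Assuming a
 * hypothetical region at the top of VRAM, a reservation mirroring the
 * discovery TMR above would look like:
 */
static int example_reserve_top_of_vram(struct amdgpu_device *adev,
				       uint64_t size, struct amdgpu_bo **bo)
{
	/* last 'size' bytes of VRAM; pass a cpu_addr pointer instead of
	 * the trailing NULL when a CPU mapping of the BO is needed */
	return amdgpu_bo_create_kernel_at(adev,
					  adev->gmc.real_vram_size - size,
					  size, AMDGPU_GEM_DOMAIN_VRAM,
					  bo, NULL);
}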
50da5174 1904/**
2e603d04
HR
1905 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
1906 * gtt/vram related fields.
50da5174
TSD
1907 *
1908 * This initializes all of the memory space pools that the TTM layer
1909 * will need such as the GTT space (system memory mapped to the device),
1910 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
1911 * can be mapped per VMID.
1912 */
d38ceaf9
AD
1913int amdgpu_ttm_init(struct amdgpu_device *adev)
1914{
36d38372 1915 uint64_t gtt_size;
d38ceaf9 1916 int r;
218b5dcd 1917 u64 vis_vram_limit;
d38ceaf9 1918
a64f784b
CK
1919 mutex_init(&adev->mman.gtt_window_lock);
1920
d38ceaf9
AD
1921 /* No other user of the address space, so set it to 0 */
1922 r = ttm_bo_device_init(&adev->mman.bdev,
d38ceaf9 1923 &amdgpu_bo_driver,
4a580877
LT
1924 adev_to_drm(adev)->anon_inode->i_mapping,
1925 adev_to_drm(adev)->vma_offset_manager,
90489ce1 1926 dma_addressing_limited(adev->dev));
d38ceaf9
AD
1927 if (r) {
1928 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1929 return r;
1930 }
1931 adev->mman.initialized = true;
7cce9584
AG
1932
1933 /* We opt to avoid OOM on system page allocations */
1934 adev->mman.bdev.no_retry = true;
1935
50da5174 1936 /* Initialize VRAM pool with all of VRAM divided into pages */
d38ceaf9 1937 r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
770d13b1 1938 adev->gmc.real_vram_size >> PAGE_SHIFT);
d38ceaf9
AD
1939 if (r) {
1940 DRM_ERROR("Failed initializing VRAM heap.\n");
1941 return r;
1942 }
218b5dcd
JB
1943
1944 /* Reduce size of CPU-visible VRAM if requested */
1945 vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
1946 if (amdgpu_vis_vram_limit > 0 &&
770d13b1
CK
1947 vis_vram_limit <= adev->gmc.visible_vram_size)
1948 adev->gmc.visible_vram_size = vis_vram_limit;
218b5dcd 1949
d38ceaf9 1950 /* Change the size here instead of the init above so only lpfn is affected */
57adc4ce 1951 amdgpu_ttm_set_buffer_funcs_status(adev, false);
f8f4b9a6
AL
1952#ifdef CONFIG_64BIT
1953 adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
1954 adev->gmc.visible_vram_size);
1955#endif
d38ceaf9 1956
a05502e5
HC
1957 /*
1958 * The reserved VRAM for firmware must be pinned to the specified
1959 * place in VRAM, so reserve it early.
1960 */
f5ec697e 1961 r = amdgpu_ttm_fw_reserve_vram_init(adev);
a05502e5
HC
1962 if (r)
1963 return r;
1965
778e8c42 1966 /*
83d7f66a
LG
1967 * Only NAVI10 and onwards ASICs support IP discovery.
1968 * If IP discovery is enabled, a block of memory should be
1969 * reserved for the IP discovery data.
778e8c42 1970 */
72de33f8 1971 if (adev->mman.discovery_bin) {
83d7f66a 1972 r = amdgpu_ttm_reserve_tmr(adev);
e862b08b
ML
1973 if (r)
1974 return r;
1975 }
778e8c42 1976
50da5174
TSD
1977 /* allocate memory as required for VGA
1978 * This is used for VGA emulation and pre-OS scanout buffers to
1979 * avoid display artifacts while transitioning between pre-OS
1980 * and driver. */
cacbbe7c 1981 r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
adb5be81 1982 AMDGPU_GEM_DOMAIN_VRAM,
cacbbe7c 1983 &adev->mman.stolen_vga_memory,
14b18937 1984 NULL);
52975728
CK
1985 if (r)
1986 return r;
cacbbe7c
AD
1987 r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
1988 adev->mman.stolen_extended_size,
06350194 1989 AMDGPU_GEM_DOMAIN_VRAM,
cacbbe7c 1990 &adev->mman.stolen_extended_memory,
14b18937 1991 NULL);
06350194
AD
1992 if (r)
1993 return r;
5f6a556f 1994
d38ceaf9 1995 DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
770d13b1 1996 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
36d38372 1997
50da5174
TSD
1998 /* Compute GTT size, either based on 3/4 of the RAM size
1999 * or whatever the user passed on module init */
424e2c85
RH
2000 if (amdgpu_gtt_size == -1) {
2001 struct sysinfo si;
2002
2003 si_meminfo(&si);
24562523 2004 gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
770d13b1 2005 adev->gmc.mc_vram_size),
24562523
AG
2006 ((uint64_t)si.totalram * si.mem_unit * 3/4));
2007 } else {
2008 gtt_size = (uint64_t)amdgpu_gtt_size << 20;
2009 }
50da5174
TSD
2010
2011 /* Initialize GTT memory pool */
36d38372 2012 r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
d38ceaf9
AD
2013 if (r) {
2014 DRM_ERROR("Failed initializing GTT heap.\n");
2015 return r;
2016 }
2017 DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
36d38372 2018 (unsigned)(gtt_size / (1024 * 1024)));
d38ceaf9 2019
50da5174 2020 /* Initialize various on-chip memory pools */
c832c346 2021 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
dca29491 2022 adev->gds.gds_size);
c832c346
CK
2023 if (r) {
2024 DRM_ERROR("Failed initializing GDS heap.\n");
2025 return r;
d38ceaf9
AD
2026 }
2027
c832c346 2028 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
dca29491 2029 adev->gds.gws_size);
c832c346
CK
2030 if (r) {
2031 DRM_ERROR("Failed initializing GWS heap.\n");
2032 return r;
d38ceaf9
AD
2033 }
2034
c832c346 2035 r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
dca29491 2036 adev->gds.oa_size);
c832c346
CK
2037 if (r) {
2038 DRM_ERROR("Failed initializing OA heap.\n");
2039 return r;
d38ceaf9
AD
2040 }
2041
d38ceaf9
AD
2042 return 0;
2043}
2044
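/*
 * Illustrative arithmetic, not driver code: with amdgpu_gtt_size == -1
 * the GTT pool above defaults to min(max(default, vram), 3/4 of RAM).
 * A stand-alone model with hypothetical sizes (16 GiB RAM, 8 GiB VRAM,
 * and an assumed 3 GiB AMDGPU_DEFAULT_GTT_SIZE_MB):
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }
static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

int main(void)
{
	uint64_t def = 3072ULL << 20;	/* default GTT size, assumed 3 GiB */
	uint64_t vram = 8ULL << 30;	/* gmc.mc_vram_size */
	uint64_t ram = 16ULL << 30;	/* si.totalram * si.mem_unit */
	uint64_t gtt = min_u64(max_u64(def, vram), ram * 3 / 4);

	/* prints "GTT pool: 8192 MiB": VRAM wins over the default,
	 * and 12 GiB (3/4 of RAM) does not cap it */
	printf("GTT pool: %llu MiB\n", (unsigned long long)(gtt >> 20));
	return 0;
}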
50da5174 2045/**
2e603d04 2046 * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm
50da5174 2047 */
6f752ec2
AG
2048void amdgpu_ttm_late_init(struct amdgpu_device *adev)
2049{
50da5174 2050 /* return the VGA stolen memory (if any) back to VRAM */
cacbbe7c
AD
2051 if (!adev->mman.keep_stolen_vga_memory)
2052 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
2053 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
6f752ec2
AG
2054}
2055
50da5174
TSD
2056/**
2057 * amdgpu_ttm_fini - De-initialize the TTM memory pools
2058 */
d38ceaf9
AD
2059void amdgpu_ttm_fini(struct amdgpu_device *adev)
2060{
d38ceaf9
AD
2061 if (!adev->mman.initialized)
2062 return;
11c6b82a 2063
778e8c42 2064 amdgpu_ttm_training_reserve_vram_fini(adev);
5db62dc8 2065 /* return the stolen vga memory back to VRAM */
cacbbe7c
AD
2066 if (adev->mman.keep_stolen_vga_memory)
2067 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
224f82e5 2068 /* return the IP Discovery TMR memory back to VRAM */
72de33f8 2069 amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
f5ec697e 2070 amdgpu_ttm_fw_reserve_vram_fini(adev);
224f82e5 2071
f8f4b9a6
AL
2072 if (adev->mman.aper_base_kaddr)
2073 iounmap(adev->mman.aper_base_kaddr);
2074 adev->mman.aper_base_kaddr = NULL;
11c6b82a 2075
d38ceaf9
AD
2076 ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
2077 ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
c832c346
CK
2078 ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
2079 ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
2080 ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
d38ceaf9 2081 ttm_bo_device_release(&adev->mman.bdev);
d38ceaf9
AD
2082 adev->mman.initialized = false;
2083 DRM_INFO("amdgpu: ttm finalized\n");
2084}
2085
57adc4ce
CK
2086/**
2087 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
2088 *
2089 * @adev: amdgpu_device pointer
2090 * @enable: true when we can use buffer functions.
2091 *
2092 * Enable/disable use of buffer functions during suspend/resume. This should
2093 * only be called at bootup or when userspace isn't running.
2094 */
2095void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
d38ceaf9 2096{
57adc4ce
CK
2097 struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
2098 uint64_t size;
b7d85e1d 2099 int r;
d38ceaf9 2100
53b3f8f4 2101 if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
b7d85e1d 2102 adev->mman.buffer_funcs_enabled == enable)
d38ceaf9
AD
2103 return;
2104
b7d85e1d
CK
2105 if (enable) {
2106 struct amdgpu_ring *ring;
b3ac1766 2107 struct drm_gpu_scheduler *sched;
b7d85e1d
CK
2108
2109 ring = adev->mman.buffer_funcs_ring;
b3ac1766
ND
2110 sched = &ring->sched;
2111 r = drm_sched_entity_init(&adev->mman.entity,
e2d732fd 2112 DRM_SCHED_PRIORITY_KERNEL, &sched,
b3ac1766 2113 1, NULL);
b7d85e1d
CK
2114 if (r) {
2115 DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
2116 r);
2117 return;
2118 }
2119 } else {
cdc50176 2120 drm_sched_entity_destroy(&adev->mman.entity);
7766484b
AG
2121 dma_fence_put(man->move);
2122 man->move = NULL;
b7d85e1d
CK
2123 }
2124
d38ceaf9 2125 /* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */
57adc4ce
CK
2126 if (enable)
2127 size = adev->gmc.real_vram_size;
2128 else
2129 size = adev->gmc.visible_vram_size;
d38ceaf9 2130 man->size = size >> PAGE_SHIFT;
81988f9c 2131 adev->mman.buffer_funcs_enabled = enable;
d38ceaf9
AD
2132}
2133
d38ceaf9
AD
2134int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
2135{
bed2dd84 2136 struct drm_file *file_priv = filp->private_data;
1348969a 2137 struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
d38ceaf9 2138
e176fe17 2139 if (!adev)
d38ceaf9 2140 return -EINVAL;
e176fe17
CK
2141
2142 return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
d38ceaf9
AD
2143}
2144
fc9c8f54
CK
2145int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
2146 uint64_t dst_offset, uint32_t byte_count,
52791eee 2147 struct dma_resv *resv,
fc9c8f54 2148 struct dma_fence **fence, bool direct_submit,
c9dc9cfe 2149 bool vm_needs_flush, bool tmz)
d38ceaf9 2150{
9ecefb19
CK
2151 enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
2152 AMDGPU_IB_POOL_DELAYED;
d38ceaf9 2153 struct amdgpu_device *adev = ring->adev;
d71518b5
CK
2154 struct amdgpu_job *job;
2155
d38ceaf9
AD
2156 uint32_t max_bytes;
2157 unsigned num_loops, num_dw;
2158 unsigned i;
2159 int r;
2160
c66ed765 2161 if (direct_submit && !ring->sched.ready) {
81988f9c
CK
2162 DRM_ERROR("Trying to move memory with ring turned off.\n");
2163 return -EINVAL;
2164 }
2165
d38ceaf9
AD
2166 max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
2167 num_loops = DIV_ROUND_UP(byte_count, max_bytes);
4e930d96 2168 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
c7ae72c0 2169
9ecefb19 2170 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
d71518b5 2171 if (r)
9066b0c3 2172 return r;
c7ae72c0 2173
cbd52851 2174 if (vm_needs_flush) {
11c3a249 2175 job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
cbd52851
CK
2176 job->vm_needs_flush = true;
2177 }
c7ae72c0 2178 if (resv) {
e86f9cee 2179 r = amdgpu_sync_resv(adev, &job->sync, resv,
5d319660
CK
2180 AMDGPU_SYNC_ALWAYS,
2181 AMDGPU_FENCE_OWNER_UNDEFINED);
c7ae72c0
CZ
2182 if (r) {
2183 DRM_ERROR("sync failed (%d).\n", r);
2184 goto error_free;
2185 }
d38ceaf9 2186 }
d38ceaf9
AD
2187
2188 for (i = 0; i < num_loops; i++) {
2189 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
2190
d71518b5 2191 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
c9dc9cfe 2192 dst_offset, cur_size_in_bytes, tmz);
d38ceaf9
AD
2193
2194 src_offset += cur_size_in_bytes;
2195 dst_offset += cur_size_in_bytes;
2196 byte_count -= cur_size_in_bytes;
2197 }
2198
d71518b5
CK
2199 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2200 WARN_ON(job->ibs[0].length_dw > num_dw);
ee913fd9
CK
2201 if (direct_submit)
2202 r = amdgpu_job_submit_direct(job, ring, fence);
2203 else
0e28b10f 2204 r = amdgpu_job_submit(job, &adev->mman.entity,
e24db985 2205 AMDGPU_FENCE_OWNER_UNDEFINED, fence);
ee913fd9
CK
2206 if (r)
2207 goto error_free;
d38ceaf9 2208
e24db985 2209 return r;
d71518b5 2210
c7ae72c0 2211error_free:
d71518b5 2212 amdgpu_job_free(job);
ee913fd9 2213 DRM_ERROR("Error scheduling IBs (%d)\n", r);
c7ae72c0 2214 return r;
d38ceaf9
AD
2215}
2216
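/*
 * Illustrative arithmetic, not driver code: amdgpu_copy_buffer() above
 * splits a copy into engine-sized chunks and sizes the IB from the chunk
 * count.  A stand-alone model; the engine limits below are assumptions,
 * not values read from any specific ASIC:
 */
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t byte_count = 10ULL << 20;	/* 10 MiB copy request */
	uint32_t max_bytes = 0x3fff00;		/* assumed copy_max_bytes */
	uint32_t copy_num_dw = 7;		/* assumed dwords per copy packet */
	uint64_t num_loops = DIV_ROUND_UP(byte_count, max_bytes);
	uint64_t num_dw = ALIGN_UP(num_loops * copy_num_dw, 8);

	/* prints "3 loops, 24 dwords of IB space" for these inputs */
	printf("%llu loops, %llu dwords of IB space\n",
	       (unsigned long long)num_loops, (unsigned long long)num_dw);
	return 0;
}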
59b4a977 2217int amdgpu_fill_buffer(struct amdgpu_bo *bo,
44e1baeb 2218 uint32_t src_data,
52791eee 2219 struct dma_resv *resv,
f29224a6 2220 struct dma_fence **fence)
59b4a977 2221{
a7d64de6 2222 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
44e1baeb 2223 uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
59b4a977
FC
2224 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2225
f29224a6
CK
2226 struct drm_mm_node *mm_node;
2227 unsigned long num_pages;
59b4a977 2228 unsigned int num_loops, num_dw;
f29224a6
CK
2229
2230 struct amdgpu_job *job;
59b4a977
FC
2231 int r;
2232
81988f9c 2233 if (!adev->mman.buffer_funcs_enabled) {
f29224a6
CK
2234 DRM_ERROR("Trying to clear memory with ring turned off.\n");
2235 return -EINVAL;
2236 }
2237
92c60d9c 2238 if (bo->tbo.mem.mem_type == TTM_PL_TT) {
c5835bbb 2239 r = amdgpu_ttm_alloc_gart(&bo->tbo);
92c60d9c
CK
2240 if (r)
2241 return r;
2242 }
2243
f29224a6
CK
2244 num_pages = bo->tbo.num_pages;
2245 mm_node = bo->tbo.mem.mm_node;
2246 num_loops = 0;
2247 while (num_pages) {
7e4dec58 2248 uint64_t byte_count = mm_node->size << PAGE_SHIFT;
f29224a6 2249
7e4dec58 2250 num_loops += DIV_ROUND_UP_ULL(byte_count, max_bytes);
f29224a6
CK
2251 num_pages -= mm_node->size;
2252 ++mm_node;
2253 }
44e1baeb 2254 num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
59b4a977
FC
2255
2256 /* for IB padding */
f29224a6 2257 num_dw += 64;
59b4a977 2258
9ecefb19
CK
2259 r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
2260 &job);
59b4a977
FC
2261 if (r)
2262 return r;
2263
2264 if (resv) {
2265 r = amdgpu_sync_resv(adev, &job->sync, resv,
5d319660
CK
2266 AMDGPU_SYNC_ALWAYS,
2267 AMDGPU_FENCE_OWNER_UNDEFINED);
59b4a977
FC
2268 if (r) {
2269 DRM_ERROR("sync failed (%d).\n", r);
2270 goto error_free;
2271 }
2272 }
2273
f29224a6
CK
2274 num_pages = bo->tbo.num_pages;
2275 mm_node = bo->tbo.mem.mm_node;
59b4a977 2276
f29224a6 2277 while (num_pages) {
7e4dec58 2278 uint64_t byte_count = mm_node->size << PAGE_SHIFT;
f29224a6 2279 uint64_t dst_addr;
59b4a977 2280
92c60d9c 2281 dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
f29224a6 2282 while (byte_count) {
7e4dec58
FK
2283 uint32_t cur_size_in_bytes = min_t(uint64_t, byte_count,
2284 max_bytes);
f29224a6 2285
44e1baeb
CK
2286 amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
2287 dst_addr, cur_size_in_bytes);
f29224a6
CK
2288
2289 dst_addr += cur_size_in_bytes;
2290 byte_count -= cur_size_in_bytes;
2291 }
2292
2293 num_pages -= mm_node->size;
2294 ++mm_node;
59b4a977
FC
2295 }
2296
2297 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2298 WARN_ON(job->ibs[0].length_dw > num_dw);
0e28b10f 2299 r = amdgpu_job_submit(job, &adev->mman.entity,
f29224a6 2300 AMDGPU_FENCE_OWNER_UNDEFINED, fence);
59b4a977
FC
2301 if (r)
2302 goto error_free;
2303
2304 return 0;
2305
2306error_free:
2307 amdgpu_job_free(job);
2308 return r;
2309}
2310
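/*
 * Illustrative sketch, not driver code: amdgpu_fill_buffer() above sizes
 * its IB with a first pass over every drm_mm node of the BO, since a
 * VRAM BO can be scattered across several nodes.  A stand-alone model of
 * that pass; the struct and function names are hypothetical:
 */
#include <stdint.h>

struct node_model {
	uint64_t size;	/* node size in pages, like drm_mm_node */
};

static uint64_t fill_num_loops(const struct node_model *node,
			       unsigned long num_pages,
			       uint64_t max_bytes, uint64_t page_size)
{
	uint64_t num_loops = 0;

	while (num_pages) {
		uint64_t byte_count = node->size * page_size;

		/* one fill packet per max_bytes chunk of this node */
		num_loops += (byte_count + max_bytes - 1) / max_bytes;
		num_pages -= node->size;
		++node;
	}
	return num_loops;
}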
d38ceaf9
AD
2311#if defined(CONFIG_DEBUG_FS)
2312
2313static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
2314{
2315 struct drm_info_node *node = (struct drm_info_node *)m->private;
0ee86853 2316 unsigned ttm_pl = (uintptr_t)node->info_ent->data;
d38ceaf9 2317 struct drm_device *dev = node->minor->dev;
1348969a 2318 struct amdgpu_device *adev = drm_to_adev(dev);
12d4ac58 2319 struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
b5c3714f 2320 struct drm_printer p = drm_seq_file_printer(m);
d38ceaf9 2321
12d4ac58 2322 man->func->debug(man, &p);
b5c3714f 2323 return 0;
d38ceaf9
AD
2324}
2325
06ab6832 2326static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
0ee86853
CK
2327 {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
2328 {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
2329 {"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
2330 {"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
2331 {"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
d38ceaf9
AD
2332 {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
2333#ifdef CONFIG_SWIOTLB
2334 {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
2335#endif
2336};
2337
50da5174
TSD
2338/**
2339 * amdgpu_ttm_vram_read - Linear read access to VRAM
2340 *
2341 * Accesses VRAM via MMIO for debugging purposes.
2342 */
d38ceaf9
AD
2343static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2344 size_t size, loff_t *pos)
2345{
45063097 2346 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9 2347 ssize_t result = 0;
d38ceaf9
AD
2348
2349 if (size & 0x3 || *pos & 0x3)
2350 return -EINVAL;
2351
770d13b1 2352 if (*pos >= adev->gmc.mc_vram_size)
9156e723
TSD
2353 return -ENXIO;
2354
030d5b97 2355 size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
d38ceaf9 2356 while (size) {
030d5b97
CK
2357 size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
2358 uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
d38ceaf9 2359
030d5b97 2360 amdgpu_device_vram_access(adev, *pos, value, bytes, false);
434cbcb1
DC
2361 if (copy_to_user(buf, value, bytes))
2362 return -EFAULT;
d38ceaf9 2363
030d5b97
CK
2364 result += bytes;
2365 buf += bytes;
2366 *pos += bytes;
2367 size -= bytes;
d38ceaf9
AD
2368 }
2369
2370 return result;
2371}
2372
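/*
 * Hedged usage sketch, not driver code: the handler above backs a
 * debugfs file, created in amdgpu_ttm_debugfs_init() below; the path is
 * an assumption and depends on the DRM minor number.  Reads must be
 * dword sized and aligned or the handler returns -EINVAL:
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t dwords[4];
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_vram", O_RDONLY);

	if (fd < 0)
		return 1;
	/* offset and size are multiples of 4, as amdgpu_ttm_vram_read
	 * requires; the offset indexes VRAM from its start */
	if (pread(fd, dwords, sizeof(dwords), 0x1000) == sizeof(dwords))
		printf("VRAM[0x1000] = 0x%08x\n", dwords[0]);
	close(fd);
	return 0;
}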
50da5174
TSD
2373/**
2374 * amdgpu_ttm_vram_write - Linear write access to VRAM
2375 *
2376 * Accesses VRAM via MMIO for debugging purposes.
2377 */
08cab989
TSD
2378static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
2379 size_t size, loff_t *pos)
2380{
2381 struct amdgpu_device *adev = file_inode(f)->i_private;
2382 ssize_t result = 0;
2383 int r;
2384
2385 if (size & 0x3 || *pos & 0x3)
2386 return -EINVAL;
2387
770d13b1 2388 if (*pos >= adev->gmc.mc_vram_size)
08cab989
TSD
2389 return -ENXIO;
2390
2391 while (size) {
2392 unsigned long flags;
2393 uint32_t value;
2394
770d13b1 2395 if (*pos >= adev->gmc.mc_vram_size)
08cab989
TSD
2396 return result;
2397
2398 r = get_user(value, (uint32_t *)buf);
2399 if (r)
2400 return r;
2401
2402 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
c3057281
TSD
2403 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
2404 WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
2405 WREG32_NO_KIQ(mmMM_DATA, value);
08cab989
TSD
2406 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
2407
2408 result += 4;
2409 buf += 4;
2410 *pos += 4;
2411 size -= 4;
2412 }
2413
2414 return result;
2415}
2416
d38ceaf9
AD
2417static const struct file_operations amdgpu_ttm_vram_fops = {
2418 .owner = THIS_MODULE,
2419 .read = amdgpu_ttm_vram_read,
08cab989
TSD
2420 .write = amdgpu_ttm_vram_write,
2421 .llseek = default_llseek,
d38ceaf9
AD
2422};
2423
a1d29476
CK
2424#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
2425
50da5174
TSD
2426/**
2427 * amdgpu_ttm_gtt_read - Linear read access to GTT memory
2428 */
d38ceaf9
AD
2429static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
2430 size_t size, loff_t *pos)
2431{
45063097 2432 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9
AD
2433 ssize_t result = 0;
2434 int r;
2435
2436 while (size) {
2437 loff_t p = *pos / PAGE_SIZE;
2438 unsigned off = *pos & ~PAGE_MASK;
2439 size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
2440 struct page *page;
2441 void *ptr;
2442
2443 if (p >= adev->gart.num_cpu_pages)
2444 return result;
2445
2446 page = adev->gart.pages[p];
2447 if (page) {
2448 ptr = kmap(page);
2449 ptr += off;
2450
2451 r = copy_to_user(buf, ptr, cur_size);
2452 kunmap(adev->gart.pages[p]);
2453 } else
2454 r = clear_user(buf, cur_size);
2455
2456 if (r)
2457 return -EFAULT;
2458
2459 result += cur_size;
2460 buf += cur_size;
2461 *pos += cur_size;
2462 size -= cur_size;
2463 }
2464
2465 return result;
2466}
2467
2468static const struct file_operations amdgpu_ttm_gtt_fops = {
2469 .owner = THIS_MODULE,
2470 .read = amdgpu_ttm_gtt_read,
2471 .llseek = default_llseek
2472};
2473
2474#endif
2475
50da5174
TSD
2476/**
2477 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
2478 *
2479 * This function is used to read memory that has been mapped to the
2480 * GPU and the known addresses are not physical addresses but instead
2481 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2482 */
ebb043f2
TSD
2483static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
2484 size_t size, loff_t *pos)
38290b2c
TSD
2485{
2486 struct amdgpu_device *adev = file_inode(f)->i_private;
38290b2c 2487 struct iommu_domain *dom;
ebb043f2
TSD
2488 ssize_t result = 0;
2489 int r;
38290b2c 2490
50da5174 2491 /* retrieve the IOMMU domain if any for this device */
ebb043f2 2492 dom = iommu_get_domain_for_dev(adev->dev);
38290b2c 2493
ebb043f2
TSD
2494 while (size) {
2495 phys_addr_t addr = *pos & PAGE_MASK;
2496 loff_t off = *pos & ~PAGE_MASK;
2497 size_t bytes = PAGE_SIZE - off;
2498 unsigned long pfn;
2499 struct page *p;
2500 void *ptr;
2501
2502 bytes = bytes < size ? bytes : size;
2503
50da5174
TSD
2504 /* Translate the bus address to a physical address. If
2505 * the domain is NULL it means there is no IOMMU active
2506 * and the address translation is the identity
2507 */
ebb043f2
TSD
2508 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2509
2510 pfn = addr >> PAGE_SHIFT;
2511 if (!pfn_valid(pfn))
2512 return -EPERM;
2513
2514 p = pfn_to_page(pfn);
2515 if (p->mapping != adev->mman.bdev.dev_mapping)
2516 return -EPERM;
2517
2518 ptr = kmap(p);
864917a3 2519 r = copy_to_user(buf, ptr + off, bytes);
ebb043f2
TSD
2520 kunmap(p);
2521 if (r)
2522 return -EFAULT;
2523
2524 size -= bytes;
2525 *pos += bytes;
2526 result += bytes;
2527 }
2528
2529 return result;
2530}
2531
50da5174
TSD
2532/**
2533 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
2534 *
2535 * This function is used to write memory that has been mapped to the
2536 * GPU and the known addresses are not physical addresses but instead
2537 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2538 */
ebb043f2
TSD
2539static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2540 size_t size, loff_t *pos)
2541{
2542 struct amdgpu_device *adev = file_inode(f)->i_private;
2543 struct iommu_domain *dom;
2544 ssize_t result = 0;
2545 int r;
38290b2c
TSD
2546
2547 dom = iommu_get_domain_for_dev(adev->dev);
a40cfa0b 2548
ebb043f2
TSD
2549 while (size) {
2550 phys_addr_t addr = *pos & PAGE_MASK;
2551 loff_t off = *pos & ~PAGE_MASK;
2552 size_t bytes = PAGE_SIZE - off;
2553 unsigned long pfn;
2554 struct page *p;
2555 void *ptr;
2556
2557 bytes = bytes < size ? bytes : size;
38290b2c 2558
ebb043f2
TSD
2559 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2560
2561 pfn = addr >> PAGE_SHIFT;
2562 if (!pfn_valid(pfn))
2563 return -EPERM;
2564
2565 p = pfn_to_page(pfn);
2566 if (p->mapping != adev->mman.bdev.dev_mapping)
2567 return -EPERM;
2568
2569 ptr = kmap(p);
864917a3 2570 r = copy_from_user(ptr + off, buf, bytes);
ebb043f2
TSD
2571 kunmap(p);
2572 if (r)
2573 return -EFAULT;
2574
2575 size -= bytes;
2576 *pos += bytes;
2577 result += bytes;
2578 }
2579
2580 return result;
38290b2c
TSD
2581}
2582
ebb043f2 2583static const struct file_operations amdgpu_ttm_iomem_fops = {
38290b2c 2584 .owner = THIS_MODULE,
ebb043f2
TSD
2585 .read = amdgpu_iomem_read,
2586 .write = amdgpu_iomem_write,
38290b2c
TSD
2587 .llseek = default_llseek
2588};
a40cfa0b
TSD
2589
2590static const struct {
2591 char *name;
2592 const struct file_operations *fops;
2593 int domain;
2594} ttm_debugfs_entries[] = {
2595 { "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
2596#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
2597 { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
2598#endif
ebb043f2 2599 { "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
a40cfa0b
TSD
2600};
2601
a1d29476
CK
2602#endif
2603
c5820361 2604int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
d38ceaf9
AD
2605{
2606#if defined(CONFIG_DEBUG_FS)
2607 unsigned count;
2608
4a580877 2609 struct drm_minor *minor = adev_to_drm(adev)->primary;
d38ceaf9
AD
2610 struct dentry *ent, *root = minor->debugfs_root;
2611
a40cfa0b
TSD
2612 for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
2613 ent = debugfs_create_file(
2614 ttm_debugfs_entries[count].name,
2615 S_IFREG | S_IRUGO, root,
2616 adev,
2617 ttm_debugfs_entries[count].fops);
2618 if (IS_ERR(ent))
2619 return PTR_ERR(ent);
2620 if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
770d13b1 2621 i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
a40cfa0b 2622 else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
770d13b1 2623 i_size_write(ent->d_inode, adev->gmc.gart_size);
a40cfa0b
TSD
2624 adev->mman.debugfs_entries[count] = ent;
2625 }
d38ceaf9
AD
2626
2627 count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
2628
2629#ifdef CONFIG_SWIOTLB
fd5fd480 2630 if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
d38ceaf9
AD
2631 --count;
2632#endif
2633
2634 return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
2635#else
d38ceaf9
AD
2636 return 0;
2637#endif
2638}