drm/amdgpu: use VRAM|GTT for a bunch of kernel allocations
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/pagemap.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/dma-buf.h>
#include <linux/sizes.h>
#include <linux/module.h>

#include <drm/drm_drv.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>

#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
#include "amdgpu_hmm.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_res_cursor.h"
#include "bif/bif_4_1_d.h"

MODULE_IMPORT_NS(DMA_BUF);

#define AMDGPU_TTM_VRAM_MAX_DW_READ	(size_t)128

static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
				   struct ttm_tt *ttm,
				   struct ttm_resource *bo_mem);
static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
				      struct ttm_tt *ttm);

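/*
 * amdgpu_ttm_init_on_chip - create a TTM range manager for one of the
 * small on-chip memory domains (GDS, GWS, OA); a plain page-granular
 * range manager without a TT backend is all these pools need.
 */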
static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
				   unsigned int type,
				   uint64_t size_in_page)
{
	return ttm_range_man_init(&adev->mman.bdev, type,
				  false, size_in_page);
}

/**
 * amdgpu_evict_flags - Compute placement flags
 *
 * @bo: The buffer object to evict
 * @placement: Possible destination(s) for evicted BO
 *
 * Fill in placement data when ttm_bo_evict() is called
 */
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0
	};

	/* Don't handle scatter gather BOs */
	if (bo->type == ttm_bo_type_sg) {
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;
	}

	/* Object isn't an AMDGPU object so ignore */
	if (!amdgpu_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}

	abo = ttm_to_amdgpu_bo(bo);
	if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) {
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;
	}

	switch (bo->resource->mem_type) {
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;

	case TTM_PL_VRAM:
		if (!adev->mman.buffer_funcs_enabled) {
			/* Move to system memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
			   amdgpu_bo_in_cpu_visible_vram(abo)) {

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
			abo->placements[0].lpfn = 0;
			abo->placement.busy_placement = &abo->placements[1];
			abo->placement.num_busy_placement = 1;
		} else {
			/* Move to GTT memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		}
		break;
	case TTM_PL_TT:
	case AMDGPU_PL_PREEMPT:
	default:
		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		break;
	}
	*placement = abo->placement;
}

/**
 * amdgpu_ttm_map_buffer - Map memory into the GART windows
 * @bo: buffer object to map
 * @mem: memory object to map
 * @mm_cur: range to map
 * @window: which GART window to use
 * @ring: DMA ring to use for the copy
 * @tmz: if we should setup a TMZ enabled mapping
 * @size: in number of bytes to map, out number of bytes mapped
 * @addr: resulting address inside the MC address space
 *
 * Setup one of the GART windows to access a specific piece of memory or return
 * the physical address for local memory.
 */
static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
				 struct ttm_resource *mem,
				 struct amdgpu_res_cursor *mm_cur,
				 unsigned window, struct amdgpu_ring *ring,
				 bool tmz, uint64_t *size, uint64_t *addr)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned offset, num_pages, num_dw, num_bytes;
	uint64_t src_addr, dst_addr;
	struct amdgpu_job *job;
	void *cpu_addr;
	uint64_t flags;
	unsigned int i;
	int r;

	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

	if (WARN_ON(mem->mem_type == AMDGPU_PL_PREEMPT))
		return -EINVAL;

	/* Map only what can't be accessed directly */
	if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
		*addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
			mm_cur->start;
		return 0;
	}

	/*
	 * If start begins at an offset inside the page, then adjust the size
	 * and addr accordingly
	 */
	offset = mm_cur->start & ~PAGE_MASK;

	num_pages = PFN_UP(*size + offset);
	num_pages = min_t(uint32_t, num_pages, AMDGPU_GTT_MAX_TRANSFER_SIZE);

	*size = min(*size, (uint64_t)num_pages * PAGE_SIZE - offset);

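	/*
	 * Window N of the GART copy area starts at gart_start plus N times
	 * AMDGPU_GTT_MAX_TRANSFER_SIZE GPU pages; add the in-page offset
	 * computed above so *addr points at the first requested byte.
	 */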
	*addr = adev->gmc.gart_start;
	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;
	*addr += offset;

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;

	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, false);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
	if (tmz)
		flags |= AMDGPU_PTE_TMZ;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	if (mem->mem_type == TTM_PL_TT) {
		dma_addr_t *dma_addr;

		dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
		amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
	} else {
		dma_addr_t dma_address;

		dma_address = mm_cur->start;
		dma_address += adev->vm_manager.vram_base_offset;

		for (i = 0; i < num_pages; ++i) {
			amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address,
					flags, cpu_addr);
			dma_address += PAGE_SIZE;
		}
	}

	dma_fence_put(amdgpu_job_submit(job));
	return 0;
}

/**
 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
 * @adev: amdgpu device
 * @src: buffer/address where to read from
 * @dst: buffer/address where to write to
 * @size: number of bytes to copy
 * @tmz: if a secure copy should be used
 * @resv: resv object to sync to
 * @f: Returns the last fence if multiple jobs are submitted.
 *
 * The function copies @size bytes from {src->mem + src->offset} to
 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
 * move and different for a BO to BO copy.
 *
 */
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       const struct amdgpu_copy_mem *src,
			       const struct amdgpu_copy_mem *dst,
			       uint64_t size, bool tmz,
			       struct dma_resv *resv,
			       struct dma_fence **f)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct amdgpu_res_cursor src_mm, dst_mm;
	struct dma_fence *fence = NULL;
	int r = 0;

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	amdgpu_res_first(src->mem, src->offset, size, &src_mm);
	amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);

	mutex_lock(&adev->mman.gtt_window_lock);
	while (src_mm.remaining) {
		uint64_t from, to, cur_size;
		struct dma_fence *next;

		/* Never copy more than 256MiB at once to avoid a timeout */
		cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);

		/* Map src to window 0 and dst to window 1. */
		r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
					  0, ring, tmz, &cur_size, &from);
		if (r)
			goto error;

		r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
					  1, ring, tmz, &cur_size, &to);
		if (r)
			goto error;

		r = amdgpu_copy_buffer(ring, from, to, cur_size,
				       resv, &next, false, true, tmz);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		amdgpu_res_next(&src_mm, cur_size);
		amdgpu_res_next(&dst_mm, cur_size);
	}
error:
	mutex_unlock(&adev->mman.gtt_window_lock);
	if (f)
		*f = dma_fence_get(fence);
	dma_fence_put(fence);
	return r;
}

/*
 * amdgpu_move_blit - Copy an entire buffer to another buffer
 *
 * This is a helper called by amdgpu_bo_move() to help move buffers
 * to and from VRAM.
 */
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_resource *new_mem,
			    struct ttm_resource *old_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_copy_mem src, dst;
	struct dma_fence *fence = NULL;
	int r;

	src.bo = bo;
	dst.bo = bo;
	src.mem = old_mem;
	dst.mem = new_mem;
	src.offset = 0;
	dst.offset = 0;

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
				       new_mem->size,
				       amdgpu_bo_encrypted(abo),
				       bo->base.resv, &fence);
	if (r)
		goto error;

	/* clear the space being freed */
	if (old_mem->mem_type == TTM_PL_VRAM &&
	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
		struct dma_fence *wipe_fence = NULL;

		r = amdgpu_fill_buffer(abo, AMDGPU_POISON, NULL, &wipe_fence);
		if (r) {
			goto error;
		} else if (wipe_fence) {
			dma_fence_put(fence);
			fence = wipe_fence;
		}
	}

	/* Always block for VM page tables before committing the new location */
	if (bo->type == ttm_bo_type_kernel)
		r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
	else
		r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
	dma_fence_put(fence);
	return r;

error:
	if (fence)
		dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/*
 * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
 *
 * Called by amdgpu_bo_move()
 */
static bool amdgpu_mem_visible(struct amdgpu_device *adev,
			       struct ttm_resource *mem)
{
	u64 mem_size = (u64)mem->size;
	struct amdgpu_res_cursor cursor;
	u64 end;

	if (mem->mem_type == TTM_PL_SYSTEM ||
	    mem->mem_type == TTM_PL_TT)
		return true;
	if (mem->mem_type != TTM_PL_VRAM)
		return false;

	amdgpu_res_first(mem, 0, mem_size, &cursor);
	end = cursor.start + cursor.size;
	while (cursor.remaining) {
		amdgpu_res_next(&cursor, cursor.size);

		if (!cursor.remaining)
			break;

		/* ttm_resource_ioremap only supports contiguous memory */
		if (end != cursor.start)
			return false;

		end = cursor.start + cursor.size;
	}

	return end <= adev->gmc.visible_vram_size;
}

/*
 * amdgpu_bo_move - Move a buffer object to a new memory location
 *
 * Called by ttm_bo_handle_move_mem()
 */
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_resource *new_mem,
			  struct ttm_place *hop)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_resource *old_mem = bo->resource;
	int r;

	if (new_mem->mem_type == TTM_PL_TT ||
	    new_mem->mem_type == AMDGPU_PL_PREEMPT) {
		r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
		if (r)
			return r;
	}

	/* Can't move a pinned BO */
	abo = ttm_to_amdgpu_bo(bo);
	if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
		return -EINVAL;

	adev = amdgpu_ttm_adev(bo->bdev);

	if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
			 bo->ttm == NULL)) {
		ttm_bo_move_null(bo, new_mem);
		goto out;
	}
	if (old_mem->mem_type == TTM_PL_SYSTEM &&
	    (new_mem->mem_type == TTM_PL_TT ||
	     new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
		ttm_bo_move_null(bo, new_mem);
		goto out;
	}
	if ((old_mem->mem_type == TTM_PL_TT ||
	     old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = ttm_bo_wait_ctx(bo, ctx);
		if (r)
			return r;

		amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, new_mem);
		goto out;
	}

	if (old_mem->mem_type == AMDGPU_PL_GDS ||
	    old_mem->mem_type == AMDGPU_PL_GWS ||
	    old_mem->mem_type == AMDGPU_PL_OA ||
	    new_mem->mem_type == AMDGPU_PL_GDS ||
	    new_mem->mem_type == AMDGPU_PL_GWS ||
	    new_mem->mem_type == AMDGPU_PL_OA) {
		/* Nothing to save here */
		ttm_bo_move_null(bo, new_mem);
		goto out;
	}

	if (bo->type == ttm_bo_type_device &&
	    new_mem->mem_type == TTM_PL_VRAM &&
	    old_mem->mem_type != TTM_PL_VRAM) {
		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
		 * accesses the BO after it's moved.
		 */
		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	}

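	/*
	 * The blit path can't copy directly between VRAM and system memory
	 * that isn't GART-bound, so such moves are bounced through a
	 * temporary GTT placement by returning -EMULTIHOP to TTM.
	 */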
	if (adev->mman.buffer_funcs_enabled) {
		if (((old_mem->mem_type == TTM_PL_SYSTEM &&
		      new_mem->mem_type == TTM_PL_VRAM) ||
		     (old_mem->mem_type == TTM_PL_VRAM &&
		      new_mem->mem_type == TTM_PL_SYSTEM))) {
			hop->fpfn = 0;
			hop->lpfn = 0;
			hop->mem_type = TTM_PL_TT;
			hop->flags = TTM_PL_FLAG_TEMPORARY;
			return -EMULTIHOP;
		}

		r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
	} else {
		r = -ENODEV;
	}

	if (r) {
		/* Check that all memory is CPU accessible */
		if (!amdgpu_mem_visible(adev, old_mem) ||
		    !amdgpu_mem_visible(adev, new_mem)) {
			pr_err("Move buffer fallback to memcpy unavailable\n");
			return r;
		}

		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r)
			return r;
	}

out:
	/* update statistics */
	atomic64_add(bo->base.size, &adev->num_bytes_moved);
	amdgpu_bo_move_notify(bo, evict, new_mem);
	return 0;
}

/*
 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
 *
 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
 */
static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
				     struct ttm_resource *mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	size_t bus_size = (size_t)mem->size;

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
	case AMDGPU_PL_PREEMPT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
			return -EINVAL;

		if (adev->mman.aper_base_kaddr &&
		    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
					mem->bus.offset;

		mem->bus.offset += adev->gmc.aper_base;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_res_cursor cursor;

	amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
			 &cursor);
	return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
}

/**
 * amdgpu_ttm_domain_start - Returns GPU start address
 * @adev: amdgpu device object
 * @type: type of the memory
 *
 * Returns:
 * GPU start address of a memory domain
 */
uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
{
	switch (type) {
	case TTM_PL_TT:
		return adev->gmc.gart_start;
	case TTM_PL_VRAM:
		return adev->gmc.vram_start;
	}

	return 0;
}

/*
 * TTM backend functions.
 */
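/*
 * struct amdgpu_ttm_tt - driver private TTM backend state
 * @ttm:       base ttm_tt object
 * @gobj:      GEM object this TT backs
 * @offset:    byte offset inside the GART aperture once bound
 * @userptr:   CPU address of the userptr mapping, if any
 * @usertask:  task the userptr pages belong to
 * @userflags: AMDGPU_GEM_USERPTR_* flags
 * @bound:     whether the TT is currently bound into the GART
 */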
struct amdgpu_ttm_tt {
	struct ttm_tt		ttm;
	struct drm_gem_object	*gobj;
	u64			offset;
	uint64_t		userptr;
	struct task_struct	*usertask;
	uint32_t		userflags;
	bool			bound;
};

#define ttm_to_amdgpu_ttm_tt(ptr)	container_of(ptr, struct amdgpu_ttm_tt, ttm)

#ifdef CONFIG_DRM_AMDGPU_USERPTR
/*
 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
 * memory and start HMM tracking CPU page table update
 *
 * Calling function must call amdgpu_ttm_tt_get_user_pages_done() once and
 * only once afterwards to stop HMM tracking
 */
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
				 struct hmm_range **range)
{
	struct ttm_tt *ttm = bo->tbo.ttm;
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	unsigned long start = gtt->userptr;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	bool readonly;
	int r = 0;

	/* Make sure get_user_pages_done() can cleanup gracefully */
	*range = NULL;

	mm = bo->notifier.mm;
	if (unlikely(!mm)) {
		DRM_DEBUG_DRIVER("BO is not registered?\n");
		return -EFAULT;
	}

	if (!mmget_not_zero(mm)) /* Happens during process shutdown */
		return -ESRCH;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, start);
	if (unlikely(!vma)) {
		r = -EFAULT;
		goto out_unlock;
	}
	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
		     vma->vm_file)) {
		r = -EPERM;
		goto out_unlock;
	}

	readonly = amdgpu_ttm_tt_is_readonly(ttm);
	r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
				       readonly, NULL, pages, range);
out_unlock:
	mmap_read_unlock(mm);
	if (r)
		pr_debug("failed %d to get user pages 0x%lx\n", r, start);

	mmput(mm);

	return r;
}

/* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations
 */
void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
				      struct hmm_range *range)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt && gtt->userptr && range)
		amdgpu_hmm_range_get_pages_done(range);
}

/*
 * amdgpu_ttm_tt_get_user_pages_done - stop HMM tracking of CPU page table
 * changes. Check if the pages backing this ttm range have been invalidated.
 *
 * Returns: true if pages are still valid
 */
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
				       struct hmm_range *range)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (!gtt || !gtt->userptr || !range)
		return false;

	DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
			 gtt->userptr, ttm->num_pages);

	WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");

	return !amdgpu_hmm_range_get_pages_done(range);
}
#endif

/*
 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
 *
 * Called by amdgpu_cs_list_validate(). This creates the page list
 * that backs user memory and will ultimately be mapped into the device
 * address space.
 */
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	unsigned long i;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i] = pages ? pages[i] : NULL;
}

/*
 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
 *
 * Called by amdgpu_ttm_backend_bind()
 **/
static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
				     struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	int r;

	/* Allocate an SG array and squash pages into it */
	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      (u64)ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	/* Map SG to device */
	r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
	if (r)
		goto release_sg;

	/* convert SG to linear array of pages and dma addresses */
	drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
				       ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);
	ttm->sg = NULL;
	return r;
}

/*
 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
 */
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
					struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg || !ttm->sg->sgl)
		return;

	/* unmap the pages mapped to the device */
	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
	sg_free_table(ttm->sg);
}

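/*
 * amdgpu_ttm_gart_bind - write GART PTEs for all pages of a buffer object
 *
 * For GFX9 MQD BOs only the first page keeps the default UC memory type;
 * the remaining pages are remapped as NC, hence the two-step bind.
 */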
static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
				 struct ttm_buffer_object *tbo,
				 uint64_t flags)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
	struct ttm_tt *ttm = tbo->ttm;
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (amdgpu_bo_encrypted(abo))
		flags |= AMDGPU_PTE_TMZ;

	if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
		uint64_t page_idx = 1;

		amdgpu_gart_bind(adev, gtt->offset, page_idx,
				 gtt->ttm.dma_address, flags);

		/* The memory type of the first page defaults to UC. Now
		 * modify the memory type to NC from the second page of
		 * the BO onward.
		 */
		flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
		flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);

		amdgpu_gart_bind(adev, gtt->offset + (page_idx << PAGE_SHIFT),
				 ttm->num_pages - page_idx,
				 &(gtt->ttm.dma_address[page_idx]), flags);
	} else {
		amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
				 gtt->ttm.dma_address, flags);
	}
}

/*
 * amdgpu_ttm_backend_bind - Bind GTT memory
 *
 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
 * This handles binding GTT memory to the device address space.
 */
static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
				   struct ttm_tt *ttm,
				   struct ttm_resource *bo_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	uint64_t flags;
	int r;

	if (!bo_mem)
		return -EINVAL;

	if (gtt->bound)
		return 0;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
		if (r) {
			DRM_ERROR("failed to pin userptr\n");
			return r;
		}
	} else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
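		/*
		 * TTM_TT_FLAG_EXTERNAL means the pages come from outside
		 * TTM, e.g. a dma-buf import: map the attachment on first
		 * bind and reuse the resulting sg table afterwards.
		 */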
		if (!ttm->sg) {
			struct dma_buf_attachment *attach;
			struct sg_table *sgt;

			attach = gtt->gobj->import_attach;
			sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
			if (IS_ERR(sgt))
				return PTR_ERR(sgt);

			ttm->sg = sgt;
		}

		drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
					       ttm->num_pages);
	}

	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type != TTM_PL_TT ||
	    !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
		return 0;
	}

	/* compute PTE flags relevant to this BO memory */
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);

	/* bind pages into GART page tables */
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
			 gtt->ttm.dma_address, flags);
	gtt->bound = true;
	return 0;
}

/*
 * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
 * through AGP or GART aperture.
 *
 * If bo is accessible through AGP aperture, then use AGP aperture
 * to access bo; otherwise allocate logical space in GART aperture
 * and map bo to GART aperture.
 */
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
	struct ttm_placement placement;
	struct ttm_place placements;
	struct ttm_resource *tmp;
	uint64_t addr, flags;
	int r;

	if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
		return 0;

	addr = amdgpu_gmc_agp_addr(bo);
	if (addr != AMDGPU_BO_INVALID_OFFSET) {
		bo->resource->start = addr >> PAGE_SHIFT;
		return 0;
	}

	/* allocate GART space */
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
	placements.mem_type = TTM_PL_TT;
	placements.flags = bo->resource->placement;

	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
	if (unlikely(r))
		return r;

	/* compute PTE flags for this buffer object */
	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);

	/* Bind pages */
	gtt->offset = (u64)tmp->start << PAGE_SHIFT;
	amdgpu_ttm_gart_bind(adev, bo, flags);
	amdgpu_gart_invalidate_tlb(adev);
	ttm_resource_free(bo, &bo->resource);
	ttm_bo_assign_mem(bo, tmp);

	return 0;
}

/*
 * amdgpu_ttm_recover_gart - Rebind GTT pages
 *
 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
 * rebind GTT pages during a GPU reset.
 */
void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	uint64_t flags;

	if (!tbo->ttm)
		return;

	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
	amdgpu_ttm_gart_bind(adev, tbo, flags);
}

/*
 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
 *
 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
 * ttm_tt_destroy().
 */
static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
				      struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	/* if the pages have userptr pinning then clear that first */
	if (gtt->userptr) {
		amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
	} else if (ttm->sg && gtt->gobj->import_attach) {
		struct dma_buf_attachment *attach;

		attach = gtt->gobj->import_attach;
		dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
		ttm->sg = NULL;
	}

	if (!gtt->bound)
		return;

	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
		return;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
	gtt->bound = false;
}

static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
				       struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt->usertask)
		put_task_struct(gtt->usertask);

	ttm_tt_fini(&gtt->ttm);
	kfree(gtt);
}

/**
 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
 *
 * @bo: The buffer object to create a GTT ttm_tt object around
 * @page_flags: Page flags to be added to the ttm_tt object
 *
 * Called by ttm_tt_create().
 */
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_ttm_tt *gtt;
	enum ttm_caching caching;

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->gobj = &bo->base;

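	/* USWC BOs get a write-combined CPU mapping for fast streaming writes */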
	if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		caching = ttm_write_combined;
	else
		caching = ttm_cached;

	/* allocate space for the uninitialized page entries */
	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm;
}

/*
 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
 *
 * Map the pages of a ttm_tt object to an address space visible
 * to the underlying device.
 */
static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
				  struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	pgoff_t i;
	int ret;

	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
	if (gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;
		return 0;
	}

	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
		return 0;

	ret = ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
	if (ret)
		return ret;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = bdev->dev_mapping;

	return 0;
}

/*
 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
 *
 * Unmaps pages of a ttm_tt object from the device address space and
 * unpopulates the page array backing it.
 */
static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
				     struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	struct amdgpu_device *adev;
	pgoff_t i;

	amdgpu_ttm_backend_unbind(bdev, ttm);

	if (gtt->userptr) {
		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->sg = NULL;
		return;
	}

	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = NULL;

	adev = amdgpu_ttm_adev(bdev);
	return ttm_pool_free(&adev->mman.bdev.pool, ttm);
}

/**
 * amdgpu_ttm_tt_get_userptr - Return the userptr GTT ttm_tt for the current
 * task
 *
 * @tbo: The ttm_buffer_object that contains the userptr
 * @user_addr: The returned value
 */
int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
			      uint64_t *user_addr)
{
	struct amdgpu_ttm_tt *gtt;

	if (!tbo->ttm)
		return -EINVAL;

	gtt = (void *)tbo->ttm;
	*user_addr = gtt->userptr;
	return 0;
}

/**
 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
 * task
 *
 * @bo: The ttm_buffer_object to bind this userptr to
 * @addr: The address in the current tasks VM space to use
 * @flags: Requirements of userptr object.
 *
 * Called by amdgpu_gem_userptr_ioctl() and kfd_ioctl_alloc_memory_of_gpu() to
 * bind userptr pages to current task and by kfd_ioctl_acquire_vm() to
 * initialize GPU VM for a KFD process.
 */
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
			      uint64_t addr, uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt;

	if (!bo->ttm) {
		/* TODO: We want a separate TTM object type for userptrs */
		bo->ttm = amdgpu_ttm_tt_create(bo, 0);
		if (bo->ttm == NULL)
			return -ENOMEM;
	}

	/* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
	bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;

	gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
	gtt->userptr = addr;
	gtt->userflags = flags;

	if (gtt->usertask)
		put_task_struct(gtt->usertask);
	gtt->usertask = current->group_leader;
	get_task_struct(gtt->usertask);

	return 0;
}

/*
 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
 */
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt == NULL)
		return NULL;

	if (gtt->usertask == NULL)
		return NULL;

	return gtt->usertask->mm;
}

/*
 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays inside an
 * address range for the current task.
 *
 */
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end, unsigned long *userptr)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	/* Return false if no part of the ttm_tt object lies within
	 * the range
	 */
	size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	if (userptr)
		*userptr = gtt->userptr;
	return true;
}

318c3f4b 1230 */
899fbde1 1231bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
ca666a3c 1232{
c4c10a68 1233 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
ca666a3c
CK
1234
1235 if (gtt == NULL || !gtt->userptr)
1236 return false;
1237
899fbde1 1238 return true;
ca666a3c
CK
1239}
1240
75501872 1241/*
50da5174
TSD
1242 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
1243 */
d38ceaf9
AD
1244bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1245{
c4c10a68 1246 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
d38ceaf9
AD
1247
1248 if (gtt == NULL)
1249 return false;
1250
1251 return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1252}
1253
/**
 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
 *
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PDE (Page Directory Entry).
 */
uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
{
	uint64_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && (mem->mem_type == TTM_PL_TT ||
		    mem->mem_type == AMDGPU_PL_PREEMPT)) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm->caching == ttm_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	if (mem && mem->mem_type == TTM_PL_VRAM &&
	    mem->bus.caching == ttm_cached)
		flags |= AMDGPU_PTE_SNOOPED;

	return flags;
}

/**
 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
 *
 * @adev: amdgpu_device pointer
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PTE (Page Table Entry).
 */
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_resource *mem)
{
	uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);

	flags |= adev->gart.gart_pte_flags;
	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}

/*
 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
 * object.
 *
 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
 * it can find space for a new object and by ttm_bo_force_list_clean() which is
 * used to clean out a memory space.
 */
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
{
	struct dma_resv_iter resv_cursor;
	struct dma_fence *f;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return ttm_bo_eviction_valuable(bo, place);

	/* Swapout? */
	if (bo->resource->mem_type == TTM_PL_SYSTEM)
		return true;

	if (bo->type == ttm_bo_type_kernel &&
	    !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
		return false;

	/* If bo is a KFD BO, check if the bo belongs to the current process.
	 * If true, then return false as any KFD process needs all its BOs to
	 * be resident to run successfully
	 */
	dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
				DMA_RESV_USAGE_BOOKKEEP, f) {
		if (amdkfd_fence_check_mm(f, current->mm))
			return false;
	}

	/* Preemptible BOs don't own system resources managed by the
	 * driver (pages, VRAM, GART space). They point to resources
	 * owned by someone else (e.g. pageable memory in user mode
	 * or a DMABuf). They are used in a preemptible context so we
	 * can guarantee no deadlocks and good QoS in case of MMU
	 * notifiers or DMABuf move notifiers from the resource owner.
	 */
	if (bo->resource->mem_type == AMDGPU_PL_PREEMPT)
		return false;

	if (bo->resource->mem_type == TTM_PL_TT &&
	    amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
		return false;

	return ttm_bo_eviction_valuable(bo, place);
}

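/*
 * amdgpu_ttm_vram_mm_access - access VRAM through the MM register window
 *
 * The MM index/data registers transfer whole aligned dwords, so partial
 * accesses are widened to a read-modify-write of the containing dword:
 * e.g. writing one byte at pos 0x1002 reads the dword at 0x1000, merges
 * the byte in at bit 16 using the computed mask, and writes it back.
 */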
static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
				      void *buf, size_t size, bool write)
{
	while (size) {
		uint64_t aligned_pos = ALIGN_DOWN(pos, 4);
		uint64_t bytes = 4 - (pos & 0x3);
		uint32_t shift = (pos & 0x3) * 8;
		uint32_t mask = 0xffffffff << shift;
		uint32_t value = 0;

		if (size < bytes) {
			mask &= 0xffffffff >> (bytes - size) * 8;
			bytes = size;
		}

		if (mask != 0xffffffff) {
			amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false);
			if (write) {
				value &= ~mask;
				value |= (*(uint32_t *)buf << shift) & mask;
				amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true);
			} else {
				value = (value & mask) >> shift;
				memcpy(buf, &value, bytes);
			}
		} else {
			amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write);
		}

		pos += bytes;
		buf += bytes;
		size -= bytes;
	}
}

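/*
 * amdgpu_ttm_access_memory_sdma - read or write one page of VRAM via SDMA
 *
 * Copies a single page between the BO and the preallocated sdma_access_bo
 * bounce buffer with an SDMA job and waits for it, avoiding the much
 * slower MMIO register path.
 */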
static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
					 unsigned long offset, void *buf,
					 int len, int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_res_cursor src_mm;
	struct amdgpu_job *job;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	unsigned int num_dw;
	int r, idx;

	if (len != PAGE_SIZE)
		return -EINVAL;

	if (!adev->mman.sdma_access_ptr)
		return -EACCES;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return -ENODEV;

	if (write)
		memcpy(adev->mman.sdma_access_ptr, buf, len);

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4, AMDGPU_IB_POOL_DELAYED,
				     &job);
	if (r)
		goto out;

	amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
	src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
		src_mm.start;
	dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
	if (write)
		swap(src_addr, dst_addr);

	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
				PAGE_SIZE, false);

	amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	fence = amdgpu_job_submit(job);

	if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
		r = -ETIMEDOUT;
	dma_fence_put(fence);

	if (!(r || write))
		memcpy(buf, adev->mman.sdma_access_ptr, len);
out:
	drm_dev_exit(idx);
	return r;
}

/**
 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
 *
 * @bo:  The buffer object to read/write
 * @offset:  Offset into buffer object
 * @buf:  Secondary buffer to write/read from
 * @len:  Length in bytes of access
 * @write:  true if writing
 *
 * This is used to access VRAM that backs a buffer object via MMIO
 * access for debugging purposes.
 */
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
				    unsigned long offset, void *buf, int len,
				    int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_res_cursor cursor;
	int ret = 0;

	if (bo->resource->mem_type != TTM_PL_VRAM)
		return -EIO;

	if (amdgpu_device_has_timeouts_enabled(adev) &&
	    !amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write))
		return len;

	amdgpu_res_first(bo->resource, offset, len, &cursor);
	while (cursor.remaining) {
		size_t count, size = cursor.size;
		loff_t pos = cursor.start;

		count = amdgpu_device_aper_access(adev, pos, buf, size, write);
		size -= count;
		if (size) {
			/* use the MM register path for the rest of VRAM and unaligned addresses */
			pos += count;
			buf += count;
			amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write);
		}

		ret += cursor.size;
		buf += cursor.size;
		amdgpu_res_next(&cursor, cursor.size);
	}

	return ret;
}

static void
amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
	amdgpu_bo_move_notify(bo, false, NULL);
}

static struct ttm_device_funcs amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.delete_mem_notify = &amdgpu_bo_delete_mem_notify,
	.release_notify = &amdgpu_bo_release_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
	.access_memory = &amdgpu_ttm_access_memory,
};

/*
 * Firmware Reservation functions
 */
/**
 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free fw reserved vram if it has been reserved.
 */
static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
			      NULL, &adev->mman.fw_vram_usage_va);
}

/*
 * Driver Reservation functions
 */
/**
 * amdgpu_ttm_drv_reserve_vram_fini - free drv reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free drv reserved vram if it has been reserved.
 */
static void amdgpu_ttm_drv_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mman.drv_vram_usage_reserved_bo,
			      NULL,
			      &adev->mman.drv_vram_usage_va);
}

/**
 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from fw.
 */
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
	uint64_t vram_size = adev->gmc.visible_vram_size;

	adev->mman.fw_vram_usage_va = NULL;
	adev->mman.fw_vram_usage_reserved_bo = NULL;

	if (adev->mman.fw_vram_usage_size == 0 ||
	    adev->mman.fw_vram_usage_size > vram_size)
		return 0;

	return amdgpu_bo_create_kernel_at(adev,
					  adev->mman.fw_vram_usage_start_offset,
					  adev->mman.fw_vram_usage_size,
					  &adev->mman.fw_vram_usage_reserved_bo,
					  &adev->mman.fw_vram_usage_va);
}

/**
 * amdgpu_ttm_drv_reserve_vram_init - create bo vram reservation from driver
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from drv.
 */
static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev)
{
	u64 vram_size = adev->gmc.visible_vram_size;

	adev->mman.drv_vram_usage_va = NULL;
	adev->mman.drv_vram_usage_reserved_bo = NULL;

	if (adev->mman.drv_vram_usage_size == 0 ||
	    adev->mman.drv_vram_usage_size > vram_size)
		return 0;

	return amdgpu_bo_create_kernel_at(adev,
					  adev->mman.drv_vram_usage_start_offset,
					  adev->mman.drv_vram_usage_size,
					  &adev->mman.drv_vram_usage_reserved_bo,
					  &adev->mman.drv_vram_usage_va);
}

/*
 * Memory training reservation functions
 */

/**
 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free memory training reserved vram if it has been reserved.
 */
static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
{
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;

	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
	amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
	ctx->c2p_bo = NULL;

	return 0;
}

static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
{
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;

	memset(ctx, 0, sizeof(*ctx));

	ctx->c2p_train_data_offset =
		ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M);
	ctx->p2c_train_data_offset =
		(adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
	ctx->train_data_size =
		GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;

	DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
		  ctx->train_data_size,
		  ctx->p2c_train_data_offset,
		  ctx->c2p_train_data_offset);
}

83d7f66a
LG
1649/*
1650 * reserve TMR memory at the top of VRAM which holds
1651 * IP Discovery data and is protected by PSP.
778e8c42 1652 */
83d7f66a 1653static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
778e8c42
TY
1654{
1655 int ret;
1656 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
83d7f66a 1657 bool mem_train_support = false;
778e8c42 1658
83d7f66a 1659 if (!amdgpu_sriov_vf(adev)) {
82a52030 1660 if (amdgpu_atomfirmware_mem_training_supported(adev))
83d7f66a 1661 mem_train_support = true;
72d208c2 1662 else
83d7f66a 1663 DRM_DEBUG("memory training is not supported!\n");
778e8c42
TY
1664 }
1665
83d7f66a
LG
1666 /*
 1667 * Query the reserved TMR size through atom firmwareinfo for Sienna_Cichlid and onwards
 1668 * for all the use cases (IP discovery/G6 memory training/profiling/diagnostic data, etc.)
 1669 *
 1670 * Otherwise, fall back to the legacy approach of checking and reserving TMR blocks for
 1671 * IP discovery data and G6 memory training data respectively.
1672 */
72de33f8 1673 adev->mman.discovery_tmr_size =
83d7f66a 1674 amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
72de33f8
AD
1675 if (!adev->mman.discovery_tmr_size)
1676 adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET;
2c6e83a1
LG
1677
1678 if (mem_train_support) {
1679 /* reserve vram for mem train according to TMR location */
1680 amdgpu_ttm_training_data_block_init(adev);
1681 ret = amdgpu_bo_create_kernel_at(adev,
58ab2c08
CK
1682 ctx->c2p_train_data_offset,
1683 ctx->train_data_size,
1684 &ctx->c2p_bo,
1685 NULL);
2c6e83a1
LG
1686 if (ret) {
1687 DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
1688 amdgpu_ttm_training_reserve_vram_fini(adev);
1689 return ret;
83d7f66a 1690 }
2c6e83a1 1691 ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
83d7f66a 1692 }
778e8c42 1693
778e8c42 1694 ret = amdgpu_bo_create_kernel_at(adev,
58ab2c08
CK
1695 adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
1696 adev->mman.discovery_tmr_size,
1697 &adev->mman.discovery_memory,
1698 NULL);
778e8c42 1699 if (ret) {
83d7f66a 1700 DRM_ERROR("alloc tmr failed(%d)!\n", ret);
72de33f8 1701 amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
33a9a5ab 1702 return ret;
778e8c42
TY
1703 }
1704
778e8c42 1705 return 0;
778e8c42
TY
1706}
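
Illustrative top-of-VRAM layout after amdgpu_ttm_reserve_tmr(), for the case where memory training is supported (block sizes not to scale):

	/*
	 * real_vram_size -> +------------------------------+
	 *                   | TMR: IP discovery data (PSP) |  discovery_tmr_size
	 *                   +------------------------------+
	 *                   | c2p memory training data     |  train_data_size
	 *                   +------------------------------+
	 *                   | ... rest of VRAM ...         |
	 */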
1707
75501872 1708/*
2e603d04
HR
1709 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
1710 * gtt/vram related fields.
50da5174
TSD
1711 *
1712 * This initializes all of the memory space pools that the TTM layer
1713 * will need such as the GTT space (system memory mapped to the device),
1714 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
1715 * can be mapped per VMID.
1716 */
d38ceaf9
AD
1717int amdgpu_ttm_init(struct amdgpu_device *adev)
1718{
36d38372 1719 uint64_t gtt_size;
d38ceaf9 1720 int r;
218b5dcd 1721 u64 vis_vram_limit;
d38ceaf9 1722
a64f784b
CK
1723 mutex_init(&adev->mman.gtt_window_lock);
1724
d38ceaf9 1725 /* No other users of the address space, so set it to 0 */
8af8a109 1726 r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
4a580877
LT
1727 adev_to_drm(adev)->anon_inode->i_mapping,
1728 adev_to_drm(adev)->vma_offset_manager,
ee5d2a8e 1729 adev->need_swiotlb,
90489ce1 1730 dma_addressing_limited(adev->dev));
d38ceaf9
AD
1731 if (r) {
1732 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1733 return r;
1734 }
1735 adev->mman.initialized = true;
7cce9584 1736
50da5174 1737 /* Initialize VRAM pool with all of VRAM divided into pages */
158d20d1 1738 r = amdgpu_vram_mgr_init(adev);
d38ceaf9
AD
1739 if (r) {
1740 DRM_ERROR("Failed initializing VRAM heap.\n");
1741 return r;
1742 }
218b5dcd
JB
1743
1744 /* Reduce size of CPU-visible VRAM if requested */
1745 vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
1746 if (amdgpu_vis_vram_limit > 0 &&
770d13b1
CK
1747 vis_vram_limit <= adev->gmc.visible_vram_size)
1748 adev->gmc.visible_vram_size = vis_vram_limit;
218b5dcd 1749
d38ceaf9 1750 /* Change the size here instead of the init above so only lpfn is affected */
57adc4ce 1751 amdgpu_ttm_set_buffer_funcs_status(adev, false);
f8f4b9a6 1752#ifdef CONFIG_64BIT
f1008370 1753#ifdef CONFIG_X86
9d0af8b4
OZ
1754 if (adev->gmc.xgmi.connected_to_cpu)
1755 adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
1756 adev->gmc.visible_vram_size);
1757
1758 else
f1008370 1759#endif
9d0af8b4
OZ
1760 adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
1761 adev->gmc.visible_vram_size);
f8f4b9a6 1762#endif
d38ceaf9 1763
a05502e5
HC
1764 /*
 1765 * The reserved VRAM for firmware must be pinned at the specified
 1766 * place in VRAM, so reserve it early.
1767 */
f5ec697e 1768 r = amdgpu_ttm_fw_reserve_vram_init(adev);
a05502e5
HC
 1769 if (r)
 1770 return r;
1772
4864f2ee
TL
1773 /*
 1774 * The reserved VRAM for the driver must be pinned at the specified
 1775 * place in VRAM, so reserve it early.
1776 */
1777 r = amdgpu_ttm_drv_reserve_vram_init(adev);
1778 if (r)
1779 return r;
1780
778e8c42 1781 /*
83d7f66a
LG
 1782 * Only NAVI10 and onward ASICs support IP discovery.
 1783 * If IP discovery is enabled, a block of memory should be
 1784 * reserved for IP discovery data.
778e8c42 1785 */
72de33f8 1786 if (adev->mman.discovery_bin) {
83d7f66a 1787 r = amdgpu_ttm_reserve_tmr(adev);
e862b08b
ML
1788 if (r)
1789 return r;
1790 }
778e8c42 1791
50da5174
TSD
 1792 /* Allocate memory required for VGA.
 1793 * This is used for VGA emulation and pre-OS scanout buffers to
 1794 * avoid display artifacts while transitioning between the pre-OS
 1795 * environment and the driver. */
cacbbe7c 1796 r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
cacbbe7c 1797 &adev->mman.stolen_vga_memory,
14b18937 1798 NULL);
52975728
CK
1799 if (r)
1800 return r;
cacbbe7c
AD
1801 r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
1802 adev->mman.stolen_extended_size,
cacbbe7c 1803 &adev->mman.stolen_extended_memory,
14b18937 1804 NULL);
52975728
CK
1805 if (r)
1806 return r;
e15a5fb9
HR
1807 r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset,
1808 adev->mman.stolen_reserved_size,
e15a5fb9
HR
1809 &adev->mman.stolen_reserved_memory,
1810 NULL);
1811 if (r)
1812 return r;
5f6a556f 1813
d38ceaf9 1814 DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
770d13b1 1815 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
36d38372 1816
f7ba887f 1817 /* Compute GTT size, either based on 1/2 the size of RAM size
50da5174 1818 * or whatever the user passed on module init */
424e2c85
RH
1819 if (amdgpu_gtt_size == -1) {
1820 struct sysinfo si;
1821
1822 si_meminfo(&si);
f7ba887f
AD
1823 /* Certain GL unit tests for large textures can cause problems
1824 * with the OOM killer since there is no way to link this memory
1825 * to a process. This was originally mitigated (but not necessarily
1826 * eliminated) by limiting the GTT size. The problem is this limit
1827 * is often too low for many modern games so just make the limit 1/2
1828 * of system memory which aligns with TTM. The OOM accounting needs
1829 * to be addressed, but we shouldn't prevent common 3D applications
1830 * from being usable just to potentially mitigate that corner case.
1831 */
1832 gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
1833 (u64)si.totalram * si.mem_unit / 2);
1834 } else {
36d38372 1835 gtt_size = (uint64_t)amdgpu_gtt_size << 20;
f7ba887f 1836 }
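	/* Worked example (values assumed for illustration): with 32 GiB of
	 * system RAM and the default AMDGPU_DEFAULT_GTT_SIZE_MB of 3072,
	 * gtt_size = max(3 GiB, 16 GiB) = 16 GiB; loading with
	 * amdgpu_gtt_size=8192 would force 8 GiB instead. */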
50da5174
TSD
1837
1838 /* Initialize GTT memory pool */
158d20d1 1839 r = amdgpu_gtt_mgr_init(adev, gtt_size);
d38ceaf9
AD
1840 if (r) {
1841 DRM_ERROR("Failed initializing GTT heap.\n");
1842 return r;
1843 }
1844 DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
36d38372 1845 (unsigned)(gtt_size / (1024 * 1024)));
d38ceaf9 1846
b453e42a
FK
1847 /* Initialize preemptible memory pool */
1848 r = amdgpu_preempt_mgr_init(adev);
1849 if (r) {
1850 DRM_ERROR("Failed initializing PREEMPT heap.\n");
1851 return r;
1852 }
1853
50da5174 1854 /* Initialize various on-chip memory pools */
47363354 1855 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
c832c346
CK
1856 if (r) {
1857 DRM_ERROR("Failed initializing GDS heap.\n");
1858 return r;
d38ceaf9
AD
1859 }
1860
47363354 1861 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
c832c346
CK
1862 if (r) {
1863 DRM_ERROR("Failed initializing gws heap.\n");
1864 return r;
d38ceaf9
AD
1865 }
1866
47363354 1867 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
c832c346
CK
1868 if (r) {
1869 DRM_ERROR("Failed initializing oa heap.\n");
1870 return r;
d38ceaf9
AD
1871 }
1872
cb5cc4f5
JK
1873 if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
1874 AMDGPU_GEM_DOMAIN_GTT,
1875 &adev->mman.sdma_access_bo, NULL,
590e86fe 1876 &adev->mman.sdma_access_ptr))
cb5cc4f5
JK
1877 DRM_WARN("Debug VRAM access will use slowpath MM access\n");
1878
d38ceaf9
AD
1879 return 0;
1880}
1881
75501872 1882/*
50da5174
TSD
1883 * amdgpu_ttm_fini - De-initialize the TTM memory pools
1884 */
d38ceaf9
AD
1885void amdgpu_ttm_fini(struct amdgpu_device *adev)
1886{
62d5f9f7 1887 int idx;
d38ceaf9
AD
1888 if (!adev->mman.initialized)
1889 return;
11c6b82a 1890
778e8c42 1891 amdgpu_ttm_training_reserve_vram_fini(adev);
5db62dc8 1892 /* return the stolen vga memory back to VRAM */
5f6fab24
AD
1893 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
1894 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
224f82e5 1895 /* return the IP Discovery TMR memory back to VRAM */
72de33f8 1896 amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
e15a5fb9
HR
1897 if (adev->mman.stolen_reserved_size)
1898 amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
1899 NULL, NULL);
590e86fe
JK
1900 amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
1901 &adev->mman.sdma_access_ptr);
f5ec697e 1902 amdgpu_ttm_fw_reserve_vram_fini(adev);
4864f2ee 1903 amdgpu_ttm_drv_reserve_vram_fini(adev);
224f82e5 1904
62d5f9f7
LS
1905 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
1906
1907 if (adev->mman.aper_base_kaddr)
1908 iounmap(adev->mman.aper_base_kaddr);
1909 adev->mman.aper_base_kaddr = NULL;
1910
1911 drm_dev_exit(idx);
1912 }
1913
6fe1c543
DA
1914 amdgpu_vram_mgr_fini(adev);
1915 amdgpu_gtt_mgr_fini(adev);
b453e42a 1916 amdgpu_preempt_mgr_fini(adev);
37205891
DA
1917 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
1918 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
1919 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
8af8a109 1920 ttm_device_fini(&adev->mman.bdev);
d38ceaf9
AD
1921 adev->mman.initialized = false;
1922 DRM_INFO("amdgpu: ttm finalized\n");
1923}
1924
57adc4ce
CK
1925/**
1926 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
1927 *
1928 * @adev: amdgpu_device pointer
1929 * @enable: true when we can use buffer functions.
1930 *
1931 * Enable/disable use of buffer functions during suspend/resume. This should
1932 * only be called at bootup or when userspace isn't running.
1933 */
1934void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
d38ceaf9 1935{
9de59bc2 1936 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
57adc4ce 1937 uint64_t size;
b7d85e1d 1938 int r;
d38ceaf9 1939
53b3f8f4 1940 if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
b7d85e1d 1941 adev->mman.buffer_funcs_enabled == enable)
d38ceaf9
AD
1942 return;
1943
b7d85e1d
CK
1944 if (enable) {
1945 struct amdgpu_ring *ring;
b3ac1766 1946 struct drm_gpu_scheduler *sched;
b7d85e1d
CK
1947
1948 ring = adev->mman.buffer_funcs_ring;
b3ac1766
ND
1949 sched = &ring->sched;
1950 r = drm_sched_entity_init(&adev->mman.entity,
e2d732fd 1951 DRM_SCHED_PRIORITY_KERNEL, &sched,
b3ac1766 1952 1, NULL);
b7d85e1d
CK
1953 if (r) {
1954 DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
1955 r);
1956 return;
1957 }
1958 } else {
cdc50176 1959 drm_sched_entity_destroy(&adev->mman.entity);
7766484b
AG
1960 dma_fence_put(man->move);
1961 man->move = NULL;
b7d85e1d
CK
1962 }
1963
d38ceaf9 1964 /* This just adjusts TTM's idea of the VRAM size, which sets lpfn to the correct value */
57adc4ce
CK
1965 if (enable)
1966 size = adev->gmc.real_vram_size;
1967 else
1968 size = adev->gmc.visible_vram_size;
7db47b83 1969 man->size = size;
81988f9c 1970 adev->mman.buffer_funcs_enabled = enable;
d38ceaf9
AD
1971}
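
A hedged sketch of how the suspend/resume paths are expected to use this toggle; the surrounding suspend logic is assumed, not taken from this listing:

	/* Suspend: stop scheduling SDMA moves before the ring is halted. */
	amdgpu_ttm_set_buffer_funcs_status(adev, false);
	/* ... rings torn down and brought back up ... */
	/* Resume: re-enable accelerated moves once the ring is ready again. */
	amdgpu_ttm_set_buffer_funcs_status(adev, true);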
1972
22f7cc75
CK
1973static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
1974 bool direct_submit,
1975 unsigned int num_dw,
1976 struct dma_resv *resv,
1977 bool vm_needs_flush,
1978 struct amdgpu_job **job)
1979{
1980 enum amdgpu_ib_pool_type pool = direct_submit ?
1981 AMDGPU_IB_POOL_DIRECT :
1982 AMDGPU_IB_POOL_DELAYED;
1983 int r;
1984
f7d66fb2
CK
1985 r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
1986 AMDGPU_FENCE_OWNER_UNDEFINED,
1987 num_dw * 4, pool, job);
22f7cc75
CK
1988 if (r)
1989 return r;
1990
1991 if (vm_needs_flush) {
1992 (*job)->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
1993 adev->gmc.pdb0_bo :
1994 adev->gart.bo);
1995 (*job)->vm_needs_flush = true;
1996 }
4f91790b
CK
1997 if (!resv)
1998 return 0;
1999
2000 return drm_sched_job_add_resv_dependencies(&(*job)->base, resv,
2001 DMA_RESV_USAGE_BOOKKEEP);
22f7cc75
CK
2002}
2003
fc9c8f54
CK
2004int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
2005 uint64_t dst_offset, uint32_t byte_count,
52791eee 2006 struct dma_resv *resv,
fc9c8f54 2007 struct dma_fence **fence, bool direct_submit,
c9dc9cfe 2008 bool vm_needs_flush, bool tmz)
d38ceaf9
AD
2009{
2010 struct amdgpu_device *adev = ring->adev;
22f7cc75 2011 unsigned num_loops, num_dw;
d71518b5 2012 struct amdgpu_job *job;
d38ceaf9 2013 uint32_t max_bytes;
d38ceaf9
AD
2014 unsigned i;
2015 int r;
2016
fcd6b0e2 2017 if (!direct_submit && !ring->sched.ready) {
81988f9c
CK
2018 DRM_ERROR("Trying to move memory with ring turned off.\n");
2019 return -EINVAL;
2020 }
2021
d38ceaf9
AD
2022 max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
2023 num_loops = DIV_ROUND_UP(byte_count, max_bytes);
4e930d96 2024 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
22f7cc75
CK
2025 r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
2026 resv, vm_needs_flush, &job);
d71518b5 2027 if (r)
9066b0c3 2028 return r;
c7ae72c0 2029
d38ceaf9
AD
2030 for (i = 0; i < num_loops; i++) {
2031 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
2032
d71518b5 2033 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
c9dc9cfe 2034 dst_offset, cur_size_in_bytes, tmz);
d38ceaf9
AD
2035
2036 src_offset += cur_size_in_bytes;
2037 dst_offset += cur_size_in_bytes;
2038 byte_count -= cur_size_in_bytes;
2039 }
2040
d71518b5
CK
2041 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2042 WARN_ON(job->ibs[0].length_dw > num_dw);
ee913fd9
CK
2043 if (direct_submit)
2044 r = amdgpu_job_submit_direct(job, ring, fence);
2045 else
f7d66fb2 2046 *fence = amdgpu_job_submit(job);
ee913fd9
CK
2047 if (r)
2048 goto error_free;
d38ceaf9 2049
e24db985 2050 return r;
d71518b5 2051
c7ae72c0 2052error_free:
d71518b5 2053 amdgpu_job_free(job);
ee913fd9 2054 DRM_ERROR("Error scheduling IBs (%d)\n", r);
c7ae72c0 2055 return r;
d38ceaf9
AD
2056}
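
A minimal usage sketch for amdgpu_copy_buffer(), assuming src_addr and dst_addr are GPU addresses already backed by GART or VRAM; the error handling is illustrative:

	struct dma_fence *fence = NULL;
	int r;

	/* Schedule a 4 KiB copy on the buffer-functions ring and wait. */
	r = amdgpu_copy_buffer(adev->mman.buffer_funcs_ring, src_addr,
			       dst_addr, 4096, NULL, &fence, false, false,
			       false);
	if (!r && fence) {
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
	}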
2057
22f7cc75
CK
2058static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
2059 uint64_t dst_addr, uint32_t byte_count,
2060 struct dma_resv *resv,
2061 struct dma_fence **fence,
2062 bool vm_needs_flush)
59b4a977 2063{
22f7cc75 2064 struct amdgpu_device *adev = ring->adev;
59b4a977 2065 unsigned int num_loops, num_dw;
f29224a6 2066 struct amdgpu_job *job;
22f7cc75
CK
2067 uint32_t max_bytes;
2068 unsigned int i;
59b4a977
FC
2069 int r;
2070
22f7cc75
CK
2071 max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
2072 num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
2073 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
2074 r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
2075 &job);
59b4a977
FC
2076 if (r)
2077 return r;
2078
22f7cc75
CK
2079 for (i = 0; i < num_loops; i++) {
2080 uint32_t cur_size = min(byte_count, max_bytes);
f29224a6 2081
596ee296
CK
2082 amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
2083 cur_size);
f29224a6 2084
22f7cc75
CK
2085 dst_addr += cur_size;
2086 byte_count -= cur_size;
59b4a977
FC
2087 }
2088
2089 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2090 WARN_ON(job->ibs[0].length_dw > num_dw);
f7d66fb2 2091 *fence = amdgpu_job_submit(job);
59b4a977 2092 return 0;
59b4a977
FC
2093}
2094
22f7cc75
CK
2095int amdgpu_fill_buffer(struct amdgpu_bo *bo,
2096 uint32_t src_data,
2097 struct dma_resv *resv,
2098 struct dma_fence **f)
2099{
2100 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2101 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2102 struct dma_fence *fence = NULL;
2103 struct amdgpu_res_cursor dst;
2104 int r;
2105
2106 if (!adev->mman.buffer_funcs_enabled) {
2107 DRM_ERROR("Trying to clear memory with ring turned off.\n");
2108 return -EINVAL;
2109 }
2110
2111 amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst);
2112
2113 mutex_lock(&adev->mman.gtt_window_lock);
2114 while (dst.remaining) {
2115 struct dma_fence *next;
2116 uint64_t cur_size, to;
2117
2118 /* Never fill more than 256MiB at once to avoid timeouts */
2119 cur_size = min(dst.size, 256ULL << 20);
2120
2121 r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &dst,
2122 1, ring, false, &cur_size, &to);
2123 if (r)
2124 goto error;
2125
2126 r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
2127 &next, true);
2128 if (r)
2129 goto error;
2130
2131 dma_fence_put(fence);
2132 fence = next;
2133
2134 amdgpu_res_next(&dst, cur_size);
2135 }
2136error:
2137 mutex_unlock(&adev->mman.gtt_window_lock);
2138 if (f)
2139 *f = dma_fence_get(fence);
2140 dma_fence_put(fence);
59b4a977
FC
2141 return r;
2142}
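
A short usage sketch, assuming abo is a valid amdgpu_bo with backing storage; clearing the whole BO to zero and waiting for the SDMA work:

	struct dma_fence *fence = NULL;
	int r;

	r = amdgpu_fill_buffer(abo, 0, NULL, &fence);
	if (!r && fence) {
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
	}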
2143
58144d28
ND
2144/**
2145 * amdgpu_ttm_evict_resources - evict memory buffers
2146 * @adev: amdgpu device object
2147 * @mem_type: evicted BO's memory type
2148 *
2149 * Evicts all @mem_type buffers on the lru list of the memory type.
2150 *
2151 * Returns:
2152 * 0 for success or a negative error code on failure.
2153 */
2154int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
2155{
2156 struct ttm_resource_manager *man;
2157
2158 switch (mem_type) {
2159 case TTM_PL_VRAM:
2160 case TTM_PL_TT:
2161 case AMDGPU_PL_GWS:
2162 case AMDGPU_PL_GDS:
2163 case AMDGPU_PL_OA:
2164 man = ttm_manager_type(&adev->mman.bdev, mem_type);
2165 break;
2166 default:
2167 DRM_ERROR("Trying to evict invalid memory type\n");
2168 return -EINVAL;
2169 }
2170
2171 return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
2172}
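
A typical call-site sketch (the suspend context is assumed): evicting all VRAM buffers before powering the device down:

	/* Move everything out of VRAM; a nonzero return means at least one
	 * buffer could not be evicted. */
	r = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
	if (r)
		DRM_WARN("evicting VRAM failed (%d)\n", r);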
2173
d38ceaf9
AD
2174#if defined(CONFIG_DEBUG_FS)
2175
98d28ac2 2176static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
e93b2da9 2177{
98d28ac2 2178 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
e93b2da9
CK
2179
2180 return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
2181}
2182
98d28ac2 2183DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
d38ceaf9 2184
75501872 2185/*
50da5174
TSD
2186 * amdgpu_ttm_vram_read - Linear read access to VRAM
2187 *
2188 * Accesses VRAM via MMIO for debugging purposes.
2189 */
d38ceaf9
AD
2190static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2191 size_t size, loff_t *pos)
2192{
45063097 2193 struct amdgpu_device *adev = file_inode(f)->i_private;
d38ceaf9 2194 ssize_t result = 0;
d38ceaf9
AD
2195
2196 if (size & 0x3 || *pos & 0x3)
2197 return -EINVAL;
2198
770d13b1 2199 if (*pos >= adev->gmc.mc_vram_size)
9156e723
TSD
2200 return -ENXIO;
2201
030d5b97 2202 size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
d38ceaf9 2203 while (size) {
030d5b97
CK
2204 size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
2205 uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
d38ceaf9 2206
030d5b97 2207 amdgpu_device_vram_access(adev, *pos, value, bytes, false);
434cbcb1
DC
2208 if (copy_to_user(buf, value, bytes))
2209 return -EFAULT;
d38ceaf9 2210
030d5b97
CK
2211 result += bytes;
2212 buf += bytes;
2213 *pos += bytes;
2214 size -= bytes;
d38ceaf9
AD
2215 }
2216
2217 return result;
2218}
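
Since this handler is exposed as the amdgpu_vram debugfs file (see amdgpu_ttm_debugfs_init() below), it can be exercised from userspace; a sketch that honors the 4-byte alignment checks above, assuming DRI minor 0:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Path assumes DRI minor 0; adjust for the target device. */
		int fd = open("/sys/kernel/debug/dri/0/amdgpu_vram", O_RDONLY);
		uint32_t value;

		if (fd < 0)
			return 1;
		/* Offset and size must both be multiples of 4, matching the
		 * (size & 0x3 || *pos & 0x3) check in the handler. */
		if (pread(fd, &value, sizeof(value), 0) == sizeof(value))
			printf("VRAM[0] = 0x%08x\n", value);
		close(fd);
		return 0;
	}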
2219
75501872 2220/*
50da5174
TSD
2221 * amdgpu_ttm_vram_write - Linear write access to VRAM
2222 *
2223 * Accesses VRAM via MMIO for debugging purposes.
2224 */
08cab989
TSD
2225static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
2226 size_t size, loff_t *pos)
2227{
2228 struct amdgpu_device *adev = file_inode(f)->i_private;
2229 ssize_t result = 0;
2230 int r;
2231
2232 if (size & 0x3 || *pos & 0x3)
2233 return -EINVAL;
2234
770d13b1 2235 if (*pos >= adev->gmc.mc_vram_size)
08cab989
TSD
2236 return -ENXIO;
2237
2238 while (size) {
08cab989
TSD
2239 uint32_t value;
2240
770d13b1 2241 if (*pos >= adev->gmc.mc_vram_size)
08cab989
TSD
2242 return result;
2243
2244 r = get_user(value, (uint32_t *)buf);
2245 if (r)
2246 return r;
2247
5fb95aa7 2248 amdgpu_device_mm_access(adev, *pos, &value, 4, true);
08cab989
TSD
2249
2250 result += 4;
2251 buf += 4;
2252 *pos += 4;
2253 size -= 4;
2254 }
2255
2256 return result;
2257}
2258
d38ceaf9
AD
2259static const struct file_operations amdgpu_ttm_vram_fops = {
2260 .owner = THIS_MODULE,
2261 .read = amdgpu_ttm_vram_read,
08cab989
TSD
2262 .write = amdgpu_ttm_vram_write,
2263 .llseek = default_llseek,
d38ceaf9
AD
2264};
2265
75501872 2266/*
50da5174
TSD
2267 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
2268 *
2269 * This function is used to read memory that has been mapped to the
2270 * GPU and the known addresses are not physical addresses but instead
2271 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2272 */
ebb043f2
TSD
2273static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
2274 size_t size, loff_t *pos)
38290b2c
TSD
2275{
2276 struct amdgpu_device *adev = file_inode(f)->i_private;
38290b2c 2277 struct iommu_domain *dom;
ebb043f2
TSD
2278 ssize_t result = 0;
2279 int r;
38290b2c 2280
50da5174 2281 /* retrieve the IOMMU domain if any for this device */
ebb043f2 2282 dom = iommu_get_domain_for_dev(adev->dev);
38290b2c 2283
ebb043f2
TSD
2284 while (size) {
2285 phys_addr_t addr = *pos & PAGE_MASK;
2286 loff_t off = *pos & ~PAGE_MASK;
2287 size_t bytes = PAGE_SIZE - off;
2288 unsigned long pfn;
2289 struct page *p;
2290 void *ptr;
2291
2292 bytes = bytes < size ? bytes : size;
2293
50da5174
TSD
2294 /* Translate the bus address to a physical address. If
2295 * the domain is NULL it means there is no IOMMU active
2296 * and the address translation is the identity
2297 */
ebb043f2
TSD
2298 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2299
2300 pfn = addr >> PAGE_SHIFT;
2301 if (!pfn_valid(pfn))
2302 return -EPERM;
2303
2304 p = pfn_to_page(pfn);
2305 if (p->mapping != adev->mman.bdev.dev_mapping)
2306 return -EPERM;
2307
a2c55426 2308 ptr = kmap_local_page(p);
864917a3 2309 r = copy_to_user(buf, ptr + off, bytes);
a2c55426 2310 kunmap_local(ptr);
ebb043f2
TSD
2311 if (r)
2312 return -EFAULT;
2313
2314 size -= bytes;
2315 *pos += bytes;
2316 result += bytes;
2317 }
2318
2319 return result;
2320}
2321
75501872 2322/*
50da5174
TSD
2323 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
2324 *
2325 * This function is used to write memory that has been mapped to the
2326 * GPU and the known addresses are not physical addresses but instead
2327 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2328 */
ebb043f2
TSD
2329static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2330 size_t size, loff_t *pos)
2331{
2332 struct amdgpu_device *adev = file_inode(f)->i_private;
2333 struct iommu_domain *dom;
2334 ssize_t result = 0;
2335 int r;
38290b2c
TSD
2336
2337 dom = iommu_get_domain_for_dev(adev->dev);
a40cfa0b 2338
ebb043f2
TSD
2339 while (size) {
2340 phys_addr_t addr = *pos & PAGE_MASK;
2341 loff_t off = *pos & ~PAGE_MASK;
2342 size_t bytes = PAGE_SIZE - off;
2343 unsigned long pfn;
2344 struct page *p;
2345 void *ptr;
2346
2347 bytes = bytes < size ? bytes : size;
38290b2c 2348
ebb043f2
TSD
2349 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2350
2351 pfn = addr >> PAGE_SHIFT;
2352 if (!pfn_valid(pfn))
2353 return -EPERM;
2354
2355 p = pfn_to_page(pfn);
2356 if (p->mapping != adev->mman.bdev.dev_mapping)
2357 return -EPERM;
2358
a2c55426 2359 ptr = kmap_local_page(p);
864917a3 2360 r = copy_from_user(ptr + off, buf, bytes);
a2c55426 2361 kunmap_local(ptr);
ebb043f2
TSD
2362 if (r)
2363 return -EFAULT;
2364
2365 size -= bytes;
2366 *pos += bytes;
2367 result += bytes;
2368 }
2369
2370 return result;
38290b2c
TSD
2371}
2372
ebb043f2 2373static const struct file_operations amdgpu_ttm_iomem_fops = {
38290b2c 2374 .owner = THIS_MODULE,
ebb043f2
TSD
2375 .read = amdgpu_iomem_read,
2376 .write = amdgpu_iomem_write,
38290b2c
TSD
2377 .llseek = default_llseek
2378};
a40cfa0b 2379
a1d29476
CK
2380#endif
2381
98d28ac2 2382void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
d38ceaf9
AD
2383{
2384#if defined(CONFIG_DEBUG_FS)
4a580877 2385 struct drm_minor *minor = adev_to_drm(adev)->primary;
88293c03
ND
2386 struct dentry *root = minor->debugfs_root;
2387
98d28ac2 2388 debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
88293c03 2389 &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
98d28ac2 2390 debugfs_create_file("amdgpu_iomem", 0444, root, adev,
88293c03 2391 &amdgpu_ttm_iomem_fops);
98d28ac2
ND
2392 debugfs_create_file("ttm_page_pool", 0444, root, adev,
2393 &amdgpu_ttm_page_pool_fops);
7212d24c
ZR
2394 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2395 TTM_PL_VRAM),
2396 root, "amdgpu_vram_mm");
2397 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2398 TTM_PL_TT),
2399 root, "amdgpu_gtt_mm");
2400 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2401 AMDGPU_PL_GDS),
2402 root, "amdgpu_gds_mm");
2403 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2404 AMDGPU_PL_GWS),
2405 root, "amdgpu_gws_mm");
2406 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2407 AMDGPU_PL_OA),
2408 root, "amdgpu_oa_mm");
2409
d38ceaf9
AD
2410#endif
2411}