drm/amdgpu: Unmap all MMIO mappings
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
1 /*
2  * Copyright 2009 Jerome Glisse.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sub license, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20  *
21  * The above copyright notice and this permission notice (including the
22  * next paragraph) shall be included in all copies or substantial portions
23  * of the Software.
24  *
25  */
26 /*
27  * Authors:
28  *    Jerome Glisse <glisse@freedesktop.org>
29  *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30  *    Dave Airlie
31  */
32
33 #include <linux/dma-mapping.h>
34 #include <linux/iommu.h>
35 #include <linux/hmm.h>
36 #include <linux/pagemap.h>
37 #include <linux/sched/task.h>
38 #include <linux/sched/mm.h>
39 #include <linux/seq_file.h>
40 #include <linux/slab.h>
41 #include <linux/swap.h>
42 #include <linux/swiotlb.h>
43 #include <linux/dma-buf.h>
44 #include <linux/sizes.h>
45
46 #include <drm/ttm/ttm_bo_api.h>
47 #include <drm/ttm/ttm_bo_driver.h>
48 #include <drm/ttm/ttm_placement.h>
49
50 #include <drm/amdgpu_drm.h>
51 #include <drm/drm_drv.h>
52
53 #include "amdgpu.h"
54 #include "amdgpu_object.h"
55 #include "amdgpu_trace.h"
56 #include "amdgpu_amdkfd.h"
57 #include "amdgpu_sdma.h"
58 #include "amdgpu_ras.h"
59 #include "amdgpu_atomfirmware.h"
60 #include "amdgpu_res_cursor.h"
61 #include "bif/bif_4_1_d.h"
62
63 #define AMDGPU_TTM_VRAM_MAX_DW_READ     (size_t)128
64
65 static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
66                                    struct ttm_tt *ttm,
67                                    struct ttm_resource *bo_mem);
68 static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
69                                       struct ttm_tt *ttm);
70
71 static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
72                                     unsigned int type,
73                                     uint64_t size_in_page)
74 {
75         return ttm_range_man_init(&adev->mman.bdev, type,
76                                   false, size_in_page);
77 }
78
79 /**
80  * amdgpu_evict_flags - Compute placement flags
81  *
82  * @bo: The buffer object to evict
83  * @placement: Possible destination(s) for evicted BO
84  *
85  * Fill in placement data when ttm_bo_evict() is called
86  */
87 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
88                                 struct ttm_placement *placement)
89 {
90         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
91         struct amdgpu_bo *abo;
92         static const struct ttm_place placements = {
93                 .fpfn = 0,
94                 .lpfn = 0,
95                 .mem_type = TTM_PL_SYSTEM,
96                 .flags = 0
97         };
98
99         /* Don't handle scatter gather BOs */
100         if (bo->type == ttm_bo_type_sg) {
101                 placement->num_placement = 0;
102                 placement->num_busy_placement = 0;
103                 return;
104         }
105
106         /* Object isn't an AMDGPU object so ignore */
107         if (!amdgpu_bo_is_amdgpu_bo(bo)) {
108                 placement->placement = &placements;
109                 placement->busy_placement = &placements;
110                 placement->num_placement = 1;
111                 placement->num_busy_placement = 1;
112                 return;
113         }
114
115         abo = ttm_to_amdgpu_bo(bo);
116         switch (bo->mem.mem_type) {
117         case AMDGPU_PL_GDS:
118         case AMDGPU_PL_GWS:
119         case AMDGPU_PL_OA:
120                 placement->num_placement = 0;
121                 placement->num_busy_placement = 0;
122                 return;
123
124         case TTM_PL_VRAM:
125                 if (!adev->mman.buffer_funcs_enabled) {
126                         /* Move to system memory */
127                         amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
128                 } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
129                            !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
130                            amdgpu_bo_in_cpu_visible_vram(abo)) {
131
132                         /* Try evicting to the CPU inaccessible part of VRAM
133                          * first, but only set GTT as busy placement, so this
134                          * BO will be evicted to GTT rather than causing other
135                          * BOs to be evicted from VRAM
136                          */
137                         amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
138                                                          AMDGPU_GEM_DOMAIN_GTT);
139                         abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
140                         abo->placements[0].lpfn = 0;
141                         abo->placement.busy_placement = &abo->placements[1];
142                         abo->placement.num_busy_placement = 1;
143                 } else {
144                         /* Move to GTT memory */
145                         amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
146                 }
147                 break;
148         case TTM_PL_TT:
149         default:
150                 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
151                 break;
152         }
153         *placement = abo->placement;
154 }
155
156 /**
157  * amdgpu_verify_access - Verify access for a mmap call
158  *
159  * @bo: The buffer object to map
160  * @filp: The file pointer from the process performing the mmap
161  *
162  * This is called by ttm_bo_mmap() to verify whether a process
163  * has the right to mmap a BO to their process space.
164  */
165 static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
166 {
167         struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
168
169         /*
170          * Don't verify access for KFD BOs. They don't have a GEM
171          * object associated with them.
172          */
173         if (abo->kfd_bo)
174                 return 0;
175
176         if (amdgpu_ttm_tt_get_usermm(bo->ttm))
177                 return -EPERM;
178         return drm_vma_node_verify_access(&abo->tbo.base.vma_node,
179                                           filp->private_data);
180 }
181
182 /**
183  * amdgpu_ttm_map_buffer - Map memory into the GART windows
184  * @bo: buffer object to map
185  * @mem: memory object to map
186  * @mm_cur: range to map
187  * @num_pages: number of pages to map
188  * @window: which GART window to use
189  * @ring: DMA ring to use for the copy
190  * @tmz: if we should set up a TMZ-enabled mapping
191  * @addr: resulting address inside the MC address space
192  *
193  * Set up one of the GART windows to access a specific piece of memory, or
194  * return the physical address for local memory.
195  */
196 static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
197                                  struct ttm_resource *mem,
198                                  struct amdgpu_res_cursor *mm_cur,
199                                  unsigned num_pages, unsigned window,
200                                  struct amdgpu_ring *ring, bool tmz,
201                                  uint64_t *addr)
202 {
203         struct amdgpu_device *adev = ring->adev;
204         struct amdgpu_job *job;
205         unsigned num_dw, num_bytes;
206         struct dma_fence *fence;
207         uint64_t src_addr, dst_addr;
208         void *cpu_addr;
209         uint64_t flags;
210         unsigned int i;
211         int r;
212
213         BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
214                AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
215
216         /* Map only what can't be accessed directly */
217         if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
218                 *addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
219                         mm_cur->start;
220                 return 0;
221         }
222
223         *addr = adev->gmc.gart_start;
224         *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
225                 AMDGPU_GPU_PAGE_SIZE;
226         *addr += mm_cur->start & ~PAGE_MASK;
227
228         num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
229         num_bytes = num_pages * 8;
230
231         r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
232                                      AMDGPU_IB_POOL_DELAYED, &job);
233         if (r)
234                 return r;
235
236         src_addr = num_dw * 4;
237         src_addr += job->ibs[0].gpu_addr;
238
239         dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
240         dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
241         amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
242                                 dst_addr, num_bytes, false);
243
244         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
245         WARN_ON(job->ibs[0].length_dw > num_dw);
246
247         flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
248         if (tmz)
249                 flags |= AMDGPU_PTE_TMZ;
250
251         cpu_addr = &job->ibs[0].ptr[num_dw];
252
253         if (mem->mem_type == TTM_PL_TT) {
254                 dma_addr_t *dma_addr;
255
256                 dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
257                 r = amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags,
258                                     cpu_addr);
259                 if (r)
260                         goto error_free;
261         } else {
262                 dma_addr_t dma_address;
263
264                 dma_address = mm_cur->start;
265                 dma_address += adev->vm_manager.vram_base_offset;
266
267                 for (i = 0; i < num_pages; ++i) {
268                         r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
269                                             &dma_address, flags, cpu_addr);
270                         if (r)
271                                 goto error_free;
272
273                         dma_address += PAGE_SIZE;
274                 }
275         }
276
277         r = amdgpu_job_submit(job, &adev->mman.entity,
278                               AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
279         if (r)
280                 goto error_free;
281
282         dma_fence_put(fence);
283
284         return r;
285
286 error_free:
287         amdgpu_job_free(job);
288         return r;
289 }
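
/*
 * Illustrative sketch, not part of the upstream file: how the GART window
 * address used by amdgpu_ttm_map_buffer() is composed.  Window N starts at
 * gart_start + N * AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GPU_PAGE_SIZE and the
 * low bits of the cursor position carry the sub-page offset, mirroring the
 * arithmetic above.  The helper name is hypothetical.
 */
static inline u64 amdgpu_ttm_example_window_addr(struct amdgpu_device *adev,
						 unsigned int window,
						 u64 cursor_start)
{
	u64 addr = adev->gmc.gart_start;

	/* base MC address of this GART window */
	addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;
	/* keep the offset within the first mapped page */
	addr += cursor_start & ~PAGE_MASK;
	return addr;
}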
290
291 /**
292  * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
293  * @adev: amdgpu device
294  * @src: buffer/address where to read from
295  * @dst: buffer/address where to write to
296  * @size: number of bytes to copy
297  * @tmz: if a secure copy should be used
298  * @resv: resv object to sync to
299  * @f: Returns the last fence if multiple jobs are submitted.
300  *
301  * The function copies @size bytes from {src->mem + src->offset} to
302  * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
303  * move and different for a BO to BO copy.
304  *
305  */
306 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
307                                const struct amdgpu_copy_mem *src,
308                                const struct amdgpu_copy_mem *dst,
309                                uint64_t size, bool tmz,
310                                struct dma_resv *resv,
311                                struct dma_fence **f)
312 {
313         const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
314                                         AMDGPU_GPU_PAGE_SIZE);
315
316         struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
317         struct amdgpu_res_cursor src_mm, dst_mm;
318         struct dma_fence *fence = NULL;
319         int r = 0;
320
321         if (!adev->mman.buffer_funcs_enabled) {
322                 DRM_ERROR("Trying to move memory with ring turned off.\n");
323                 return -EINVAL;
324         }
325
326         amdgpu_res_first(src->mem, src->offset, size, &src_mm);
327         amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);
328
329         mutex_lock(&adev->mman.gtt_window_lock);
330         while (src_mm.remaining) {
331                 uint32_t src_page_offset = src_mm.start & ~PAGE_MASK;
332                 uint32_t dst_page_offset = dst_mm.start & ~PAGE_MASK;
333                 struct dma_fence *next;
334                 uint32_t cur_size;
335                 uint64_t from, to;
336
337                 /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
338                  * begins at an offset, then adjust the size accordingly
339                  */
340                 cur_size = max(src_page_offset, dst_page_offset);
341                 cur_size = min(min3(src_mm.size, dst_mm.size, size),
342                                (uint64_t)(GTT_MAX_BYTES - cur_size));
343
344                 /* Map src to window 0 and dst to window 1. */
345                 r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
346                                           PFN_UP(cur_size + src_page_offset),
347                                           0, ring, tmz, &from);
348                 if (r)
349                         goto error;
350
351                 r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
352                                           PFN_UP(cur_size + dst_page_offset),
353                                           1, ring, tmz, &to);
354                 if (r)
355                         goto error;
356
357                 r = amdgpu_copy_buffer(ring, from, to, cur_size,
358                                        resv, &next, false, true, tmz);
359                 if (r)
360                         goto error;
361
362                 dma_fence_put(fence);
363                 fence = next;
364
365                 amdgpu_res_next(&src_mm, cur_size);
366                 amdgpu_res_next(&dst_mm, cur_size);
367         }
368 error:
369         mutex_unlock(&adev->mman.gtt_window_lock);
370         if (f)
371                 *f = dma_fence_get(fence);
372         dma_fence_put(fence);
373         return r;
374 }
375
376 /*
377  * amdgpu_move_blit - Copy an entire buffer to another buffer
378  *
379  * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
380  * help move buffers to and from VRAM.
381  */
382 static int amdgpu_move_blit(struct ttm_buffer_object *bo,
383                             bool evict,
384                             struct ttm_resource *new_mem,
385                             struct ttm_resource *old_mem)
386 {
387         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
388         struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
389         struct amdgpu_copy_mem src, dst;
390         struct dma_fence *fence = NULL;
391         int r;
392
393         src.bo = bo;
394         dst.bo = bo;
395         src.mem = old_mem;
396         dst.mem = new_mem;
397         src.offset = 0;
398         dst.offset = 0;
399
400         r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
401                                        new_mem->num_pages << PAGE_SHIFT,
402                                        amdgpu_bo_encrypted(abo),
403                                        bo->base.resv, &fence);
404         if (r)
405                 goto error;
406
407         /* clear the space being freed */
408         if (old_mem->mem_type == TTM_PL_VRAM &&
409             (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
410                 struct dma_fence *wipe_fence = NULL;
411
412                 r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
413                                        NULL, &wipe_fence);
414                 if (r) {
415                         goto error;
416                 } else if (wipe_fence) {
417                         dma_fence_put(fence);
418                         fence = wipe_fence;
419                 }
420         }
421
422         /* Always block for VM page tables before committing the new location */
423         if (bo->type == ttm_bo_type_kernel)
424                 r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
425         else
426                 r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
427         dma_fence_put(fence);
428         return r;
429
430 error:
431         if (fence)
432                 dma_fence_wait(fence, false);
433         dma_fence_put(fence);
434         return r;
435 }
436
437 /*
438  * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
439  *
440  * Called by amdgpu_bo_move()
441  */
442 static bool amdgpu_mem_visible(struct amdgpu_device *adev,
443                                struct ttm_resource *mem)
444 {
445         uint64_t mem_size = (u64)mem->num_pages << PAGE_SHIFT;
446         struct amdgpu_res_cursor cursor;
447
448         if (mem->mem_type == TTM_PL_SYSTEM ||
449             mem->mem_type == TTM_PL_TT)
450                 return true;
451         if (mem->mem_type != TTM_PL_VRAM)
452                 return false;
453
454         amdgpu_res_first(mem, 0, mem_size, &cursor);
455
456         /* ttm_resource_ioremap only supports contiguous memory */
457         if (cursor.size != mem_size)
458                 return false;
459
460         return cursor.start + cursor.size <= adev->gmc.visible_vram_size;
461 }
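
/*
 * Iteration sketch, illustrative only: the amdgpu_res_cursor pattern used
 * throughout this file to walk a ttm_resource one contiguous chunk at a time
 * (see amdgpu_ttm_copy_mem_to_mem() and amdgpu_ttm_access_memory()).  The
 * function name is hypothetical.
 */
static void amdgpu_ttm_example_walk_resource(struct ttm_resource *mem)
{
	struct amdgpu_res_cursor cursor;

	amdgpu_res_first(mem, 0, (u64)mem->num_pages << PAGE_SHIFT, &cursor);
	while (cursor.remaining) {
		/* cursor.start / cursor.size describe one contiguous chunk */
		amdgpu_res_next(&cursor, cursor.size);
	}
}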
462
463 /*
464  * amdgpu_bo_move - Move a buffer object to a new memory location
465  *
466  * Called by ttm_bo_handle_move_mem()
467  */
468 static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
469                           struct ttm_operation_ctx *ctx,
470                           struct ttm_resource *new_mem,
471                           struct ttm_place *hop)
472 {
473         struct amdgpu_device *adev;
474         struct amdgpu_bo *abo;
475         struct ttm_resource *old_mem = &bo->mem;
476         int r;
477
478         if (new_mem->mem_type == TTM_PL_TT) {
479                 r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
480                 if (r)
481                         return r;
482         }
483
484         /* Can't move a pinned BO */
485         abo = ttm_to_amdgpu_bo(bo);
486         if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
487                 return -EINVAL;
488
489         adev = amdgpu_ttm_adev(bo->bdev);
490
491         if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
492                 ttm_bo_move_null(bo, new_mem);
493                 goto out;
494         }
495         if (old_mem->mem_type == TTM_PL_SYSTEM &&
496             new_mem->mem_type == TTM_PL_TT) {
497                 ttm_bo_move_null(bo, new_mem);
498                 goto out;
499         }
500         if (old_mem->mem_type == TTM_PL_TT &&
501             new_mem->mem_type == TTM_PL_SYSTEM) {
502                 r = ttm_bo_wait_ctx(bo, ctx);
503                 if (r)
504                         return r;
505
506                 amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
507                 ttm_resource_free(bo, &bo->mem);
508                 ttm_bo_assign_mem(bo, new_mem);
509                 goto out;
510         }
511
512         if (old_mem->mem_type == AMDGPU_PL_GDS ||
513             old_mem->mem_type == AMDGPU_PL_GWS ||
514             old_mem->mem_type == AMDGPU_PL_OA ||
515             new_mem->mem_type == AMDGPU_PL_GDS ||
516             new_mem->mem_type == AMDGPU_PL_GWS ||
517             new_mem->mem_type == AMDGPU_PL_OA) {
518                 /* Nothing to save here */
519                 ttm_bo_move_null(bo, new_mem);
520                 goto out;
521         }
522
523         if (adev->mman.buffer_funcs_enabled) {
524                 if (((old_mem->mem_type == TTM_PL_SYSTEM &&
525                       new_mem->mem_type == TTM_PL_VRAM) ||
526                      (old_mem->mem_type == TTM_PL_VRAM &&
527                       new_mem->mem_type == TTM_PL_SYSTEM))) {
528                         hop->fpfn = 0;
529                         hop->lpfn = 0;
530                         hop->mem_type = TTM_PL_TT;
531                         hop->flags = 0;
532                         return -EMULTIHOP;
533                 }
534
535                 r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
536         } else {
537                 r = -ENODEV;
538         }
539
540         if (r) {
541                 /* Check that all memory is CPU accessible */
542                 if (!amdgpu_mem_visible(adev, old_mem) ||
543                     !amdgpu_mem_visible(adev, new_mem)) {
544                         pr_err("Move buffer fallback to memcpy unavailable\n");
545                         return r;
546                 }
547
548                 r = ttm_bo_move_memcpy(bo, ctx, new_mem);
549                 if (r)
550                         return r;
551         }
552
553         if (bo->type == ttm_bo_type_device &&
554             new_mem->mem_type == TTM_PL_VRAM &&
555             old_mem->mem_type != TTM_PL_VRAM) {
556                 /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
557                  * accesses the BO after it's moved.
558                  */
559                 abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
560         }
561
562 out:
563         /* update statistics */
564         atomic64_add(bo->base.size, &adev->num_bytes_moved);
565         amdgpu_bo_move_notify(bo, evict, new_mem);
566         return 0;
567 }
568
569 /*
570  * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
571  *
572  * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
573  */
574 static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
575 {
576         struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
577         struct drm_mm_node *mm_node = mem->mm_node;
578         size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
579
580         switch (mem->mem_type) {
581         case TTM_PL_SYSTEM:
582                 /* system memory */
583                 return 0;
584         case TTM_PL_TT:
585                 break;
586         case TTM_PL_VRAM:
587                 mem->bus.offset = mem->start << PAGE_SHIFT;
588                 /* check if it's visible */
589                 if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
590                         return -EINVAL;
591                 /* Only physically contiguous buffers apply. In a contiguous
592                  * buffer, size of the first mm_node would match the number of
593                  * pages in ttm_resource.
594                  */
595                 if (adev->mman.aper_base_kaddr &&
596                     (mm_node->size == mem->num_pages))
597                         mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
598                                         mem->bus.offset;
599
600                 mem->bus.offset += adev->gmc.aper_base;
601                 mem->bus.is_iomem = true;
602                 if (adev->gmc.xgmi.connected_to_cpu)
603                         mem->bus.caching = ttm_cached;
604                 else
605                         mem->bus.caching = ttm_write_combined;
606                 break;
607         default:
608                 return -EINVAL;
609         }
610         return 0;
611 }
612
613 static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
614                                            unsigned long page_offset)
615 {
616         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
617         struct amdgpu_res_cursor cursor;
618
619         amdgpu_res_first(&bo->mem, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
620         return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
621 }
622
623 /**
624  * amdgpu_ttm_domain_start - Returns GPU start address
625  * @adev: amdgpu device object
626  * @type: type of the memory
627  *
628  * Returns:
629  * GPU start address of a memory domain
630  */
631
632 uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
633 {
634         switch (type) {
635         case TTM_PL_TT:
636                 return adev->gmc.gart_start;
637         case TTM_PL_VRAM:
638                 return adev->gmc.vram_start;
639         }
640
641         return 0;
642 }
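
/*
 * Usage sketch, illustrative only: translating an offset inside a ttm_resource
 * into an MC address with amdgpu_ttm_domain_start(), as done for the directly
 * accessible case in amdgpu_ttm_map_buffer().  The helper name is hypothetical.
 */
static inline u64 amdgpu_ttm_example_mc_addr(struct amdgpu_device *adev,
					     struct ttm_resource *mem,
					     u64 offset)
{
	return amdgpu_ttm_domain_start(adev, mem->mem_type) + offset;
}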
643
644 /*
645  * TTM backend functions.
646  */
647 struct amdgpu_ttm_tt {
648         struct ttm_tt   ttm;
649         struct drm_gem_object   *gobj;
650         u64                     offset;
651         uint64_t                userptr;
652         struct task_struct      *usertask;
653         uint32_t                userflags;
654         bool                    bound;
655 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
656         struct hmm_range        *range;
657 #endif
658 };
659
660 #ifdef CONFIG_DRM_AMDGPU_USERPTR
661 /*
662  * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
663  * memory and start HMM tracking of CPU page table updates
664  *
665  * The calling function must call amdgpu_ttm_tt_get_user_pages_done() once and
666  * only once afterwards to stop HMM tracking
667  */
668 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
669 {
670         struct ttm_tt *ttm = bo->tbo.ttm;
671         struct amdgpu_ttm_tt *gtt = (void *)ttm;
672         unsigned long start = gtt->userptr;
673         struct vm_area_struct *vma;
674         struct hmm_range *range;
675         unsigned long timeout;
676         struct mm_struct *mm;
677         unsigned long i;
678         int r = 0;
679
680         mm = bo->notifier.mm;
681         if (unlikely(!mm)) {
682                 DRM_DEBUG_DRIVER("BO is not registered?\n");
683                 return -EFAULT;
684         }
685
686         /* Another get_user_pages is running at the same time?? */
687         if (WARN_ON(gtt->range))
688                 return -EFAULT;
689
690         if (!mmget_not_zero(mm)) /* Happens during process shutdown */
691                 return -ESRCH;
692
693         range = kzalloc(sizeof(*range), GFP_KERNEL);
694         if (unlikely(!range)) {
695                 r = -ENOMEM;
696                 goto out;
697         }
698         range->notifier = &bo->notifier;
699         range->start = bo->notifier.interval_tree.start;
700         range->end = bo->notifier.interval_tree.last + 1;
701         range->default_flags = HMM_PFN_REQ_FAULT;
702         if (!amdgpu_ttm_tt_is_readonly(ttm))
703                 range->default_flags |= HMM_PFN_REQ_WRITE;
704
705         range->hmm_pfns = kvmalloc_array(ttm->num_pages,
706                                          sizeof(*range->hmm_pfns), GFP_KERNEL);
707         if (unlikely(!range->hmm_pfns)) {
708                 r = -ENOMEM;
709                 goto out_free_ranges;
710         }
711
712         mmap_read_lock(mm);
713         vma = find_vma(mm, start);
714         if (unlikely(!vma || start < vma->vm_start)) {
715                 r = -EFAULT;
716                 goto out_unlock;
717         }
718         if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
719                 vma->vm_file)) {
720                 r = -EPERM;
721                 goto out_unlock;
722         }
723         mmap_read_unlock(mm);
724         timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
725
726 retry:
727         range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
728
729         mmap_read_lock(mm);
730         r = hmm_range_fault(range);
731         mmap_read_unlock(mm);
732         if (unlikely(r)) {
733                 /*
734                  * FIXME: This timeout should encompass the retry from
735                  * mmu_interval_read_retry() as well.
736                  */
737                 if (r == -EBUSY && !time_after(jiffies, timeout))
738                         goto retry;
739                 goto out_free_pfns;
740         }
741
742         /*
743          * Due to default_flags, all pages are HMM_PFN_VALID or
744          * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
745          * the notifier_lock, and mmu_interval_read_retry() must be done first.
746          */
747         for (i = 0; i < ttm->num_pages; i++)
748                 pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
749
750         gtt->range = range;
751         mmput(mm);
752
753         return 0;
754
755 out_unlock:
756         mmap_read_unlock(mm);
757 out_free_pfns:
758         kvfree(range->hmm_pfns);
759 out_free_ranges:
760         kfree(range);
761 out:
762         mmput(mm);
763         return r;
764 }
765
766 /*
767  * amdgpu_ttm_tt_get_user_pages_done - stop HMM tracking of CPU page table
768  * changes and check if the pages backing this ttm range have been invalidated
769  *
770  * Returns: true if pages are still valid
771  */
772 bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
773 {
774         struct amdgpu_ttm_tt *gtt = (void *)ttm;
775         bool r = false;
776
777         if (!gtt || !gtt->userptr)
778                 return false;
779
780         DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
781                 gtt->userptr, ttm->num_pages);
782
783         WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
784                 "No user pages to check\n");
785
786         if (gtt->range) {
787                 /*
788                  * FIXME: Must always hold notifier_lock for this, and must
789                  * not ignore the return code.
790                  */
791                 r = mmu_interval_read_retry(gtt->range->notifier,
792                                          gtt->range->notifier_seq);
793                 kvfree(gtt->range->hmm_pfns);
794                 kfree(gtt->range);
795                 gtt->range = NULL;
796         }
797
798         return !r;
799 }
800 #endif
801
802 /*
803  * amdgpu_ttm_tt_set_user_pages - Copy the user page array in, replacing old pages as necessary.
804  *
805  * Called by amdgpu_cs_list_validate(). This creates the page list
806  * that backs user memory and will ultimately be mapped into the device
807  * address space.
808  */
809 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
810 {
811         unsigned long i;
812
813         for (i = 0; i < ttm->num_pages; ++i)
814                 ttm->pages[i] = pages ? pages[i] : NULL;
815 }
816
817 /*
818  * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
819  *
820  * Called by amdgpu_ttm_backend_bind()
821  */
822 static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
823                                      struct ttm_tt *ttm)
824 {
825         struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
826         struct amdgpu_ttm_tt *gtt = (void *)ttm;
827         int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
828         enum dma_data_direction direction = write ?
829                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
830         int r;
831
832         /* Allocate an SG array and squash pages into it */
833         r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
834                                       (u64)ttm->num_pages << PAGE_SHIFT,
835                                       GFP_KERNEL);
836         if (r)
837                 goto release_sg;
838
839         /* Map SG to device */
840         r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
841         if (r)
842                 goto release_sg;
843
844         /* convert SG to linear array of pages and dma addresses */
845         drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
846                                        ttm->num_pages);
847
848         return 0;
849
850 release_sg:
851         kfree(ttm->sg);
852         ttm->sg = NULL;
853         return r;
854 }
855
856 /*
857  * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
858  */
859 static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
860                                         struct ttm_tt *ttm)
861 {
862         struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
863         struct amdgpu_ttm_tt *gtt = (void *)ttm;
864         int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
865         enum dma_data_direction direction = write ?
866                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
867
868         /* double check that we don't free the table twice */
869         if (!ttm->sg || !ttm->sg->sgl)
870                 return;
871
872         /* unmap the pages mapped to the device */
873         dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
874         sg_free_table(ttm->sg);
875
876 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
877         if (gtt->range) {
878                 unsigned long i;
879
880                 for (i = 0; i < ttm->num_pages; i++) {
881                         if (ttm->pages[i] !=
882                             hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
883                                 break;
884                 }
885
886                 WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
887         }
888 #endif
889 }
890
891 static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
892                                 struct ttm_buffer_object *tbo,
893                                 uint64_t flags)
894 {
895         struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
896         struct ttm_tt *ttm = tbo->ttm;
897         struct amdgpu_ttm_tt *gtt = (void *)ttm;
898         int r;
899
900         if (amdgpu_bo_encrypted(abo))
901                 flags |= AMDGPU_PTE_TMZ;
902
903         if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
904                 uint64_t page_idx = 1;
905
906                 r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
907                                 ttm->pages, gtt->ttm.dma_address, flags);
908                 if (r)
909                         goto gart_bind_fail;
910
911                 /* The memory type of the first page defaults to UC. Now
912                  * modify the memory type to NC from the second page of
913                  * the BO onward.
914                  */
915                 flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
916                 flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
917
918                 r = amdgpu_gart_bind(adev,
919                                 gtt->offset + (page_idx << PAGE_SHIFT),
920                                 ttm->num_pages - page_idx,
921                                 &ttm->pages[page_idx],
922                                 &(gtt->ttm.dma_address[page_idx]), flags);
923         } else {
924                 r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
925                                      ttm->pages, gtt->ttm.dma_address, flags);
926         }
927
928 gart_bind_fail:
929         if (r)
930                 DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
931                           ttm->num_pages, gtt->offset);
932
933         return r;
934 }
935
936 /*
937  * amdgpu_ttm_backend_bind - Bind GTT memory
938  *
939  * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
940  * This handles binding GTT memory to the device address space.
941  */
942 static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
943                                    struct ttm_tt *ttm,
944                                    struct ttm_resource *bo_mem)
945 {
946         struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
947         struct amdgpu_ttm_tt *gtt = (void*)ttm;
948         uint64_t flags;
949         int r = 0;
950
951         if (!bo_mem)
952                 return -EINVAL;
953
954         if (gtt->bound)
955                 return 0;
956
957         if (gtt->userptr) {
958                 r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
959                 if (r) {
960                         DRM_ERROR("failed to pin userptr\n");
961                         return r;
962                 }
963         }
964         if (!ttm->num_pages) {
965                 WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
966                      ttm->num_pages, bo_mem, ttm);
967         }
968
969         if (bo_mem->mem_type == AMDGPU_PL_GDS ||
970             bo_mem->mem_type == AMDGPU_PL_GWS ||
971             bo_mem->mem_type == AMDGPU_PL_OA)
972                 return -EINVAL;
973
974         if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
975                 gtt->offset = AMDGPU_BO_INVALID_OFFSET;
976                 return 0;
977         }
978
979         /* compute PTE flags relevant to this BO memory */
980         flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
981
982         /* bind pages into GART page tables */
983         gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
984         r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
985                 ttm->pages, gtt->ttm.dma_address, flags);
986
987         if (r)
988                 DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
989                           ttm->num_pages, gtt->offset);
990         gtt->bound = true;
991         return r;
992 }
993
994 /*
995  * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
996  * through AGP or GART aperture.
997  *
998  * If bo is accessible through AGP aperture, then use AGP aperture
999  * to access bo; otherwise allocate logical space in GART aperture
1000  * and map bo to GART aperture.
1001  */
1002 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
1003 {
1004         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1005         struct ttm_operation_ctx ctx = { false, false };
1006         struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
1007         struct ttm_resource tmp;
1008         struct ttm_placement placement;
1009         struct ttm_place placements;
1010         uint64_t addr, flags;
1011         int r;
1012
1013         if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
1014                 return 0;
1015
1016         addr = amdgpu_gmc_agp_addr(bo);
1017         if (addr != AMDGPU_BO_INVALID_OFFSET) {
1018                 bo->mem.start = addr >> PAGE_SHIFT;
1019         } else {
1020
1021                 /* allocate GART space */
1022                 placement.num_placement = 1;
1023                 placement.placement = &placements;
1024                 placement.num_busy_placement = 1;
1025                 placement.busy_placement = &placements;
1026                 placements.fpfn = 0;
1027                 placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
1028                 placements.mem_type = TTM_PL_TT;
1029                 placements.flags = bo->mem.placement;
1030
1031                 r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
1032                 if (unlikely(r))
1033                         return r;
1034
1035                 /* compute PTE flags for this buffer object */
1036                 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
1037
1038                 /* Bind pages */
1039                 gtt->offset = (u64)tmp.start << PAGE_SHIFT;
1040                 r = amdgpu_ttm_gart_bind(adev, bo, flags);
1041                 if (unlikely(r)) {
1042                         ttm_resource_free(bo, &tmp);
1043                         return r;
1044                 }
1045
1046                 ttm_resource_free(bo, &bo->mem);
1047                 bo->mem = tmp;
1048         }
1049
1050         return 0;
1051 }
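
/*
 * Caller-side sketch, illustrative only: once a BO has been reserved and
 * validated into GTT, amdgpu_ttm_alloc_gart() gives it a GART (or AGP)
 * address so that its GPU offset can be queried.  Locking and the validation
 * step itself are omitted; the helper name is hypothetical.
 */
static int amdgpu_ttm_example_get_gpu_addr(struct amdgpu_bo *bo, u64 *gpu_addr)
{
	int r;

	r = amdgpu_ttm_alloc_gart(&bo->tbo);	/* assign GART/AGP space */
	if (r)
		return r;

	*gpu_addr = amdgpu_bo_gpu_offset(bo);	/* now a valid MC address */
	return 0;
}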
1052
1053 /*
1054  * amdgpu_ttm_recover_gart - Rebind GTT pages
1055  *
1056  * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
1057  * rebind GTT pages during a GPU reset.
1058  */
1059 int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
1060 {
1061         struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
1062         uint64_t flags;
1063         int r;
1064
1065         if (!tbo->ttm)
1066                 return 0;
1067
1068         flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
1069         r = amdgpu_ttm_gart_bind(adev, tbo, flags);
1070
1071         return r;
1072 }
1073
1074 /*
1075  * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
1076  *
1077  * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
1078  * ttm_tt_destroy().
1079  */
1080 static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
1081                                       struct ttm_tt *ttm)
1082 {
1083         struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1084         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1085         int r;
1086
1087         /* if the pages have userptr pinning then clear that first */
1088         if (gtt->userptr)
1089                 amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
1090
1091         if (!gtt->bound)
1092                 return;
1093
1094         if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
1095                 return;
1096
1097         /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
1098         r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
1099         if (r)
1100                 DRM_ERROR("failed to unbind %u pages at 0x%08llX\n",
1101                           gtt->ttm.num_pages, gtt->offset);
1102         gtt->bound = false;
1103 }
1104
1105 static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
1106                                        struct ttm_tt *ttm)
1107 {
1108         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1109
1110         amdgpu_ttm_backend_unbind(bdev, ttm);
1111         ttm_tt_destroy_common(bdev, ttm);
1112         if (gtt->usertask)
1113                 put_task_struct(gtt->usertask);
1114
1115         ttm_tt_fini(&gtt->ttm);
1116         kfree(gtt);
1117 }
1118
1119 /**
1120  * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
1121  *
1122  * @bo: The buffer object to create a GTT ttm_tt object around
1123  * @page_flags: Page flags to be added to the ttm_tt object
1124  *
1125  * Called by ttm_tt_create().
1126  */
1127 static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
1128                                            uint32_t page_flags)
1129 {
1130         struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1131         struct amdgpu_ttm_tt *gtt;
1132         enum ttm_caching caching;
1133
1134         gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
1135         if (gtt == NULL) {
1136                 return NULL;
1137         }
1138         gtt->gobj = &bo->base;
1139
1140         if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
1141                 caching = ttm_write_combined;
1142         else
1143                 caching = ttm_cached;
1144
1145         /* allocate space for the uninitialized page entries */
1146         if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
1147                 kfree(gtt);
1148                 return NULL;
1149         }
1150         return &gtt->ttm;
1151 }
1152
1153 /*
1154  * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
1155  *
1156  * Map the pages of a ttm_tt object to an address space visible
1157  * to the underlying device.
1158  */
1159 static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
1160                                   struct ttm_tt *ttm,
1161                                   struct ttm_operation_ctx *ctx)
1162 {
1163         struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1164         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1165
1166         /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
1167         if (gtt && gtt->userptr) {
1168                 ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1169                 if (!ttm->sg)
1170                         return -ENOMEM;
1171
1172                 ttm->page_flags |= TTM_PAGE_FLAG_SG;
1173                 return 0;
1174         }
1175
1176         if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
1177                 if (!ttm->sg) {
1178                         struct dma_buf_attachment *attach;
1179                         struct sg_table *sgt;
1180
1181                         attach = gtt->gobj->import_attach;
1182                         sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
1183                         if (IS_ERR(sgt))
1184                                 return PTR_ERR(sgt);
1185
1186                         ttm->sg = sgt;
1187                 }
1188
1189                 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
1190                                                ttm->num_pages);
1191                 return 0;
1192         }
1193
1194         return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
1195 }
1196
1197 /*
1198  * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
1199  *
1200  * Unmaps pages of a ttm_tt object from the device address space and
1201  * unpopulates the page array backing it.
1202  */
1203 static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
1204                                      struct ttm_tt *ttm)
1205 {
1206         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1207         struct amdgpu_device *adev;
1208
1209         if (gtt && gtt->userptr) {
1210                 amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1211                 kfree(ttm->sg);
1212                 ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
1213                 return;
1214         }
1215
1216         if (ttm->sg && gtt->gobj->import_attach) {
1217                 struct dma_buf_attachment *attach;
1218
1219                 attach = gtt->gobj->import_attach;
1220                 dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
1221                 ttm->sg = NULL;
1222                 return;
1223         }
1224
1225         if (ttm->page_flags & TTM_PAGE_FLAG_SG)
1226                 return;
1227
1228         adev = amdgpu_ttm_adev(bdev);
1229         return ttm_pool_free(&adev->mman.bdev.pool, ttm);
1230 }
1231
1232 /**
1233  * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
1234  * task
1235  *
1236  * @bo: The ttm_buffer_object to bind this userptr to
1237  * @addr:  The address in the current tasks VM space to use
1238  * @flags: Requirements of userptr object.
1239  *
1240  * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
1241  * to current task
1242  */
1243 int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
1244                               uint64_t addr, uint32_t flags)
1245 {
1246         struct amdgpu_ttm_tt *gtt;
1247
1248         if (!bo->ttm) {
1249                 /* TODO: We want a separate TTM object type for userptrs */
1250                 bo->ttm = amdgpu_ttm_tt_create(bo, 0);
1251                 if (bo->ttm == NULL)
1252                         return -ENOMEM;
1253         }
1254
1255         gtt = (void *)bo->ttm;
1256         gtt->userptr = addr;
1257         gtt->userflags = flags;
1258
1259         if (gtt->usertask)
1260                 put_task_struct(gtt->usertask);
1261         gtt->usertask = current->group_leader;
1262         get_task_struct(gtt->usertask);
1263
1264         return 0;
1265 }
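
/*
 * Lifecycle sketch, illustrative only and heavily simplified from the real
 * ioctl/CS paths: how the userptr helpers in this file fit together when
 * CONFIG_DRM_AMDGPU_USERPTR is enabled.  Error unwinding, locking and the
 * allocation of the "pages" array are left to the caller; the function name
 * and the -EAGAIN policy are hypothetical.
 */
static int amdgpu_ttm_example_userptr_cycle(struct amdgpu_bo *bo,
					    uint64_t addr, uint32_t flags,
					    struct page **pages)
{
	int r;

	/* 1) Remember the user address and flags on the BO's ttm_tt. */
	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, addr, flags);
	if (r)
		return r;

	/* 2) Fault the pages in and start HMM tracking. */
	r = amdgpu_ttm_tt_get_user_pages(bo, pages);
	if (r)
		return r;

	/* 3) Publish the pages into the ttm_tt before it gets bound. */
	amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, pages);

	/* 4) After use, check that the pages were not invalidated meanwhile. */
	if (!amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm))
		return -EAGAIN;

	return 0;
}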
1266
1267 /*
1268  * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
1269  */
1270 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1271 {
1272         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1273
1274         if (gtt == NULL)
1275                 return NULL;
1276
1277         if (gtt->usertask == NULL)
1278                 return NULL;
1279
1280         return gtt->usertask->mm;
1281 }
1282
1283 /*
1284  * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
1285  * address range for the current task.
1286  *
1287  */
1288 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1289                                   unsigned long end)
1290 {
1291         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1292         unsigned long size;
1293
1294         if (gtt == NULL || !gtt->userptr)
1295                 return false;
1296
1297         /* Return false if no part of the ttm_tt object lies within
1298          * the range
1299          */
1300         size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
1301         if (gtt->userptr > end || gtt->userptr + size <= start)
1302                 return false;
1303
1304         return true;
1305 }
1306
1307 /*
1308  * amdgpu_ttm_tt_is_userptr - Are the pages backed by userptr?
1309  */
1310 bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
1311 {
1312         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1313
1314         if (gtt == NULL || !gtt->userptr)
1315                 return false;
1316
1317         return true;
1318 }
1319
1320 /*
1321  * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
1322  */
1323 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1324 {
1325         struct amdgpu_ttm_tt *gtt = (void *)ttm;
1326
1327         if (gtt == NULL)
1328                 return false;
1329
1330         return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1331 }
1332
1333 /**
1334  * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
1335  *
1336  * @ttm: The ttm_tt object to compute the flags for
1337  * @mem: The memory registry backing this ttm_tt object
1338  *
1339  * Figure out the flags to use for a VM PDE (Page Directory Entry).
1340  */
1341 uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
1342 {
1343         uint64_t flags = 0;
1344
1345         if (mem && mem->mem_type != TTM_PL_SYSTEM)
1346                 flags |= AMDGPU_PTE_VALID;
1347
1348         if (mem && mem->mem_type == TTM_PL_TT) {
1349                 flags |= AMDGPU_PTE_SYSTEM;
1350
1351                 if (ttm->caching == ttm_cached)
1352                         flags |= AMDGPU_PTE_SNOOPED;
1353         }
1354
1355         if (mem && mem->mem_type == TTM_PL_VRAM &&
1356                         mem->bus.caching == ttm_cached)
1357                 flags |= AMDGPU_PTE_SNOOPED;
1358
1359         return flags;
1360 }
1361
1362 /**
1363  * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
1364  *
1365  * @adev: amdgpu_device pointer
1366  * @ttm: The ttm_tt object to compute the flags for
1367  * @mem: The memory registry backing this ttm_tt object
1368  *
1369  * Figure out the flags to use for a VM PTE (Page Table Entry).
1370  */
1371 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1372                                  struct ttm_resource *mem)
1373 {
1374         uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
1375
1376         flags |= adev->gart.gart_pte_flags;
1377         flags |= AMDGPU_PTE_READABLE;
1378
1379         if (!amdgpu_ttm_tt_is_readonly(ttm))
1380                 flags |= AMDGPU_PTE_WRITEABLE;
1381
1382         return flags;
1383 }
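
/*
 * Composition sketch, illustrative only: for a cached GTT resource the flags
 * computed above amount to adev->gart.gart_pte_flags plus AMDGPU_PTE_VALID |
 * AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED | AMDGPU_PTE_READABLE, with
 * AMDGPU_PTE_WRITEABLE added unless the ttm_tt is a read-only userptr.  The
 * helper below only mirrors how amdgpu_ttm_backend_bind() feeds the result
 * into amdgpu_gart_bind(); its name is hypothetical.
 */
static int amdgpu_ttm_example_bind(struct amdgpu_device *adev,
				   struct ttm_tt *ttm,
				   struct ttm_resource *bo_mem)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	uint64_t flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);

	return amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
				ttm->pages, gtt->ttm.dma_address, flags);
}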
1384
1385 /*
1386  * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
1387  * object.
1388  *
1389  * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
1390  * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
1391  * it can find space for a new object and by ttm_bo_force_list_clean() which is
1392  * used to clean out a memory space.
1393  */
1394 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1395                                             const struct ttm_place *place)
1396 {
1397         unsigned long num_pages = bo->mem.num_pages;
1398         struct amdgpu_res_cursor cursor;
1399         struct dma_resv_list *flist;
1400         struct dma_fence *f;
1401         int i;
1402
1403         if (bo->type == ttm_bo_type_kernel &&
1404             !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
1405                 return false;
1406
1407         /* If bo is a KFD BO, check if the bo belongs to the current process.
1408          * If true, then return false as any KFD process needs all its BOs to
1409          * be resident to run successfully
1410          */
1411         flist = dma_resv_get_list(bo->base.resv);
1412         if (flist) {
1413                 for (i = 0; i < flist->shared_count; ++i) {
1414                         f = rcu_dereference_protected(flist->shared[i],
1415                                 dma_resv_held(bo->base.resv));
1416                         if (amdkfd_fence_check_mm(f, current->mm))
1417                                 return false;
1418                 }
1419         }
1420
1421         switch (bo->mem.mem_type) {
1422         case TTM_PL_TT:
1423                 if (amdgpu_bo_is_amdgpu_bo(bo) &&
1424                     amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
1425                         return false;
1426                 return true;
1427
1428         case TTM_PL_VRAM:
1429                 /* Check each drm MM node individually */
1430                 amdgpu_res_first(&bo->mem, 0, (u64)num_pages << PAGE_SHIFT,
1431                                  &cursor);
1432                 while (cursor.remaining) {
1433                         if (place->fpfn < PFN_DOWN(cursor.start + cursor.size)
1434                             && !(place->lpfn &&
1435                                  place->lpfn <= PFN_DOWN(cursor.start)))
1436                                 return true;
1437
1438                         amdgpu_res_next(&cursor, cursor.size);
1439                 }
1440                 return false;
1441
1442         default:
1443                 break;
1444         }
1445
1446         return ttm_bo_eviction_valuable(bo, place);
1447 }
1448
1449 /**
1450  * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
1451  *
1452  * @bo:  The buffer object to read/write
1453  * @offset:  Offset into buffer object
1454  * @buf:  Secondary buffer to write/read from
1455  * @len: Length in bytes of access
1456  * @write:  true if writing
1457  *
1458  * This is used to access VRAM that backs a buffer object via MMIO
1459  * access for debugging purposes.
1460  */
1461 static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1462                                     unsigned long offset, void *buf, int len,
1463                                     int write)
1464 {
1465         struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1466         struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1467         struct amdgpu_res_cursor cursor;
1468         unsigned long flags;
1469         uint32_t value = 0;
1470         int ret = 0;
1471
1472         if (bo->mem.mem_type != TTM_PL_VRAM)
1473                 return -EIO;
1474
1475         amdgpu_res_first(&bo->mem, offset, len, &cursor);
1476         while (cursor.remaining) {
1477                 uint64_t aligned_pos = cursor.start & ~(uint64_t)3;
1478                 uint64_t bytes = 4 - (cursor.start & 3);
1479                 uint32_t shift = (cursor.start & 3) * 8;
1480                 uint32_t mask = 0xffffffff << shift;
1481
1482                 if (cursor.size < bytes) {
1483                         mask &= 0xffffffff >> (bytes - cursor.size) * 8;
1484                         bytes = cursor.size;
1485                 }
1486
1487                 if (mask != 0xffffffff) {
1488                         spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1489                         WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
1490                         WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
1491                         value = RREG32_NO_KIQ(mmMM_DATA);
1492                         if (write) {
1493                                 value &= ~mask;
1494                                 value |= (*(uint32_t *)buf << shift) & mask;
1495                                 WREG32_NO_KIQ(mmMM_DATA, value);
1496                         }
1497                         spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1498                         if (!write) {
1499                                 value = (value & mask) >> shift;
1500                                 memcpy(buf, &value, bytes);
1501                         }
1502                 } else {
1503                         bytes = cursor.size & ~0x3ULL;
1504                         amdgpu_device_vram_access(adev, cursor.start,
1505                                                   (uint32_t *)buf, bytes,
1506                                                   write);
1507                 }
1508
1509                 ret += bytes;
1510                 buf = (uint8_t *)buf + bytes;
1511                 amdgpu_res_next(&cursor, bytes);
1512         }
1513
1514         return ret;
1515 }
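
/*
 * Editor's note (not in the original source): amdgpu_ttm_access_memory() is
 * wired up below as the ttm_device_funcs.access_memory hook, which
 * ttm_bo_vm_access() falls back to for non-system placements; this is what
 * lets ptrace()/gdb peek and poke VRAM-backed CPU mappings.
 */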
1516
1517 static void
1518 amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
1519 {
1520         amdgpu_bo_move_notify(bo, false, NULL);
1521 }
1522
1523 static struct ttm_device_funcs amdgpu_bo_driver = {
1524         .ttm_tt_create = &amdgpu_ttm_tt_create,
1525         .ttm_tt_populate = &amdgpu_ttm_tt_populate,
1526         .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1527         .ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
1528         .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1529         .evict_flags = &amdgpu_evict_flags,
1530         .move = &amdgpu_bo_move,
1531         .verify_access = &amdgpu_verify_access,
1532         .delete_mem_notify = &amdgpu_bo_delete_mem_notify,
1533         .release_notify = &amdgpu_bo_release_notify,
1534         .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1535         .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1536         .access_memory = &amdgpu_ttm_access_memory,
1537         .del_from_lru_notify = &amdgpu_vm_del_from_lru_notify
1538 };
1539
1540 /*
1541  * Firmware Reservation functions
1542  */
1543 /**
1544  * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
1545  *
1546  * @adev: amdgpu_device pointer
1547  *
1548  * free fw reserved vram if it has been reserved.
1549  */
1550 static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
1551 {
1552         amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
1553                 NULL, &adev->mman.fw_vram_usage_va);
1554 }
1555
1556 /**
1557  * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
1558  *
1559  * @adev: amdgpu_device pointer
1560  *
1561  * create bo vram reservation from fw.
1562  */
1563 static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1564 {
1565         uint64_t vram_size = adev->gmc.visible_vram_size;
1566
1567         adev->mman.fw_vram_usage_va = NULL;
1568         adev->mman.fw_vram_usage_reserved_bo = NULL;
1569
1570         if (adev->mman.fw_vram_usage_size == 0 ||
1571             adev->mman.fw_vram_usage_size > vram_size)
1572                 return 0;
1573
1574         return amdgpu_bo_create_kernel_at(adev,
1575                                           adev->mman.fw_vram_usage_start_offset,
1576                                           adev->mman.fw_vram_usage_size,
1577                                           AMDGPU_GEM_DOMAIN_VRAM,
1578                                           &adev->mman.fw_vram_usage_reserved_bo,
1579                                           &adev->mman.fw_vram_usage_va);
1580 }
1581
1582 /*
1583  * Memory training reservation functions
1584  */
1585
1586 /**
1587  * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
1588  *
1589  * @adev: amdgpu_device pointer
1590  *
1591  * free memory training reserved vram if it has been reserved.
1592  */
1593 static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
1594 {
1595         struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1596
1597         ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
1598         amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
1599         ctx->c2p_bo = NULL;
1600
1601         return 0;
1602 }
1603
1604 static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
1605 {
1606         struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1607
1608         memset(ctx, 0, sizeof(*ctx));
1609
1610         ctx->c2p_train_data_offset =
1611                 ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M);
1612         ctx->p2c_train_data_offset =
1613                 (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
1614         ctx->train_data_size =
1615                 GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
1616
1617         DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
1618                         ctx->train_data_size,
1619                         ctx->p2c_train_data_offset,
1620                         ctx->c2p_train_data_offset);
1621 }
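
/*
 * Editor's note (reading aid, not authoritative): the p2c training data sits
 * GDDR6_MEM_TRAINING_OFFSET below the top of VRAM, the IP discovery TMR
 * occupies the top discovery_tmr_size bytes, and the c2p training buffer is
 * placed (1 MiB aligned) just below that TMR.  See amdgpu_ttm_reserve_tmr()
 * below for the actual reservations.
 */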
1622
1623 /*
1624  * reserve TMR memory at the top of VRAM which holds
1625  * IP Discovery data and is protected by PSP.
1626  */
1627 static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
1628 {
1629         int ret;
1630         struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1631         bool mem_train_support = false;
1632
1633         if (!amdgpu_sriov_vf(adev)) {
1634                 ret = amdgpu_mem_train_support(adev);
1635                 if (ret == 1)
1636                         mem_train_support = true;
1637                 else if (ret == -1)
1638                         return -EINVAL;
1639                 else
1640                         DRM_DEBUG("memory training is not supported!\n");
1641         }
1642
1643         /*
1644          * Query the reserved TMR size through atom firmwareinfo for Sienna Cichlid and onwards;
1645          * it covers all the use cases (IP discovery, G6 memory training, profiling, diagnostic data, etc.).
1646          *
1647          * Otherwise, fall back to reserving separate TMR blocks for IP discovery
1648          * data and G6 memory training data.
1649          */
1650         adev->mman.discovery_tmr_size =
1651                 amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
1652         if (!adev->mman.discovery_tmr_size)
1653                 adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET;
1654
1655         if (mem_train_support) {
1656                 /* reserve vram for mem train according to TMR location */
1657                 amdgpu_ttm_training_data_block_init(adev);
1658                 ret = amdgpu_bo_create_kernel_at(adev,
1659                                          ctx->c2p_train_data_offset,
1660                                          ctx->train_data_size,
1661                                          AMDGPU_GEM_DOMAIN_VRAM,
1662                                          &ctx->c2p_bo,
1663                                          NULL);
1664                 if (ret) {
1665                         DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
1666                         amdgpu_ttm_training_reserve_vram_fini(adev);
1667                         return ret;
1668                 }
1669                 ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
1670         }
1671
1672         ret = amdgpu_bo_create_kernel_at(adev,
1673                                 adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
1674                                 adev->mman.discovery_tmr_size,
1675                                 AMDGPU_GEM_DOMAIN_VRAM,
1676                                 &adev->mman.discovery_memory,
1677                                 NULL);
1678         if (ret) {
1679                 DRM_ERROR("alloc tmr failed(%d)!\n", ret);
1680                 amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
1681                 return ret;
1682         }
1683
1684         return 0;
1685 }
1686
1687 /*
1688  * amdgpu_ttm_init - Init the memory management (ttm) as well as various
1689  * gtt/vram related fields.
1690  *
1691  * This initializes all of the memory space pools that the TTM layer
1692  * will need such as the GTT space (system memory mapped to the device),
1693  * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
1694  * can be mapped per VMID.
1695  */
1696 int amdgpu_ttm_init(struct amdgpu_device *adev)
1697 {
1698         uint64_t gtt_size;
1699         int r;
1700         u64 vis_vram_limit;
1701
1702         mutex_init(&adev->mman.gtt_window_lock);
1703
1704         /* No other users of the address space, so set it to 0 */
1705         r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
1706                                adev_to_drm(adev)->anon_inode->i_mapping,
1707                                adev_to_drm(adev)->vma_offset_manager,
1708                                adev->need_swiotlb,
1709                                dma_addressing_limited(adev->dev));
1710         if (r) {
1711                 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1712                 return r;
1713         }
1714         adev->mman.initialized = true;
1715
1716         /* Initialize VRAM pool with all of VRAM divided into pages */
1717         r = amdgpu_vram_mgr_init(adev);
1718         if (r) {
1719                 DRM_ERROR("Failed initializing VRAM heap.\n");
1720                 return r;
1721         }
1722
1723         /* Reduce size of CPU-visible VRAM if requested */
1724         vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
1725         if (amdgpu_vis_vram_limit > 0 &&
1726             vis_vram_limit <= adev->gmc.visible_vram_size)
1727                 adev->gmc.visible_vram_size = vis_vram_limit;
1728
1729         /* Change the size here instead of the init above so only lpfn is affected */
1730         amdgpu_ttm_set_buffer_funcs_status(adev, false);
1731 #ifdef CONFIG_64BIT
1732 #ifdef CONFIG_X86
1733         if (adev->gmc.xgmi.connected_to_cpu)
1734                 adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
1735                                 adev->gmc.visible_vram_size);
1736
1737         else
1738 #endif
1739                 adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
1740                                 adev->gmc.visible_vram_size);
1741 #endif
1742
1743         /*
1744          * The reserved VRAM for firmware must be pinned to the specified
1745          * place in VRAM, so reserve it early.
1746          */
1747         r = amdgpu_ttm_fw_reserve_vram_init(adev);
1748         if (r)
1749                 return r;
1751
1752         /*
1753          * Only NAVI10 and onward ASICs support IP discovery.
1754          * If IP discovery is enabled, a block of memory needs to be
1755          * reserved for IP discovery.
1756          */
1757         if (adev->mman.discovery_bin) {
1758                 r = amdgpu_ttm_reserve_tmr(adev);
1759                 if (r)
1760                         return r;
1761         }
1762
1763         /* allocate memory as required for VGA
1764          * This is used for VGA emulation and pre-OS scanout buffers to
1765          * avoid display artifacts while transitioning between pre-OS
1766          * and driver.  */
1767         r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
1768                                        AMDGPU_GEM_DOMAIN_VRAM,
1769                                        &adev->mman.stolen_vga_memory,
1770                                        NULL);
1771         if (r)
1772                 return r;
1773         r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
1774                                        adev->mman.stolen_extended_size,
1775                                        AMDGPU_GEM_DOMAIN_VRAM,
1776                                        &adev->mman.stolen_extended_memory,
1777                                        NULL);
1778         if (r)
1779                 return r;
1780
1781         DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1782                  (unsigned)(adev->gmc.real_vram_size / (1024 * 1024)));
1783
1784         /* Compute GTT size, either based on 3/4 of the system RAM size
1785          * or on whatever the user passed on module init */
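        /*
         * Editor's note: worked example of the computation below.  With 8 GiB
         * of VRAM, 32 GiB of system RAM and the built-in default of
         * AMDGPU_DEFAULT_GTT_SIZE_MB (3 GiB at the time of writing), the
         * automatic path yields min(max(3 GiB, 8 GiB), 24 GiB) = 8 GiB of GTT.
         */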
1786         if (amdgpu_gtt_size == -1) {
1787                 struct sysinfo si;
1788
1789                 si_meminfo(&si);
1790                 gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
1791                                adev->gmc.mc_vram_size),
1792                                ((uint64_t)si.totalram * si.mem_unit * 3/4));
1793         } else
1794                 gtt_size = (uint64_t)amdgpu_gtt_size << 20;
1796
1797         /* Initialize GTT memory pool */
1798         r = amdgpu_gtt_mgr_init(adev, gtt_size);
1799         if (r) {
1800                 DRM_ERROR("Failed initializing GTT heap.\n");
1801                 return r;
1802         }
1803         DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
1804                  (unsigned)(gtt_size / (1024 * 1024)));
1805
1806         /* Initialize various on-chip memory pools */
1807         r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
1808         if (r) {
1809                 DRM_ERROR("Failed initializing GDS heap.\n");
1810                 return r;
1811         }
1812
1813         r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
1814         if (r) {
1815                 DRM_ERROR("Failed initializing gws heap.\n");
1816                 return r;
1817         }
1818
1819         r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
1820         if (r) {
1821                 DRM_ERROR("Failed initializing oa heap.\n");
1822                 return r;
1823         }
1824
1825         return 0;
1826 }
1827
1828 /*
1829  * amdgpu_ttm_fini - De-initialize the TTM memory pools
1830  */
1831 void amdgpu_ttm_fini(struct amdgpu_device *adev)
1832 {
1833         if (!adev->mman.initialized)
1834                 return;
1835
1836         amdgpu_ttm_training_reserve_vram_fini(adev);
1837         /* return the stolen vga memory back to VRAM */
1838         amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
1839         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
1840         /* return the IP Discovery TMR memory back to VRAM */
1841         amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
1842         amdgpu_ttm_fw_reserve_vram_fini(adev);
1843
1844         amdgpu_vram_mgr_fini(adev);
1845         amdgpu_gtt_mgr_fini(adev);
1846         ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
1847         ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
1848         ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
1849         ttm_device_fini(&adev->mman.bdev);
1850         adev->mman.initialized = false;
1851         DRM_INFO("amdgpu: ttm finalized\n");
1852 }
1853
1854 /**
1855  * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
1856  *
1857  * @adev: amdgpu_device pointer
1858  * @enable: true when we can use buffer functions.
1859  *
1860  * Enable/disable use of buffer functions during suspend/resume. This should
1861  * only be called at bootup or when userspace isn't running.
1862  */
1863 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
1864 {
1865         struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
1866         uint64_t size;
1867         int r;
1868
1869         if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
1870             adev->mman.buffer_funcs_enabled == enable)
1871                 return;
1872
1873         if (enable) {
1874                 struct amdgpu_ring *ring;
1875                 struct drm_gpu_scheduler *sched;
1876
1877                 ring = adev->mman.buffer_funcs_ring;
1878                 sched = &ring->sched;
1879                 r = drm_sched_entity_init(&adev->mman.entity,
1880                                           DRM_SCHED_PRIORITY_KERNEL, &sched,
1881                                           1, NULL);
1882                 if (r) {
1883                         DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
1884                                   r);
1885                         return;
1886                 }
1887         } else {
1888                 drm_sched_entity_destroy(&adev->mman.entity);
1889                 dma_fence_put(man->move);
1890                 man->move = NULL;
1891         }
1892
1893         /* this just adjusts TTM's idea of the VRAM size so lpfn gets the correct value */
1894         if (enable)
1895                 size = adev->gmc.real_vram_size;
1896         else
1897                 size = adev->gmc.visible_vram_size;
1898         man->size = size >> PAGE_SHIFT;
1899         adev->mman.buffer_funcs_enabled = enable;
1900 }
1901
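/*
 * Editor's note: the drm_dev_enter()/drm_dev_exit() pair below guards the
 * fault path against concurrent device unplug; once the device is gone the
 * handler inserts a dummy page via ttm_bo_vm_dummy_page() instead of touching
 * MMIO space that may already have been unmapped.
 */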
1902 static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf)
1903 {
1904         struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
1905         struct drm_device *ddev = bo->base.dev;
1906         vm_fault_t ret;
1907         int idx;
1908
1909         ret = ttm_bo_vm_reserve(bo, vmf);
1910         if (ret)
1911                 return ret;
1912
1913         if (drm_dev_enter(ddev, &idx)) {
1914                 ret = amdgpu_bo_fault_reserve_notify(bo);
1915                 if (ret) {
1916                         drm_dev_exit(idx);
1917                         goto unlock;
1918                 }
1919
1920                 ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
1921                                                TTM_BO_VM_NUM_PREFAULT, 1);
1922
1923                 drm_dev_exit(idx);
1924         } else {
1925                 ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
1926         }
1927         if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
1928                 return ret;
1929
1930 unlock:
1931         dma_resv_unlock(bo->base.resv);
1932         return ret;
1933 }
1934
1935 static const struct vm_operations_struct amdgpu_ttm_vm_ops = {
1936         .fault = amdgpu_ttm_fault,
1937         .open = ttm_bo_vm_open,
1938         .close = ttm_bo_vm_close,
1939         .access = ttm_bo_vm_access
1940 };
1941
1942 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
1943 {
1944         struct drm_file *file_priv = filp->private_data;
1945         struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
1946         int r;
1947
1948         r = ttm_bo_mmap(filp, vma, &adev->mman.bdev);
1949         if (unlikely(r != 0))
1950                 return r;
1951
1952         vma->vm_ops = &amdgpu_ttm_vm_ops;
1953         return 0;
1954 }
1955
1956 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
1957                        uint64_t dst_offset, uint32_t byte_count,
1958                        struct dma_resv *resv,
1959                        struct dma_fence **fence, bool direct_submit,
1960                        bool vm_needs_flush, bool tmz)
1961 {
1962         enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
1963                 AMDGPU_IB_POOL_DELAYED;
1964         struct amdgpu_device *adev = ring->adev;
1965         struct amdgpu_job *job;
1966
1967         uint32_t max_bytes;
1968         unsigned num_loops, num_dw;
1969         unsigned i;
1970         int r;
1971
1972         if (direct_submit && !ring->sched.ready) {
1973                 DRM_ERROR("Trying to move memory with ring turned off.\n");
1974                 return -EINVAL;
1975         }
1976
1977         max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
1978         num_loops = DIV_ROUND_UP(byte_count, max_bytes);
1979         num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
1980
1981         r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
1982         if (r)
1983                 return r;
1984
1985         if (vm_needs_flush) {
1986                 job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
1987                                         adev->gmc.pdb0_bo : adev->gart.bo);
1988                 job->vm_needs_flush = true;
1989         }
1990         if (resv) {
1991                 r = amdgpu_sync_resv(adev, &job->sync, resv,
1992                                      AMDGPU_SYNC_ALWAYS,
1993                                      AMDGPU_FENCE_OWNER_UNDEFINED);
1994                 if (r) {
1995                         DRM_ERROR("sync failed (%d).\n", r);
1996                         goto error_free;
1997                 }
1998         }
1999
2000         for (i = 0; i < num_loops; i++) {
2001                 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
2002
2003                 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
2004                                         dst_offset, cur_size_in_bytes, tmz);
2005
2006                 src_offset += cur_size_in_bytes;
2007                 dst_offset += cur_size_in_bytes;
2008                 byte_count -= cur_size_in_bytes;
2009         }
2010
2011         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2012         WARN_ON(job->ibs[0].length_dw > num_dw);
2013         if (direct_submit)
2014                 r = amdgpu_job_submit_direct(job, ring, fence);
2015         else
2016                 r = amdgpu_job_submit(job, &adev->mman.entity,
2017                                       AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2018         if (r)
2019                 goto error_free;
2020
2021         return r;
2022
2023 error_free:
2024         amdgpu_job_free(job);
2025         DRM_ERROR("Error scheduling IBs (%d)\n", r);
2026         return r;
2027 }
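
/*
 * Editor's note: a minimal, hypothetical usage sketch of amdgpu_copy_buffer()
 * (not part of the driver).  It assumes the buffer functions ring is enabled
 * and that @src and @dst are valid, already pinned GPU addresses.
 */
static inline int amdgpu_ttm_copy_example(struct amdgpu_device *adev,
                                          uint64_t src, uint64_t dst,
                                          uint32_t size)
{
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
        struct dma_fence *fence = NULL;
        int r;

        /* Queue the copy on the shared TTM buffer-move entity. */
        r = amdgpu_copy_buffer(ring, src, dst, size, NULL, &fence,
                               false, false, false);
        if (r)
                return r;

        /* Wait for the SDMA copy to complete before using @dst. */
        dma_fence_wait(fence, false);
        dma_fence_put(fence);
        return 0;
}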
2028
2029 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
2030                        uint32_t src_data,
2031                        struct dma_resv *resv,
2032                        struct dma_fence **fence)
2033 {
2034         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2035         uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
2036         struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2037
2038         struct amdgpu_res_cursor cursor;
2039         unsigned int num_loops, num_dw;
2040         uint64_t num_bytes;
2041
2042         struct amdgpu_job *job;
2043         int r;
2044
2045         if (!adev->mman.buffer_funcs_enabled) {
2046                 DRM_ERROR("Trying to clear memory with ring turned off.\n");
2047                 return -EINVAL;
2048         }
2049
2050         if (bo->tbo.mem.mem_type == TTM_PL_TT) {
2051                 r = amdgpu_ttm_alloc_gart(&bo->tbo);
2052                 if (r)
2053                         return r;
2054         }
2055
2056         num_bytes = bo->tbo.mem.num_pages << PAGE_SHIFT;
2057         num_loops = 0;
2058
2059         amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
2060         while (cursor.remaining) {
2061                 num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes);
2062                 amdgpu_res_next(&cursor, cursor.size);
2063         }
2064         num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
2065
2066         /* for IB padding */
2067         num_dw += 64;
2068
2069         r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
2070                                      &job);
2071         if (r)
2072                 return r;
2073
2074         if (resv) {
2075                 r = amdgpu_sync_resv(adev, &job->sync, resv,
2076                                      AMDGPU_SYNC_ALWAYS,
2077                                      AMDGPU_FENCE_OWNER_UNDEFINED);
2078                 if (r) {
2079                         DRM_ERROR("sync failed (%d).\n", r);
2080                         goto error_free;
2081                 }
2082         }
2083
2084         amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
2085         while (cursor.remaining) {
2086                 uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes);
2087                 uint64_t dst_addr = cursor.start;
2088
2089                 dst_addr += amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
2090                 amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
2091                                         cur_size);
2092
2093                 amdgpu_res_next(&cursor, cur_size);
2094         }
2095
2096         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2097         WARN_ON(job->ibs[0].length_dw > num_dw);
2098         r = amdgpu_job_submit(job, &adev->mman.entity,
2099                               AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2100         if (r)
2101                 goto error_free;
2102
2103         return 0;
2104
2105 error_free:
2106         amdgpu_job_free(job);
2107         return r;
2108 }
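
/*
 * Editor's note: a hypothetical sketch (not part of the driver) of clearing a
 * pinned VRAM BO with amdgpu_fill_buffer() and waiting synchronously for the
 * resulting fence.
 */
static inline int amdgpu_ttm_clear_bo_example(struct amdgpu_bo *bo)
{
        struct dma_fence *fence = NULL;
        int r;

        /* Fill the whole BO with zeros using the SDMA buffer functions. */
        r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
        if (r)
                return r;

        dma_fence_wait(fence, false);
        dma_fence_put(fence);
        return 0;
}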
2109
2110 #if defined(CONFIG_DEBUG_FS)
2111
2112 static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused)
2113 {
2114         struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2115         struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2116                                                             TTM_PL_VRAM);
2117         struct drm_printer p = drm_seq_file_printer(m);
2118
2119         man->func->debug(man, &p);
2120         return 0;
2121 }
2122
2123 static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
2124 {
2125         struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2126
2127         return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
2128 }
2129
2130 static int amdgpu_mm_tt_table_show(struct seq_file *m, void *unused)
2131 {
2132         struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2133         struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2134                                                             TTM_PL_TT);
2135         struct drm_printer p = drm_seq_file_printer(m);
2136
2137         man->func->debug(man, &p);
2138         return 0;
2139 }
2140
2141 static int amdgpu_mm_gds_table_show(struct seq_file *m, void *unused)
2142 {
2143         struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2144         struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2145                                                             AMDGPU_PL_GDS);
2146         struct drm_printer p = drm_seq_file_printer(m);
2147
2148         man->func->debug(man, &p);
2149         return 0;
2150 }
2151
2152 static int amdgpu_mm_gws_table_show(struct seq_file *m, void *unused)
2153 {
2154         struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2155         struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2156                                                             AMDGPU_PL_GWS);
2157         struct drm_printer p = drm_seq_file_printer(m);
2158
2159         man->func->debug(man, &p);
2160         return 0;
2161 }
2162
2163 static int amdgpu_mm_oa_table_show(struct seq_file *m, void *unused)
2164 {
2165         struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
2166         struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
2167                                                             AMDGPU_PL_OA);
2168         struct drm_printer p = drm_seq_file_printer(m);
2169
2170         man->func->debug(man, &p);
2171         return 0;
2172 }
2173
2174 DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_vram_table);
2175 DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_tt_table);
2176 DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gds_table);
2177 DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gws_table);
2178 DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_oa_table);
2179 DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
2180
2181 /*
2182  * amdgpu_ttm_vram_read - Linear read access to VRAM
2183  *
2184  * Accesses VRAM via MMIO for debugging purposes.
2185  */
2186 static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2187                                     size_t size, loff_t *pos)
2188 {
2189         struct amdgpu_device *adev = file_inode(f)->i_private;
2190         ssize_t result = 0;
2191
2192         if (size & 0x3 || *pos & 0x3)
2193                 return -EINVAL;
2194
2195         if (*pos >= adev->gmc.mc_vram_size)
2196                 return -ENXIO;
2197
2198         size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
2199         while (size) {
2200                 size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
2201                 uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
2202
2203                 amdgpu_device_vram_access(adev, *pos, value, bytes, false);
2204                 if (copy_to_user(buf, value, bytes))
2205                         return -EFAULT;
2206
2207                 result += bytes;
2208                 buf += bytes;
2209                 *pos += bytes;
2210                 size -= bytes;
2211         }
2212
2213         return result;
2214 }
2215
2216 /*
2217  * amdgpu_ttm_vram_write - Linear write access to VRAM
2218  *
2219  * Accesses VRAM via MMIO for debugging purposes.
2220  */
2221 static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
2222                                     size_t size, loff_t *pos)
2223 {
2224         struct amdgpu_device *adev = file_inode(f)->i_private;
2225         ssize_t result = 0;
2226         int r;
2227
2228         if (size & 0x3 || *pos & 0x3)
2229                 return -EINVAL;
2230
2231         if (*pos >= adev->gmc.mc_vram_size)
2232                 return -ENXIO;
2233
2234         while (size) {
2235                 unsigned long flags;
2236                 uint32_t value;
2237
2238                 if (*pos >= adev->gmc.mc_vram_size)
2239                         return result;
2240
2241                 r = get_user(value, (uint32_t *)buf);
2242                 if (r)
2243                         return r;
2244
2245                 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
2246                 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
2247                 WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
2248                 WREG32_NO_KIQ(mmMM_DATA, value);
2249                 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
2250
2251                 result += 4;
2252                 buf += 4;
2253                 *pos += 4;
2254                 size -= 4;
2255         }
2256
2257         return result;
2258 }
2259
2260 static const struct file_operations amdgpu_ttm_vram_fops = {
2261         .owner = THIS_MODULE,
2262         .read = amdgpu_ttm_vram_read,
2263         .write = amdgpu_ttm_vram_write,
2264         .llseek = default_llseek,
2265 };
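
/*
 * Editor's note: the file backed by these fops is created as
 * /sys/kernel/debug/dri/<minor>/amdgpu_vram by amdgpu_ttm_debugfs_init()
 * below, so VRAM can be inspected from userspace with dword-aligned accesses,
 * e.g. "dd if=/sys/kernel/debug/dri/0/amdgpu_vram bs=4 count=1 skip=N"
 * (illustrative only; the minor number depends on the system).
 */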
2266
2267 /*
2268  * amdgpu_iomem_read - Virtual read access to GPU mapped memory
2269  *
2270  * This function is used to read memory that has been mapped to the
2271  * GPU; the addresses involved are not physical addresses but bus
2272  * addresses (e.g., what you'd put in an IB or ring buffer).
2273  */
2274 static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
2275                                  size_t size, loff_t *pos)
2276 {
2277         struct amdgpu_device *adev = file_inode(f)->i_private;
2278         struct iommu_domain *dom;
2279         ssize_t result = 0;
2280         int r;
2281
2282         /* retrieve the IOMMU domain if any for this device */
2283         dom = iommu_get_domain_for_dev(adev->dev);
2284
2285         while (size) {
2286                 phys_addr_t addr = *pos & PAGE_MASK;
2287                 loff_t off = *pos & ~PAGE_MASK;
2288                 size_t bytes = PAGE_SIZE - off;
2289                 unsigned long pfn;
2290                 struct page *p;
2291                 void *ptr;
2292
2293                 bytes = bytes < size ? bytes : size;
2294
2295                 /* Translate the bus address to a physical address.  If
2296                  * the domain is NULL it means there is no IOMMU active
2297                  * and the address translation is the identity
2298                  */
2299                 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2300
2301                 pfn = addr >> PAGE_SHIFT;
2302                 if (!pfn_valid(pfn))
2303                         return -EPERM;
2304
2305                 p = pfn_to_page(pfn);
2306                 if (p->mapping != adev->mman.bdev.dev_mapping)
2307                         return -EPERM;
2308
2309                 ptr = kmap(p);
2310                 r = copy_to_user(buf, ptr + off, bytes);
2311                 kunmap(p);
2312                 if (r)
2313                         return -EFAULT;
2314
2315                 size -= bytes;
2316                 *pos += bytes;
2317                 result += bytes;
2318         }
2319
2320         return result;
2321 }
2322
2323 /*
2324  * amdgpu_iomem_write - Virtual write access to GPU mapped memory
2325  *
2326  * This function is used to write memory that has been mapped to the
2327  * GPU; the addresses involved are not physical addresses but bus
2328  * addresses (e.g., what you'd put in an IB or ring buffer).
2329  */
2330 static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2331                                  size_t size, loff_t *pos)
2332 {
2333         struct amdgpu_device *adev = file_inode(f)->i_private;
2334         struct iommu_domain *dom;
2335         ssize_t result = 0;
2336         int r;
2337
2338         dom = iommu_get_domain_for_dev(adev->dev);
2339
2340         while (size) {
2341                 phys_addr_t addr = *pos & PAGE_MASK;
2342                 loff_t off = *pos & ~PAGE_MASK;
2343                 size_t bytes = PAGE_SIZE - off;
2344                 unsigned long pfn;
2345                 struct page *p;
2346                 void *ptr;
2347
2348                 bytes = bytes < size ? bytes : size;
2349
2350                 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2351
2352                 pfn = addr >> PAGE_SHIFT;
2353                 if (!pfn_valid(pfn))
2354                         return -EPERM;
2355
2356                 p = pfn_to_page(pfn);
2357                 if (p->mapping != adev->mman.bdev.dev_mapping)
2358                         return -EPERM;
2359
2360                 ptr = kmap(p);
2361                 r = copy_from_user(ptr + off, buf, bytes);
2362                 kunmap(p);
2363                 if (r)
2364                         return -EFAULT;
2365
2366                 size -= bytes;
2367                 *pos += bytes;
2368                 result += bytes;
2369         }
2370
2371         return result;
2372 }
2373
2374 static const struct file_operations amdgpu_ttm_iomem_fops = {
2375         .owner = THIS_MODULE,
2376         .read = amdgpu_iomem_read,
2377         .write = amdgpu_iomem_write,
2378         .llseek = default_llseek
2379 };
2380
2381 #endif
2382
2383 void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2384 {
2385 #if defined(CONFIG_DEBUG_FS)
2386         struct drm_minor *minor = adev_to_drm(adev)->primary;
2387         struct dentry *root = minor->debugfs_root;
2388
2389         debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
2390                                  &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
2391         debugfs_create_file("amdgpu_iomem", 0444, root, adev,
2392                             &amdgpu_ttm_iomem_fops);
2393         debugfs_create_file("amdgpu_vram_mm", 0444, root, adev,
2394                             &amdgpu_mm_vram_table_fops);
2395         debugfs_create_file("amdgpu_gtt_mm", 0444, root, adev,
2396                             &amdgpu_mm_tt_table_fops);
2397         debugfs_create_file("amdgpu_gds_mm", 0444, root, adev,
2398                             &amdgpu_mm_gds_table_fops);
2399         debugfs_create_file("amdgpu_gws_mm", 0444, root, adev,
2400                             &amdgpu_mm_gws_table_fops);
2401         debugfs_create_file("amdgpu_oa_mm", 0444, root, adev,
2402                             &amdgpu_mm_oa_table_fops);
2403         debugfs_create_file("ttm_page_pool", 0444, root, adev,
2404                             &amdgpu_ttm_page_pool_fops);
2405 #endif
2406 }