Merge tag 'amd-drm-next-5.8-2020-04-30' of git://people.freedesktop.org/~agd5f/linux...
author Dave Airlie <airlied@redhat.com>
Fri, 8 May 2020 03:31:06 +0000 (13:31 +1000)
committer Dave Airlie <airlied@redhat.com>
Fri, 8 May 2020 03:31:08 +0000 (13:31 +1000)
amd-drm-next-5.8-2020-04-30:

amdgpu:
- SR-IOV fixes
- SDMA fix for Navi
- VCN 2.5 DPG fixes
- Display fixes
- Display stuttering fixes for pageflip and cursor
- Add support for handling encrypted GPU memory
- Add UAPI for encrypted GPU memory
- Rework IB pool handling

amdkfd:
- Expose asic revision in topology
- Add UAPI for GWS (Global Wave Sync) resource management

UAPI:
- Add amdgpu UAPI for encrypted GPU memory (see the allocation sketch below)
  Used by: https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4401
- Add amdkfd UAPI for GWS (Global Wave Sync) resource management (see the ioctl sketch below)
  Thunk usage of KFD ioctl: https://github.com/RadeonOpenCompute/ROCT-Thunk-Interface/blob/roc-2.8.0/src/queues.c#L840
  ROCr usage of Thunk API: https://github.com/RadeonOpenCompute/ROCR-Runtime/blob/roc-3.1.0/src/core/runtime/amd_gpu_agent.cpp#L597
  HCC code using ROCr API: https://github.com/RadeonOpenCompute/hcc/blob/98ee9f34945d3b5f572d7a4c15cbffa506487734/lib/hsa/mcwamp_hsa.cpp#L2161
  HIP code using HCC API: https://github.com/ROCm-Developer-Tools/HIP/blob/cf8589b8c8a40ddcc55fa3a51e23390a49824130/src/hip_module.cpp#L567
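
A minimal userspace sketch of both new interfaces follows. It is not taken from
this pull request: the helper names, device-node paths, and error handling are
illustrative assumptions, while the flag, ioctl, struct, and libdrm entry points
should match the 5.8-era uapi and libdrm_amdgpu headers (verify against your tree).

/*
 * Allocate a TMZ-protected ("secure") buffer through libdrm's amdgpu wrapper
 * (pkg-config libdrm_amdgpu) using the new AMDGPU_GEM_CREATE_ENCRYPTED flag.
 * As the amdgpu_gem_create_ioctl() hunk below shows, the kernel rejects the
 * flag with -EINVAL when TMZ is not enabled on the device.
 */
#include <fcntl.h>
#include <unistd.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

static int alloc_secure_bo(const char *node, uint64_t size)
{
        struct amdgpu_bo_alloc_request req = {
                .alloc_size     = size,
                .phys_alignment = 4096,
                .preferred_heap = AMDGPU_GEM_DOMAIN_VRAM,
                .flags          = AMDGPU_GEM_CREATE_ENCRYPTED,
        };
        amdgpu_device_handle dev;
        amdgpu_bo_handle bo;
        uint32_t major, minor;
        int fd, r;

        fd = open(node, O_RDWR);                /* e.g. "/dev/dri/renderD128" */
        if (fd < 0)
                return -1;
        r = amdgpu_device_initialize(fd, &major, &minor, &dev);
        if (r) {
                close(fd);
                return r;
        }
        r = amdgpu_bo_alloc(dev, &req, &bo);    /* fails if TMZ is disabled */
        if (!r)
                amdgpu_bo_free(bo);
        amdgpu_device_deinitialize(dev);
        close(fd);
        return r;
}

/*
 * Request GWS entries for an already-created KFD queue with the new
 * AMDKFD_IOC_ALLOC_QUEUE_GWS ioctl; in practice this is reached through the
 * Thunk's hsaKmtAllocQueueGWS() (first link above).  kfd_fd is an open
 * "/dev/kfd" descriptor and the queue's GPU must support GWS.
 */
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int alloc_queue_gws(int kfd_fd, uint32_t queue_id, uint32_t num_gws)
{
        struct kfd_ioctl_alloc_queue_gws_args args = {
                .queue_id = queue_id,   /* queue from AMDKFD_IOC_CREATE_QUEUE */
                .num_gws  = num_gws,    /* number of global wave sync entries */
        };

        if (ioctl(kfd_fd, AMDKFD_IOC_ALLOC_QUEUE_GWS, &args))
                return -1;
        return args.first_gws;          /* index of the first GWS entry allocated */
}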

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200430212951.3902-1-alexander.deucher@amd.com
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/powerplay/smu_v11_0.c

diff --cc drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 245aec521388a280a885b8f881a042fb7b3cb930,77d988a0033f223efbd2779c85a68eb613dc8b1e..4ed9958af94e3b8d01416f7bfae93ef2e26244a5
@@@ -29,7 -29,6 +29,7 @@@
  #include <linux/module.h>
  #include <linux/pagemap.h>
  #include <linux/pci.h>
 +#include <linux/dma-buf.h>
  
  #include <drm/amdgpu_drm.h>
  #include <drm/drm_debugfs.h>
@@@ -234,7 -233,8 +234,8 @@@ int amdgpu_gem_create_ioctl(struct drm_
                      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
                      AMDGPU_GEM_CREATE_VRAM_CLEARED |
                      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
-                     AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
+                     AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
+                     AMDGPU_GEM_CREATE_ENCRYPTED))
  
                return -EINVAL;
  
        if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
                return -EINVAL;
  
+       if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
+               DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
+               return -EINVAL;
+       }
        /* create a gem object to contain this object in */
        if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
            AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
@@@ -862,8 -867,7 +868,8 @@@ static int amdgpu_debugfs_gem_bo_info(i
        attachment = READ_ONCE(bo->tbo.base.import_attach);
  
        if (attachment)
 -              seq_printf(m, " imported from %p", dma_buf);
 +              seq_printf(m, " imported from %p%s", dma_buf,
 +                         attachment->peer2peer ? " P2P" : "");
        else if (dma_buf)
                seq_printf(m, " exported as %p", dma_buf);
  
diff --cc drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 6880c023ca8b8a782feb23686132550718cf54b9,ea0199a8f9c95b279ea03293451083fdd312a94f..d5543c25f3c7078590c13ad4b5e5ad1193856c71
  
  #define AMDGPU_TTM_VRAM_MAX_DW_READ   (size_t)128
  
- static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
-                            struct ttm_mem_reg *mem, unsigned num_pages,
-                            uint64_t offset, unsigned window,
-                            struct amdgpu_ring *ring,
-                            uint64_t *addr);
  
  /**
   * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
@@@ -277,7 -272,7 +272,7 @@@ static uint64_t amdgpu_mm_node_addr(str
   *
   */
  static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
-                                              unsigned long *offset)
+                                              uint64_t *offset)
  {
        struct drm_mm_node *mm_node = mem->mm_node;
  
        return mm_node;
  }
  
+ /**
+  * amdgpu_ttm_map_buffer - Map memory into the GART windows
+  * @bo: buffer object to map
+  * @mem: memory object to map
+  * @mm_node: drm_mm node object to map
+  * @num_pages: number of pages to map
+  * @offset: offset into @mm_node where to start
+  * @window: which GART window to use
+  * @ring: DMA ring to use for the copy
+  * @tmz: if we should setup a TMZ enabled mapping
+  * @addr: resulting address inside the MC address space
+  *
+  * Setup one of the GART windows to access a specific piece of memory or return
+  * the physical address for local memory.
+  */
+ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
+                                struct ttm_mem_reg *mem,
+                                struct drm_mm_node *mm_node,
+                                unsigned num_pages, uint64_t offset,
+                                unsigned window, struct amdgpu_ring *ring,
+                                bool tmz, uint64_t *addr)
+ {
+       struct amdgpu_device *adev = ring->adev;
+       struct amdgpu_job *job;
+       unsigned num_dw, num_bytes;
+       struct dma_fence *fence;
+       uint64_t src_addr, dst_addr;
+       void *cpu_addr;
+       uint64_t flags;
+       unsigned int i;
+       int r;
+       BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
+              AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
+       /* Map only what can't be accessed directly */
+       if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
+               *addr = amdgpu_mm_node_addr(bo, mm_node, mem) + offset;
+               return 0;
+       }
+       *addr = adev->gmc.gart_start;
+       *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
+               AMDGPU_GPU_PAGE_SIZE;
+       *addr += offset & ~PAGE_MASK;
+       num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
+       num_bytes = num_pages * 8;
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
+                                    AMDGPU_IB_POOL_DELAYED, &job);
+       if (r)
+               return r;
+       src_addr = num_dw * 4;
+       src_addr += job->ibs[0].gpu_addr;
+       dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
+       dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
+       amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
+                               dst_addr, num_bytes, false);
+       amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+       WARN_ON(job->ibs[0].length_dw > num_dw);
+       flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
+       if (tmz)
+               flags |= AMDGPU_PTE_TMZ;
+       cpu_addr = &job->ibs[0].ptr[num_dw];
+       if (mem->mem_type == TTM_PL_TT) {
+               struct ttm_dma_tt *dma;
+               dma_addr_t *dma_address;
+               dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
+               dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
+               r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
+                                   cpu_addr);
+               if (r)
+                       goto error_free;
+       } else {
+               dma_addr_t dma_address;
+               dma_address = (mm_node->start << PAGE_SHIFT) + offset;
+               dma_address += adev->vm_manager.vram_base_offset;
+               for (i = 0; i < num_pages; ++i) {
+                       r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
+                                           &dma_address, flags, cpu_addr);
+                       if (r)
+                               goto error_free;
+                       dma_address += PAGE_SIZE;
+               }
+       }
+       r = amdgpu_job_submit(job, &adev->mman.entity,
+                             AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+       if (r)
+               goto error_free;
+       dma_fence_put(fence);
+       return r;
+ error_free:
+       amdgpu_job_free(job);
+       return r;
+ }
  /**
   * amdgpu_copy_ttm_mem_to_mem - Helper function for copy
+  * @adev: amdgpu device
+  * @src: buffer/address where to read from
+  * @dst: buffer/address where to write to
+  * @size: number of bytes to copy
+  * @tmz: if a secure copy should be used
+  * @resv: resv object to sync to
+  * @f: Returns the last fence if multiple jobs are submitted.
   *
   * The function copies @size bytes from {src->mem + src->offset} to
   * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
   * move and different for a BO to BO copy.
   *
-  * @f: Returns the last fence if multiple jobs are submitted.
   */
  int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
-                              struct amdgpu_copy_mem *src,
-                              struct amdgpu_copy_mem *dst,
-                              uint64_t size,
+                              const struct amdgpu_copy_mem *src,
+                              const struct amdgpu_copy_mem *dst,
+                              uint64_t size, bool tmz,
                               struct dma_resv *resv,
                               struct dma_fence **f)
  {
+       const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
+                                       AMDGPU_GPU_PAGE_SIZE);
+       uint64_t src_node_size, dst_node_size, src_offset, dst_offset;
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
        struct drm_mm_node *src_mm, *dst_mm;
-       uint64_t src_node_start, dst_node_start, src_node_size,
-                dst_node_size, src_page_offset, dst_page_offset;
        struct dma_fence *fence = NULL;
        int r = 0;
-       const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
-                                       AMDGPU_GPU_PAGE_SIZE);
  
        if (!adev->mman.buffer_funcs_enabled) {
                DRM_ERROR("Trying to move memory with ring turned off.\n");
                return -EINVAL;
        }
  
-       src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
-       src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
-                                            src->offset;
-       src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
-       src_page_offset = src_node_start & (PAGE_SIZE - 1);
+       src_offset = src->offset;
+       src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
+       src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
  
-       dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
-       dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
-                                            dst->offset;
-       dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
-       dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
+       dst_offset = dst->offset;
+       dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
+       dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
  
        mutex_lock(&adev->mman.gtt_window_lock);
  
        while (size) {
-               unsigned long cur_size;
-               uint64_t from = src_node_start, to = dst_node_start;
+               uint32_t src_page_offset = src_offset & ~PAGE_MASK;
+               uint32_t dst_page_offset = dst_offset & ~PAGE_MASK;
                struct dma_fence *next;
+               uint32_t cur_size;
+               uint64_t from, to;
  
                /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
                 * begins at an offset, then adjust the size accordingly
                 */
-               cur_size = min3(min(src_node_size, dst_node_size), size,
-                               GTT_MAX_BYTES);
-               if (cur_size + src_page_offset > GTT_MAX_BYTES ||
-                   cur_size + dst_page_offset > GTT_MAX_BYTES)
-                       cur_size -= max(src_page_offset, dst_page_offset);
-               /* Map only what needs to be accessed. Map src to window 0 and
-                * dst to window 1
-                */
-               if (src->mem->start == AMDGPU_BO_INVALID_OFFSET) {
-                       r = amdgpu_map_buffer(src->bo, src->mem,
-                                       PFN_UP(cur_size + src_page_offset),
-                                       src_node_start, 0, ring,
-                                       &from);
-                       if (r)
-                               goto error;
-                       /* Adjust the offset because amdgpu_map_buffer returns
-                        * start of mapped page
-                        */
-                       from += src_page_offset;
-               }
+               cur_size = max(src_page_offset, dst_page_offset);
+               cur_size = min(min3(src_node_size, dst_node_size, size),
+                              (uint64_t)(GTT_MAX_BYTES - cur_size));
+               /* Map src to window 0 and dst to window 1. */
+               r = amdgpu_ttm_map_buffer(src->bo, src->mem, src_mm,
+                                         PFN_UP(cur_size + src_page_offset),
+                                         src_offset, 0, ring, tmz, &from);
+               if (r)
+                       goto error;
  
-               if (dst->mem->start == AMDGPU_BO_INVALID_OFFSET) {
-                       r = amdgpu_map_buffer(dst->bo, dst->mem,
-                                       PFN_UP(cur_size + dst_page_offset),
-                                       dst_node_start, 1, ring,
-                                       &to);
-                       if (r)
-                               goto error;
-                       to += dst_page_offset;
-               }
+               r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, dst_mm,
+                                         PFN_UP(cur_size + dst_page_offset),
+                                         dst_offset, 1, ring, tmz, &to);
+               if (r)
+                       goto error;
  
                r = amdgpu_copy_buffer(ring, from, to, cur_size,
-                                      resv, &next, false, true);
+                                      resv, &next, false, true, tmz);
                if (r)
                        goto error;
  
  
                src_node_size -= cur_size;
                if (!src_node_size) {
-                       src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
-                                                            src->mem);
-                       src_node_size = (src_mm->size << PAGE_SHIFT);
-                       src_page_offset = 0;
+                       ++src_mm;
+                       src_node_size = src_mm->size << PAGE_SHIFT;
+                       src_offset = 0;
                } else {
-                       src_node_start += cur_size;
-                       src_page_offset = src_node_start & (PAGE_SIZE - 1);
+                       src_offset += cur_size;
                }
                dst_node_size -= cur_size;
                if (!dst_node_size) {
-                       dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
-                                                            dst->mem);
-                       dst_node_size = (dst_mm->size << PAGE_SHIFT);
-                       dst_page_offset = 0;
+                       ++dst_mm;
+                       dst_node_size = dst_mm->size << PAGE_SHIFT;
+                       dst_offset = 0;
                } else {
-                       dst_node_start += cur_size;
-                       dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
+                       dst_offset += cur_size;
                }
        }
  error:
@@@ -425,6 -517,7 +517,7 @@@ static int amdgpu_move_blit(struct ttm_
                            struct ttm_mem_reg *old_mem)
  {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+       struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
        struct amdgpu_copy_mem src, dst;
        struct dma_fence *fence = NULL;
        int r;
  
        r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
                                       new_mem->num_pages << PAGE_SHIFT,
+                                      amdgpu_bo_encrypted(abo),
                                       bo->base.resv, &fence);
        if (r)
                goto error;
  
        /* clear the space being freed */
        if (old_mem->mem_type == TTM_PL_VRAM &&
-           (ttm_to_amdgpu_bo(bo)->flags &
-            AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
+           (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
                struct dma_fence *wipe_fence = NULL;
  
                r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
@@@ -742,8 -835,8 +835,8 @@@ static void amdgpu_ttm_io_mem_free(stru
  static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
                                           unsigned long page_offset)
  {
+       uint64_t offset = (page_offset << PAGE_SHIFT);
        struct drm_mm_node *mm;
-       unsigned long offset = (page_offset << PAGE_SHIFT);
  
        mm = amdgpu_find_mm_node(&bo->mem, &offset);
        return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
@@@ -770,6 -863,7 +863,6 @@@ struct amdgpu_ttm_tt 
  static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
        (1 << 0), /* HMM_PFN_VALID */
        (1 << 1), /* HMM_PFN_WRITE */
 -      0 /* HMM_PFN_DEVICE_PRIVATE */
  };
  
  static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
@@@ -850,7 -944,7 +943,7 @@@ retry
        range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
  
        down_read(&mm->mmap_sem);
 -      r = hmm_range_fault(range, 0);
 +      r = hmm_range_fault(range);
        up_read(&mm->mmap_sem);
        if (unlikely(r <= 0)) {
                /*
@@@ -1027,6 -1121,9 +1120,9 @@@ int amdgpu_ttm_gart_bind(struct amdgpu_
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
        int r;
  
+       if (amdgpu_bo_encrypted(abo))
+               flags |= AMDGPU_PTE_TMZ;
        if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
                uint64_t page_idx = 1;
  
@@@ -1539,6 -1636,9 +1635,9 @@@ static bool amdgpu_ttm_bo_eviction_valu
  
        switch (bo->mem.mem_type) {
        case TTM_PL_TT:
+               if (amdgpu_bo_is_amdgpu_bo(bo) &&
+                   amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
+                       return false;
                return true;
  
        case TTM_PL_VRAM:
@@@ -1587,8 -1687,9 +1686,9 @@@ static int amdgpu_ttm_access_memory(str
        if (bo->mem.mem_type != TTM_PL_VRAM)
                return -EIO;
  
-       nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
-       pos = (nodes->start << PAGE_SHIFT) + offset;
+       pos = offset;
+       nodes = amdgpu_find_mm_node(&abo->tbo.mem, &pos);
+       pos += (nodes->start << PAGE_SHIFT);
  
        while (len && pos < adev->gmc.mc_vram_size) {
                uint64_t aligned_pos = pos & ~(uint64_t)3;
@@@ -2015,76 -2116,14 +2115,14 @@@ int amdgpu_mmap(struct file *filp, stru
        return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
  }
  
- static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
-                            struct ttm_mem_reg *mem, unsigned num_pages,
-                            uint64_t offset, unsigned window,
-                            struct amdgpu_ring *ring,
-                            uint64_t *addr)
- {
-       struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
-       struct amdgpu_device *adev = ring->adev;
-       struct ttm_tt *ttm = bo->ttm;
-       struct amdgpu_job *job;
-       unsigned num_dw, num_bytes;
-       dma_addr_t *dma_address;
-       struct dma_fence *fence;
-       uint64_t src_addr, dst_addr;
-       uint64_t flags;
-       int r;
-       BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
-              AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
-       *addr = adev->gmc.gart_start;
-       *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
-               AMDGPU_GPU_PAGE_SIZE;
-       num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
-       num_bytes = num_pages * 8;
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
-                                                                       AMDGPU_IB_POOL_NORMAL, &job);
-       if (r)
-               return r;
-       src_addr = num_dw * 4;
-       src_addr += job->ibs[0].gpu_addr;
-       dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
-       dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
-       amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
-                               dst_addr, num_bytes);
-       amdgpu_ring_pad_ib(ring, &job->ibs[0]);
-       WARN_ON(job->ibs[0].length_dw > num_dw);
-       dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
-       flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
-       r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
-                           &job->ibs[0].ptr[num_dw]);
-       if (r)
-               goto error_free;
-       r = amdgpu_job_submit(job, &adev->mman.entity,
-                             AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-       if (r)
-               goto error_free;
-       dma_fence_put(fence);
-       return r;
- error_free:
-       amdgpu_job_free(job);
-       return r;
- }
  int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
                       uint64_t dst_offset, uint32_t byte_count,
                       struct dma_resv *resv,
                       struct dma_fence **fence, bool direct_submit,
-                      bool vm_needs_flush)
+                      bool vm_needs_flush, bool tmz)
  {
+       enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
+               AMDGPU_IB_POOL_DELAYED;
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_job *job;
  
        num_loops = DIV_ROUND_UP(byte_count, max_bytes);
        num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
  
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4,
-                       direct_submit ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_NORMAL, &job);
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
        if (r)
                return r;
  
                uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
  
                amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
-                                       dst_offset, cur_size_in_bytes);
+                                       dst_offset, cur_size_in_bytes, tmz);
  
                src_offset += cur_size_in_bytes;
                dst_offset += cur_size_in_bytes;
@@@ -2192,7 -2230,8 +2229,8 @@@ int amdgpu_fill_buffer(struct amdgpu_b
        /* for IB padding */
        num_dw += 64;
  
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_NORMAL, &job);
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
+                                    &job);
        if (r)
                return r;
  
diff --cc drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 6b22dc41ef13ab2395fc575e38917058009f024b,11c0e79e71063481a7612069965d72e68a3ae07e..4351d02644a7bef01177d8e0d57bf9cb72cb56a1
@@@ -24,9 -24,8 +24,9 @@@
  #ifndef __AMDGPU_TTM_H__
  #define __AMDGPU_TTM_H__
  
 -#include "amdgpu.h"
 +#include <linux/dma-direction.h>
  #include <drm/gpu_scheduler.h>
 +#include "amdgpu.h"
  
  #define AMDGPU_PL_GDS         (TTM_PL_PRIV + 0)
  #define AMDGPU_PL_GWS         (TTM_PL_PRIV + 1)
@@@ -75,15 -74,6 +75,15 @@@ uint64_t amdgpu_gtt_mgr_usage(struct tt
  int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
  
  u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
 +int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
 +                            struct ttm_mem_reg *mem,
 +                            struct device *dev,
 +                            enum dma_data_direction dir,
 +                            struct sg_table **sgt);
 +void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
 +                            struct device *dev,
 +                            enum dma_data_direction dir,
 +                            struct sg_table *sgt);
  uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
  uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
  
@@@ -97,11 -87,11 +97,11 @@@ int amdgpu_copy_buffer(struct amdgpu_ri
                       uint64_t dst_offset, uint32_t byte_count,
                       struct dma_resv *resv,
                       struct dma_fence **fence, bool direct_submit,
-                      bool vm_needs_flush);
+                      bool vm_needs_flush, bool tmz);
  int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
-                              struct amdgpu_copy_mem *src,
-                              struct amdgpu_copy_mem *dst,
-                              uint64_t size,
+                              const struct amdgpu_copy_mem *src,
+                              const struct amdgpu_copy_mem *dst,
+                              uint64_t size, bool tmz,
                               struct dma_resv *resv,
                               struct dma_fence **f);
  int amdgpu_fill_buffer(struct amdgpu_bo *bo,
diff --cc drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 3e99f31b4bd03bd42e0acddf366e110113d5e8fd,c0ca9a8229e116dec09c88286e17dab65e053333..ebd723a0bcfced12ea9d5e9beb2ecd744e6eb775
@@@ -691,7 -691,7 +691,7 @@@ static uint64_t sdma_v4_0_ring_get_wptr
  }
  
  /**
 - * sdma_v4_0_ring_set_wptr - commit the write pointer
 + * sdma_v4_0_page_ring_set_wptr - commit the write pointer
   *
   * @ring: amdgpu ring pointer
   *
@@@ -987,7 -987,7 +987,7 @@@ static void sdma_v4_0_page_stop(struct 
  }
  
  /**
 - * sdma_v_0_ctx_switch_enable - stop the async dma engines context switch
 + * sdma_v4_0_ctx_switch_enable - stop the async dma engines context switch
   *
   * @adev: amdgpu_device pointer
   * @enable: enable/disable the DMA MEs context switch.
@@@ -2458,10 -2458,12 +2458,12 @@@ static void sdma_v4_0_set_irq_funcs(str
  static void sdma_v4_0_emit_copy_buffer(struct amdgpu_ib *ib,
                                       uint64_t src_offset,
                                       uint64_t dst_offset,
-                                      uint32_t byte_count)
+                                      uint32_t byte_count,
+                                      bool tmz)
  {
        ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
-               SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+               SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
+               SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
        ib->ptr[ib->length_dw++] = byte_count - 1;
        ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
        ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
diff --cc drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4c8b1bc989135e88f53dea7602e4b29cc4138594,98f39db81c7bd5c6fb647798c423f4871396e358..71309ee3aca39f3aaa84b8da368aa03747c78c26
@@@ -3309,7 -3309,7 +3309,7 @@@ static int fill_dc_scaling_info(const s
  }
  
  static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
-                      uint64_t *tiling_flags)
+                      uint64_t *tiling_flags, bool *tmz_surface)
  {
        struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
        int r = amdgpu_bo_reserve(rbo, false);
        if (tiling_flags)
                amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
  
+       if (tmz_surface)
+               *tmz_surface = amdgpu_bo_encrypted(rbo);
        amdgpu_bo_unreserve(rbo);
  
        return r;
@@@ -3411,6 -3414,7 +3414,7 @@@ fill_plane_buffer_attributes(struct amd
                             struct plane_size *plane_size,
                             struct dc_plane_dcc_param *dcc,
                             struct dc_plane_address *address,
+                            bool tmz_surface,
                             bool force_disable_dcc)
  {
        const struct drm_framebuffer *fb = &afb->base;
        memset(dcc, 0, sizeof(*dcc));
        memset(address, 0, sizeof(*address));
  
+       address->tmz_surface = tmz_surface;
        if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
                plane_size->surface_size.x = 0;
                plane_size->surface_size.y = 0;
@@@ -3611,6 -3617,7 +3617,7 @@@ fill_dc_plane_info_and_addr(struct amdg
                            const uint64_t tiling_flags,
                            struct dc_plane_info *plane_info,
                            struct dc_plane_address *address,
+                           bool tmz_surface,
                            bool force_disable_dcc)
  {
        const struct drm_framebuffer *fb = plane_state->fb;
                                           plane_info->rotation, tiling_flags,
                                           &plane_info->tiling_info,
                                           &plane_info->plane_size,
-                                          &plane_info->dcc, address,
+                                          &plane_info->dcc, address, tmz_surface,
                                           force_disable_dcc);
        if (ret)
                return ret;
@@@ -3717,6 -3724,7 +3724,7 @@@ static int fill_dc_plane_attributes(str
        struct dc_plane_info plane_info;
        uint64_t tiling_flags;
        int ret;
+       bool tmz_surface = false;
        bool force_disable_dcc = false;
  
        ret = fill_dc_scaling_info(plane_state, &scaling_info);
        dc_plane_state->clip_rect = scaling_info.clip_rect;
        dc_plane_state->scaling_quality = scaling_info.scaling_quality;
  
-       ret = get_fb_info(amdgpu_fb, &tiling_flags);
+       ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
        if (ret)
                return ret;
  
        ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
                                          &plane_info,
                                          &dc_plane_state->address,
+                                         tmz_surface,
                                          force_disable_dcc);
        if (ret)
                return ret;
@@@ -4736,7 -4745,6 +4745,7 @@@ amdgpu_dm_connector_atomic_duplicate_st
  static int
  amdgpu_dm_connector_late_register(struct drm_connector *connector)
  {
 +#if defined(CONFIG_DEBUG_FS)
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                to_amdgpu_dm_connector(connector);
        int r;
                        return r;
        }
  
 -#if defined(CONFIG_DEBUG_FS)
        connector_debugfs_init(amdgpu_dm_connector);
  #endif
  
@@@ -5354,6 -5363,7 +5363,7 @@@ static int dm_plane_helper_prepare_fb(s
        uint64_t tiling_flags;
        uint32_t domain;
        int r;
+       bool tmz_surface = false;
        bool force_disable_dcc = false;
  
        dm_plane_state_old = to_dm_plane_state(plane->state);
  
        amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
  
+       tmz_surface = amdgpu_bo_encrypted(rbo);
        ttm_eu_backoff_reservation(&ticket, &list);
  
        afb->address = amdgpu_bo_gpu_offset(rbo);
                        adev, afb, plane_state->format, plane_state->rotation,
                        tiling_flags, &plane_state->tiling_info,
                        &plane_state->plane_size, &plane_state->dcc,
-                       &plane_state->address,
+                       &plane_state->address, tmz_surface,
                        force_disable_dcc);
        }
  
@@@ -6592,6 -6604,7 +6604,7 @@@ static void amdgpu_dm_commit_planes(str
        unsigned long flags;
        struct amdgpu_bo *abo;
        uint64_t tiling_flags;
+       bool tmz_surface = false;
        uint32_t target_vblank, last_flip_vblank;
        bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
        bool pflip_present = false;
  
                amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
  
+               tmz_surface = amdgpu_bo_encrypted(abo);
                amdgpu_bo_unreserve(abo);
  
                fill_dc_plane_info_and_addr(
                        dm->adev, new_plane_state, tiling_flags,
                        &bundle->plane_infos[planes_count],
                        &bundle->flip_addrs[planes_count].address,
+                       tmz_surface,
                        false);
  
                DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
@@@ -8065,6 -8081,7 +8081,7 @@@ dm_determine_update_type_for_commit(str
                        struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
                        struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
                        uint64_t tiling_flags;
+                       bool tmz_surface = false;
  
                        new_plane_crtc = new_plane_state->crtc;
                        new_dm_plane_state = to_dm_plane_state(new_plane_state);
                        bundle->surface_updates[num_plane].scaling_info = scaling_info;
  
                        if (amdgpu_fb) {
-                               ret = get_fb_info(amdgpu_fb, &tiling_flags);
+                               ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
                                if (ret)
                                        goto cleanup;
  
                                ret = fill_dc_plane_info_and_addr(
                                        dm->adev, new_plane_state, tiling_flags,
                                        plane_info,
-                                       &flip_addr->address,
+                                       &flip_addr->address, tmz_surface,
                                        false);
                                if (ret)
                                        goto cleanup;
diff --cc drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index d5b306384d790c28e7cf1027f9eadc867d0448fb,f9fa0f7712b3d33df673e0b9cd87275d15e4ca8f..9ef9e50a34faaa11ac41151b954c70674f0a380d
@@@ -2521,7 -2521,7 +2521,7 @@@ static void dp_test_send_phy_test_patte
        /* get phy test pattern and pattern parameters from DP receiver */
        core_link_read_dpcd(
                        link,
 -                      DP_TEST_PHY_PATTERN,
 +                      DP_PHY_TEST_PATTERN,
                        &dpcd_test_pattern.raw,
                        sizeof(dpcd_test_pattern));
        core_link_read_dpcd(
@@@ -4231,7 -4231,7 +4231,7 @@@ void dpcd_set_source_specific_data(stru
  {
        const uint32_t post_oui_delay = 30; // 30ms
        uint8_t dspc = 0;
-       enum dc_status ret = DC_ERROR_UNEXPECTED;
+       enum dc_status ret;
  
        ret = core_link_read_dpcd(link, DP_DOWN_STREAM_PORT_COUNT, &dspc,
                                  sizeof(dspc));
diff --cc drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 0045b54b19ed9b35efe16ad5ebfe005beb7c1e52,3e1b3ed8a05e8a613f608d266ee52768f31c16b8..d5a3487ccfacfd2c4fbadba16c66484af9578bd0
@@@ -57,7 -57,7 +57,7 @@@ static int smu_v11_0_send_msg_without_w
                                              uint16_t msg)
  {
        struct amdgpu_device *adev = smu->adev;
-       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+       WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
        return 0;
  }
  
@@@ -65,7 -65,7 +65,7 @@@ static int smu_v11_0_read_arg(struct sm
  {
        struct amdgpu_device *adev = smu->adev;
  
-       *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+       *arg = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82);
        return 0;
  }
  
@@@ -75,7 -75,7 +75,7 @@@ static int smu_v11_0_wait_for_response(
        uint32_t cur_value, i, timeout = adev->usec_timeout * 10;
  
        for (i = 0; i < timeout; i++) {
-               cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+               cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
                if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
                        return cur_value == 0x1 ? 0 : -EIO;
  
        }
  
        /* timeout means wrong logic */
-       return -ETIME;
+       if (i == timeout)
+               return -ETIME;
+       return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
  }
  
  int
@@@ -107,9 -110,9 +110,9 @@@ smu_v11_0_send_msg_with_param(struct sm
                goto out;
        }
  
-       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+       WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
  
-       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
+       WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
  
        smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
  
                       smu_get_message_name(smu, msg), index, param, ret);
                goto out;
        }
        if (read_arg) {
                ret = smu_v11_0_read_arg(smu, read_arg);
                if (ret) {
@@@ -728,8 -732,9 +732,9 @@@ int smu_v11_0_parse_pptable(struct smu_
        struct smu_table_context *table_context = &smu->smu_table;
        struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE];
  
+       /* during TDR we need to free and alloc the pptable */
        if (table_context->driver_pptable)
-               return -EINVAL;
+               kfree(table_context->driver_pptable);
  
        table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL);
  
@@@ -769,6 -774,9 +774,9 @@@ int smu_v11_0_set_deep_sleep_dcefclk(st
  {
        int ret;
  
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
        ret = smu_send_smc_msg_with_param(smu,
                                          SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
        if (ret)
@@@ -812,6 -820,9 +820,9 @@@ int smu_v11_0_set_tool_table_location(s
        int ret = 0;
        struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
  
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
        if (tool_table->mc_address) {
                ret = smu_send_smc_msg_with_param(smu,
                                SMU_MSG_SetToolsDramAddrHigh,
@@@ -831,6 -842,12 +842,12 @@@ int smu_v11_0_init_display_count(struc
  {
        int ret = 0;
  
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
+       if (!smu->pm_enabled)
+               return ret;
        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
        return ret;
  }
@@@ -842,6 -859,9 +859,9 @@@ int smu_v11_0_set_allowed_mask(struct s
        int ret = 0;
        uint32_t feature_mask[2];
  
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
        mutex_lock(&feature->mutex);
        if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
                goto failed;
@@@ -870,6 -890,9 +890,9 @@@ int smu_v11_0_get_enabled_mask(struct s
        struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;
  
+       if (amdgpu_sriov_vf(smu->adev) && !amdgpu_sriov_is_pp_one_vf(smu->adev))
+               return 0;
        if (!feature_mask || num < 2)
                return -EINVAL;
  
@@@ -925,6 -948,12 +948,12 @@@ int smu_v11_0_notify_display_change(str
  {
        int ret = 0;
  
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
+       if (!smu->pm_enabled)
+               return ret;
        if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
            smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
@@@ -1084,6 -1113,9 +1113,9 @@@ int smu_v11_0_set_power_limit(struct sm
        int ret = 0;
        uint32_t max_power_limit;
  
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
        max_power_limit = smu_v11_0_get_max_power_limit(smu);
  
        if (n > max_power_limit) {
@@@ -1700,12 -1732,6 +1732,12 @@@ int smu_v11_0_baco_set_state(struct smu
                if (ret)
                        goto out;
  
 +              if (ras && ras->supported) {
 +                      ret = smu_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
 +                      if (ret)
 +                              goto out;
 +              }
 +
                /* clear vbios scratch 6 and 7 for coming asic reinit */
                WREG32(adev->bios_scratch_reg_offset + 6, 0);
                WREG32(adev->bios_scratch_reg_offset + 7, 0);
@@@ -1815,6 -1841,9 +1847,9 @@@ int smu_v11_0_override_pcie_parameters(
        uint32_t pcie_gen = 0, pcie_width = 0;
        int ret;
  
+       if (amdgpu_sriov_vf(smu->adev))
+               return 0;
        if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
                pcie_gen = 3;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)