Merge tag 'drm-xe-fixes-2024-04-18' of https://gitlab.freedesktop.org/drm/xe/kernel...
authorDave Airlie <airlied@redhat.com>
Fri, 19 Apr 2024 00:40:04 +0000 (10:40 +1000)
committerDave Airlie <airlied@redhat.com>
Fri, 19 Apr 2024 00:40:47 +0000 (10:40 +1000)
- Fix BO leak on error path during fb init
- Fix use-after-free due to the order in which the VM is put and destroyed
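
For illustration only (the xe diffs themselves are not in the file list below, and every name here is hypothetical), the second fix belongs to a familiar class of bug: the last reference is dropped before teardown code that still dereferences the object. A minimal userspace C sketch of that class:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted object. */
struct obj {
	int refcount;
	int payload;
};

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		free(o);			/* last put frees the object */
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->refcount = 1;
	o->payload = 42;

	obj_put(o);				/* last reference dropped here */
	/* printf("%d\n", o->payload); */	/* would be the use-after-free */
	/* The fix pattern: finish every teardown step that touches the
	 * object first, then drop the final reference. */
	return 0;
}
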

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/xjguifyantaibyrnymuiotxws6akiexi6r7tqyieqxgquovubc@kkrtbe24hjjr
28 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nouveau_dp.c
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
drivers/gpu/drm/panel/panel-novatek-nt36672e.c
drivers/gpu/drm/panel/panel-visionox-rm69299.c
drivers/gpu/drm/radeon/pptable.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/ttm/ttm_pool.c
drivers/gpu/drm/v3d/v3d_irq.c
drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c

index 0a4b09709cfb149078c6284f2a0908cbde928430..ec888fc6ead8df0ce52ec00439e5f22ca7f4e9ff 100644 (file)
@@ -819,7 +819,7 @@ retry:
 
        p->bytes_moved += ctx.bytes_moved;
        if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
-           amdgpu_bo_in_cpu_visible_vram(bo))
+           amdgpu_res_cpu_visible(adev, bo->tbo.resource))
                p->bytes_moved_vis += ctx.bytes_moved;
 
        if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
index 010b0cb7693c9c3be5608f192cb19e583a285893..2099159a693fa02e7c508c3aecdc9f695498cecd 100644 (file)
@@ -617,8 +617,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
                return r;
 
        if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
-           bo->tbo.resource->mem_type == TTM_PL_VRAM &&
-           amdgpu_bo_in_cpu_visible_vram(bo))
+           amdgpu_res_cpu_visible(adev, bo->tbo.resource))
                amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
                                             ctx.bytes_moved);
        else
@@ -1272,23 +1271,25 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
 void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
                          struct amdgpu_mem_stats *stats)
 {
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       struct ttm_resource *res = bo->tbo.resource;
        uint64_t size = amdgpu_bo_size(bo);
        struct drm_gem_object *obj;
        unsigned int domain;
        bool shared;
 
        /* Abort if the BO doesn't currently have a backing store */
-       if (!bo->tbo.resource)
+       if (!res)
                return;
 
        obj = &bo->tbo.base;
        shared = drm_gem_object_is_shared_for_memory_stats(obj);
 
-       domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
+       domain = amdgpu_mem_type_to_domain(res->mem_type);
        switch (domain) {
        case AMDGPU_GEM_DOMAIN_VRAM:
                stats->vram += size;
-               if (amdgpu_bo_in_cpu_visible_vram(bo))
+               if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
                        stats->visible_vram += size;
                if (shared)
                        stats->vram_shared += size;
@@ -1389,10 +1390,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
        /* Remember that this BO was accessed by the CPU */
        abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 
-       if (bo->resource->mem_type != TTM_PL_VRAM)
-               return 0;
-
-       if (amdgpu_bo_in_cpu_visible_vram(abo))
+       if (amdgpu_res_cpu_visible(adev, bo->resource))
                return 0;
 
        /* Can't move a pinned BO to visible VRAM */
@@ -1415,7 +1413,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 
        /* this should never happen */
        if (bo->resource->mem_type == TTM_PL_VRAM &&
-           !amdgpu_bo_in_cpu_visible_vram(abo))
+           !amdgpu_res_cpu_visible(adev, bo->resource))
                return VM_FAULT_SIGBUS;
 
        ttm_bo_move_to_lru_tail_unlocked(bo);
@@ -1579,6 +1577,7 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
  */
 u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
 {
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct dma_buf_attachment *attachment;
        struct dma_buf *dma_buf;
        const char *placement;
@@ -1587,10 +1586,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
 
        if (dma_resv_trylock(bo->tbo.base.resv)) {
                unsigned int domain;
+
                domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
                switch (domain) {
                case AMDGPU_GEM_DOMAIN_VRAM:
-                       if (amdgpu_bo_in_cpu_visible_vram(bo))
+                       if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
                                placement = "VRAM VISIBLE";
                        else
                                placement = "VRAM";
index be679c42b0b8cb5d127910803e79593910c72952..fa03d9e4874cc65b39e038014ab15fc4e58ba858 100644 (file)
@@ -250,28 +250,6 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
        return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
 }
 
-/**
- * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
- */
-static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
-{
-       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-       struct amdgpu_res_cursor cursor;
-
-       if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
-               return false;
-
-       amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
-       while (cursor.remaining) {
-               if (cursor.start < adev->gmc.visible_vram_size)
-                       return true;
-
-               amdgpu_res_next(&cursor, cursor.size);
-       }
-
-       return false;
-}
-
 /**
  * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
  */
index fc418e670fdae27b699bdbefce8051ab128ab76c..1d71729e3f6bcef2c02f9e1ce252dc6cd6461b94 100644 (file)
@@ -133,7 +133,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 
                } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
                           !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
-                          amdgpu_bo_in_cpu_visible_vram(abo)) {
+                          amdgpu_res_cpu_visible(adev, bo->resource)) {
 
                        /* Try evicting to the CPU inaccessible part of VRAM
                         * first, but only set GTT as busy placement, so this
@@ -403,40 +403,55 @@ error:
        return r;
 }
 
-/*
- * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
+/**
+ * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
+ * @adev: amdgpu device
+ * @res: the resource to check
  *
- * Called by amdgpu_bo_move()
+ * Returns: true if the full resource is CPU visible, false otherwise.
  */
-static bool amdgpu_mem_visible(struct amdgpu_device *adev,
-                              struct ttm_resource *mem)
+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+                           struct ttm_resource *res)
 {
-       u64 mem_size = (u64)mem->size;
        struct amdgpu_res_cursor cursor;
-       u64 end;
 
-       if (mem->mem_type == TTM_PL_SYSTEM ||
-           mem->mem_type == TTM_PL_TT)
+       if (!res)
+               return false;
+
+       if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
+           res->mem_type == AMDGPU_PL_PREEMPT)
                return true;
-       if (mem->mem_type != TTM_PL_VRAM)
+
+       if (res->mem_type != TTM_PL_VRAM)
                return false;
 
-       amdgpu_res_first(mem, 0, mem_size, &cursor);
-       end = cursor.start + cursor.size;
+       amdgpu_res_first(res, 0, res->size, &cursor);
        while (cursor.remaining) {
+               if ((cursor.start + cursor.size) >= adev->gmc.visible_vram_size)
+                       return false;
                amdgpu_res_next(&cursor, cursor.size);
+       }
 
-               if (!cursor.remaining)
-                       break;
+       return true;
+}
 
-               /* ttm_resource_ioremap only supports contiguous memory */
-               if (end != cursor.start)
-                       return false;
+/*
+ * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
+ *
+ * Called by amdgpu_bo_move()
+ */
+static bool amdgpu_res_copyable(struct amdgpu_device *adev,
+                               struct ttm_resource *mem)
+{
+       if (!amdgpu_res_cpu_visible(adev, mem))
+               return false;
 
-               end = cursor.start + cursor.size;
-       }
+       /* ttm_resource_ioremap only supports contiguous memory */
+       if (mem->mem_type == TTM_PL_VRAM &&
+           !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
+               return false;
 
-       return end <= adev->gmc.visible_vram_size;
+       return true;
 }
 
 /*
@@ -529,8 +544,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 
        if (r) {
                /* Check that all memory is CPU accessible */
-               if (!amdgpu_mem_visible(adev, old_mem) ||
-                   !amdgpu_mem_visible(adev, new_mem)) {
+               if (!amdgpu_res_copyable(adev, old_mem) ||
+                   !amdgpu_res_copyable(adev, new_mem)) {
                        pr_err("Move buffer fallback to memcpy unavailable\n");
                        return r;
                }
@@ -557,7 +572,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
                                     struct ttm_resource *mem)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
-       size_t bus_size = (size_t)mem->size;
 
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
@@ -568,9 +582,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
                break;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
-               /* check if it's visible */
-               if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
-                       return -EINVAL;
 
                if (adev->mman.aper_base_kaddr &&
                    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
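
The new amdgpu_res_cpu_visible() walks the (possibly scattered) chunks of a VRAM resource with an amdgpu_res_cursor and reports CPU visibility only if no chunk reaches past the visible window. A minimal userspace model of that walk, with illustrative chunk data and the same comparison as the hunk above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct chunk { uint64_t start, size; };	/* stands in for the res cursor */

static bool cpu_visible(const struct chunk *c, int n, uint64_t visible_size)
{
	for (int i = 0; i < n; i++)
		if (c[i].start + c[i].size >= visible_size)
			return false;	/* chunk reaches past the window */
	return true;
}

int main(void)
{
	struct chunk bo[] = { { 0x0000, 0x1000 }, { 0x5000, 0x1000 } };

	printf("%s\n", cpu_visible(bo, 2, 0x10000) ? "visible" : "not visible");
	printf("%s\n", cpu_visible(bo, 2, 0x4000) ? "visible" : "not visible");
	return 0;
}
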
index 65ec82141a8e012e8ba42b0bb627f1a4f504c465..32cf6b6f6efd96873c294648714f2c78f6ff9ec3 100644 (file)
@@ -139,6 +139,9 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
 int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
                                      uint64_t start);
 
+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+                           struct ttm_resource *res);
+
 int amdgpu_ttm_init(struct amdgpu_device *adev);
 void amdgpu_ttm_fini(struct amdgpu_device *adev);
 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
index 4299ce386322e7cea27232ae05a1222f62f5a850..94089069c9ada61aa61b7c2b28601b764d47c172 100644 (file)
@@ -1613,6 +1613,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
        trace_amdgpu_vm_bo_map(bo_va, mapping);
 }
 
+/* Validate operation parameters to prevent potential abuse */
+static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
+                                         struct amdgpu_bo *bo,
+                                         uint64_t saddr,
+                                         uint64_t offset,
+                                         uint64_t size)
+{
+       uint64_t tmp, lpfn;
+
+       if (saddr & AMDGPU_GPU_PAGE_MASK
+           || offset & AMDGPU_GPU_PAGE_MASK
+           || size & AMDGPU_GPU_PAGE_MASK)
+               return -EINVAL;
+
+       if (check_add_overflow(saddr, size, &tmp)
+           || check_add_overflow(offset, size, &tmp)
+           || size == 0 /* which also leads to end < begin */)
+               return -EINVAL;
+
+       /* make sure the object fits at this offset */
+       if (bo && offset + size > amdgpu_bo_size(bo))
+               return -EINVAL;
+
+       /* Ensure the last pfn does not exceed max_pfn */
+       lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
+       if (lpfn >= adev->vm_manager.max_pfn)
+               return -EINVAL;
+
+       return 0;
+}
+
 /**
  * amdgpu_vm_bo_map - map bo inside a vm
  *
@@ -1639,21 +1670,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
        struct amdgpu_bo *bo = bo_va->base.bo;
        struct amdgpu_vm *vm = bo_va->base.vm;
        uint64_t eaddr;
+       int r;
 
-       /* validate the parameters */
-       if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
-               return -EINVAL;
-       if (saddr + size <= saddr || offset + size <= offset)
-               return -EINVAL;
-
-       /* make sure object fit at this offset */
-       eaddr = saddr + size - 1;
-       if ((bo && offset + size > amdgpu_bo_size(bo)) ||
-           (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
-               return -EINVAL;
+       r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+       if (r)
+               return r;
 
        saddr /= AMDGPU_GPU_PAGE_SIZE;
-       eaddr /= AMDGPU_GPU_PAGE_SIZE;
+       eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
 
        tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
        if (tmp) {
@@ -1706,17 +1730,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
        uint64_t eaddr;
        int r;
 
-       /* validate the parameters */
-       if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
-               return -EINVAL;
-       if (saddr + size <= saddr || offset + size <= offset)
-               return -EINVAL;
-
-       /* make sure object fit at this offset */
-       eaddr = saddr + size - 1;
-       if ((bo && offset + size > amdgpu_bo_size(bo)) ||
-           (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
-               return -EINVAL;
+       r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+       if (r)
+               return r;
 
        /* Allocate all the needed memory */
        mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
@@ -1730,7 +1746,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
        }
 
        saddr /= AMDGPU_GPU_PAGE_SIZE;
-       eaddr /= AMDGPU_GPU_PAGE_SIZE;
+       eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
 
        mapping->start = saddr;
        mapping->last = eaddr;
@@ -1817,10 +1833,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
        struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
        LIST_HEAD(removed);
        uint64_t eaddr;
+       int r;
+
+       r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
+       if (r)
+               return r;
 
-       eaddr = saddr + size - 1;
        saddr /= AMDGPU_GPU_PAGE_SIZE;
-       eaddr /= AMDGPU_GPU_PAGE_SIZE;
+       eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
 
        /* Allocate all the needed memory */
        before = kzalloc(sizeof(*before), GFP_KERNEL);
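
amdgpu_vm_verify_parameters() centralizes the alignment, overflow, and range checks that the three mapping paths above used to duplicate. A standalone sketch of the same pattern, using __builtin_add_overflow (the compiler primitive behind the kernel's check_add_overflow()); constants and names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SIZE	4096ULL
#define GPU_PAGE_MASK	(GPU_PAGE_SIZE - 1)	/* low bits, like AMDGPU_GPU_PAGE_MASK */

static int verify(uint64_t saddr, uint64_t offset, uint64_t size,
		  uint64_t max_pfn)
{
	uint64_t tmp;

	if ((saddr | offset | size) & GPU_PAGE_MASK)
		return -1;			/* not GPU-page aligned */

	if (__builtin_add_overflow(saddr, size, &tmp) ||
	    __builtin_add_overflow(offset, size, &tmp) || size == 0)
		return -1;			/* range wraps or is empty */

	if ((saddr + size - 1) / GPU_PAGE_SIZE >= max_pfn)
		return -1;			/* last pfn past the VM limit */

	return 0;
}

int main(void)
{
	printf("%d\n", verify(0x1000, 0, 0x2000, 1 << 20));		/* 0: ok */
	printf("%d\n", verify(~GPU_PAGE_MASK, 0, GPU_PAGE_SIZE, 1 << 20)); /* -1: wraps */
	return 0;
}
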
index 717a60d7a4ea953b8dfc369b09d855ad74b49659..b79986412cd839bc89741a0b3bc1986daa2b10e4 100644 (file)
@@ -819,9 +819,9 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
        mutex_lock(&kfd_processes_mutex);
 
        if (kfd_is_locked()) {
-               mutex_unlock(&kfd_processes_mutex);
                pr_debug("KFD is locked! Cannot create process");
-               return ERR_PTR(-EINVAL);
+               process = ERR_PTR(-EINVAL);
+               goto out;
        }
 
        /* A prior open of /dev/kfd could have already created the process. */
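
The hunk above replaces an early mutex_unlock()-and-return with an error assignment and a jump to the function's common exit (the out: label that already unlocks kfd_processes_mutex), leaving a single unlock site. A minimal runnable sketch of that pattern, with hypothetical names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_work(int locked_out)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (locked_out) {
		ret = -1;	/* record the error ... */
		goto out;	/* ... and leave through the common exit */
	}
	/* normal-path work would go here */
out:
	pthread_mutex_unlock(&lock);	/* exactly one unlock site */
	return ret;
}

int main(void)
{
	printf("%d %d\n", do_work(0), do_work(1));
	return 0;
}
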
index e224a028d68accaf083a76a93eb7f0cdb940aedf..8a0460e86309775e83775093b04527f022e4a91c 100644 (file)
@@ -248,14 +248,12 @@ void dcn32_link_encoder_construct(
        enc10->base.hpd_source = init_data->hpd_source;
        enc10->base.connector = init_data->connector;
 
-       enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
-
-       enc10->base.features = *enc_features;
        if (enc10->base.connector.id == CONNECTOR_ID_USBC)
                enc10->base.features.flags.bits.DP_IS_USB_C = 1;
 
-       if (enc10->base.connector.id == CONNECTOR_ID_USBC)
-               enc10->base.features.flags.bits.DP_IS_USB_C = 1;
+       enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+
+       enc10->base.features = *enc_features;
 
        enc10->base.transmitter = init_data->transmitter;
 
index 81e349d5835bbed499f03ef6eb33e5210c83d64b..da94e5309fbaf0f8e06a4a1aad4ce431a8d9f2cc 100644 (file)
@@ -184,6 +184,8 @@ void dcn35_link_encoder_construct(
        enc10->base.hpd_source = init_data->hpd_source;
        enc10->base.connector = init_data->connector;
 
+       if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+               enc10->base.features.flags.bits.DP_IS_USB_C = 1;
 
        enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
 
@@ -238,8 +240,6 @@ void dcn35_link_encoder_construct(
        }
 
        enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
-       if (enc10->base.connector.id == CONNECTOR_ID_USBC)
-               enc10->base.features.flags.bits.DP_IS_USB_C = 1;
 
        if (bp_funcs->get_connector_speed_cap_info)
                result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios,
index 479effcf607e261fac73361958a0a855cf90d315..79cfab53f80e259093b7ae0f04310f6470a3c930 100644 (file)
@@ -23,6 +23,7 @@
  */
 
 #include "nouveau_drv.h"
+#include "nouveau_bios.h"
 #include "nouveau_reg.h"
 #include "dispnv04/hw.h"
 #include "nouveau_encoder.h"
@@ -1677,7 +1678,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
         */
        if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
                if (*conn == 0xf2005014 && *conf == 0xffffffff) {
-                       fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
+                       fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, DCB_OUTPUT_B);
                        return false;
                }
        }
@@ -1763,26 +1764,26 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
 #ifdef __powerpc__
        /* Apple iMac G4 NV17 */
        if (of_machine_is_compatible("PowerMac4,5")) {
-               fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
-               fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
+               fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, DCB_OUTPUT_B);
+               fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, DCB_OUTPUT_C);
                return;
        }
 #endif
 
        /* Make up some sane defaults */
        fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
-                            bios->legacy.i2c_indices.crt, 1, 1);
+                            bios->legacy.i2c_indices.crt, 1, DCB_OUTPUT_B);
 
        if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
                fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
                                     bios->legacy.i2c_indices.tv,
-                                    all_heads, 0);
+                                    all_heads, DCB_OUTPUT_A);
 
        else if (bios->tmds.output0_script_ptr ||
                 bios->tmds.output1_script_ptr)
                fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
                                     bios->legacy.i2c_indices.panel,
-                                    all_heads, 1);
+                                    all_heads, DCB_OUTPUT_B);
 }
 
 static int
index 7de7707ec6a895ee2a914150008425a21041bf9c..a72c45809484ab58023dfa0dc5172f67adcfdc23 100644 (file)
@@ -225,12 +225,18 @@ nouveau_dp_detect(struct nouveau_connector *nv_connector,
        u8 *dpcd = nv_encoder->dp.dpcd;
        int ret = NOUVEAU_DP_NONE, hpd;
 
-       /* If we've already read the DPCD on an eDP device, we don't need to
-        * reread it as it won't change
+       /* eDP ports don't support hotplugging - so there's no point in probing an
+        * eDP port that we've already probed once before.
+        */
-       if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
-           dpcd[DP_DPCD_REV] != 0)
-               return NOUVEAU_DP_SST;
+       if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+               if (connector->status == connector_status_connected)
+                       return NOUVEAU_DP_SST;
+               else if (connector->status == connector_status_disconnected)
+                       return NOUVEAU_DP_NONE;
+       }
+
+       // Ensure that the aux bus is enabled for probing
+       drm_dp_dpcd_set_powered(&nv_connector->aux, true);
 
        mutex_lock(&nv_encoder->dp.hpd_irq_lock);
        if (mstm) {
@@ -293,6 +299,13 @@ out:
        if (mstm && !mstm->suspended && ret != NOUVEAU_DP_MST)
                nv50_mstm_remove(mstm);
 
+       /* GSP doesn't like it when we try to do aux transactions on a port it considers
+        * disconnected, and since we don't really have a use case for that anyway - just
+        * disable the aux bus here if we've decided the connector is disconnected.
+        */
+       if (ret == NOUVEAU_DP_NONE)
+               drm_dp_dpcd_set_powered(&nv_connector->aux, false);
+
        mutex_unlock(&nv_encoder->dp.hpd_irq_lock);
        return ret;
 }
index a7f3fc342d87e03b031b5008d939c2eb46f49404..dd5b5a17ece0beed225888888d6c01a0afcf67c9 100644 (file)
@@ -222,8 +222,11 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
        void __iomem *map = NULL;
 
        /* Already mapped? */
-       if (refcount_inc_not_zero(&iobj->maps))
+       if (refcount_inc_not_zero(&iobj->maps)) {
+               /* read barrier matches the wmb on refcount set */
+               smp_rmb();
                return iobj->map;
+       }
 
        /* Take the lock, and re-check that another thread hasn't
         * already mapped the object in the meantime.
@@ -250,6 +253,8 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
                        iobj->base.memory.ptrs = &nv50_instobj_fast;
                else
                        iobj->base.memory.ptrs = &nv50_instobj_slow;
+               /* barrier to ensure the ptrs are written before refcount is set */
+               smp_wmb();
                refcount_set(&iobj->maps, 1);
        }
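
The two hunks above pair a write barrier before refcount_set() with a read barrier after a successful refcount_inc_not_zero(): a thread that observes a nonzero map count is then guaranteed to also observe the ptrs written before it. The same publish/consume ordering, in portable and runnable form, uses C11 release/acquire atomics (names here are illustrative):

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static int payload;			/* stands in for the mapped ptrs */
static atomic_int published;		/* stands in for iobj->maps */

static void *writer(void *arg)
{
	(void)arg;
	payload = 42;					/* write the data */
	atomic_store_explicit(&published, 1,
			      memory_order_release);	/* like smp_wmb + set */
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	while (!atomic_load_explicit(&published,
				     memory_order_acquire))	/* like check + smp_rmb */
		;
	printf("%d\n", payload);	/* guaranteed to print 42 */
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}
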
 
index cb7406d7446695ebd3566230f3e11fca3b4cc323..c39fe0fc5d69c646915561bc3d4cb5cfc5411ac1 100644 (file)
@@ -614,8 +614,6 @@ static void nt36672e_panel_remove(struct mipi_dsi_device *dsi)
        struct nt36672e_panel *ctx = mipi_dsi_get_drvdata(dsi);
 
        mipi_dsi_detach(ctx->dsi);
-       mipi_dsi_device_unregister(ctx->dsi);
-
        drm_panel_remove(&ctx->panel);
 }
 
index 775144695283f54dcb1c527e58c9604cfd6da207..b15ca56a09a74a06f8bfcd0b4053d554ced9b58d 100644 (file)
@@ -253,8 +253,6 @@ static void visionox_rm69299_remove(struct mipi_dsi_device *dsi)
        struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);
 
        mipi_dsi_detach(ctx->dsi);
-       mipi_dsi_device_unregister(ctx->dsi);
-
        drm_panel_remove(&ctx->panel);
 }
 
index 94947229888ba7888aa6992116af8ab985219dbe..b7f22597ee95e798bb104894052997e332c298c6 100644 (file)
@@ -424,7 +424,7 @@ typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
 typedef struct _ATOM_PPLIB_STATE_V2
 {
       //number of valid dpm levels in this state; Driver uses it to calculate the whole 
-      //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+      //size of the state: struct_size(ATOM_PPLIB_STATE_V2, clockInfoIndex, ucNumDPMLevels)
       UCHAR ucNumDPMLevels;
       
      //an index to the array of nonClockInfos
@@ -432,14 +432,14 @@ typedef struct _ATOM_PPLIB_STATE_V2
       /**
       * Driver will read the first ucNumDPMLevels in this array
       */
-      UCHAR clockInfoIndex[1];
+      UCHAR clockInfoIndex[] __counted_by(ucNumDPMLevels);
 } ATOM_PPLIB_STATE_V2;
 
 typedef struct _StateArray{
     //how many states we have 
     UCHAR ucNumEntries;
     
-    ATOM_PPLIB_STATE_V2 states[1];
+    ATOM_PPLIB_STATE_V2 states[] __counted_by(ucNumEntries);
 }StateArray;
 
 
@@ -450,7 +450,7 @@ typedef struct _ClockInfoArray{
     //sizeof(ATOM_PPLIB_CLOCK_INFO)
     UCHAR ucEntrySize;
     
-    UCHAR clockInfo[1];
+    UCHAR clockInfo[] __counted_by(ucNumEntries);
 }ClockInfoArray;
 
 typedef struct _NonClockInfoArray{
@@ -460,7 +460,7 @@ typedef struct _NonClockInfoArray{
     //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
     UCHAR ucEntrySize;
     
-    ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+    ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[] __counted_by(ucNumEntries);
 }NonClockInfoArray;
 
 typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
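
With the one-element arrays converted to flexible array members, the "(ucNumDPMLevels - 1)" fudge in the old size comment disappears: the allocation is simply the header plus n elements, which is what the kernel's struct_size() computes (with overflow checking). A runnable userspace sketch of the sizing; __counted_by() is omitted since it needs a recent compiler, and the struct is a trimmed stand-in:

#include <stdio.h>
#include <stdlib.h>

struct state_v2 {
	unsigned char num_levels;
	unsigned char non_clock_index;
	unsigned char clock_info_index[];	/* flexible array member */
};

int main(void)
{
	unsigned char n = 4;
	/* equivalent of the kernel's struct_size(s, clock_info_index, n) */
	size_t sz = sizeof(struct state_v2) +
		    n * sizeof(((struct state_v2 *)0)->clock_info_index[0]);
	struct state_v2 *s = calloc(1, sz);

	s->num_levels = n;
	printf("%zu bytes for %u levels\n", sz, (unsigned)s->num_levels);
	free(s);
	return 0;
}
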
index bb1f0a3371ab5de484a81ad040347c9a5a8d4e76..10793a433bf58697fcdfce8e850ebfdd55ec7284 100644 (file)
@@ -923,8 +923,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
                max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
 
        for (i = 0; i < max_device; i++) {
-               ATOM_CONNECTOR_INFO_I2C ci =
-                   supported_devices->info.asConnInfo[i];
+               ATOM_CONNECTOR_INFO_I2C ci;
+
+               if (frev > 1)
+                       ci = supported_devices->info_2d1.asConnInfo[i];
+               else
+                       ci = supported_devices->info.asConnInfo[i];
 
                bios_connectors[i].valid = false;
 
index 112438d965ffbefd4fa2cce5f246cc03a63759f9..6e1fd6985ffcb730eb7057c4509aec971dfa8266 100644 (file)
@@ -288,17 +288,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
                                                  enum ttm_caching caching,
                                                  unsigned int order)
 {
-       if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
+       if (pool->use_dma_alloc)
                return &pool->caching[caching].orders[order];
 
 #ifdef CONFIG_X86
        switch (caching) {
        case ttm_write_combined:
+               if (pool->nid != NUMA_NO_NODE)
+                       return &pool->caching[caching].orders[order];
+
                if (pool->use_dma32)
                        return &global_dma32_write_combined[order];
 
                return &global_write_combined[order];
        case ttm_uncached:
+               if (pool->nid != NUMA_NO_NODE)
+                       return &pool->caching[caching].orders[order];
+
                if (pool->use_dma32)
                        return &global_dma32_uncached[order];
 
@@ -566,11 +572,17 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
        pool->use_dma_alloc = use_dma_alloc;
        pool->use_dma32 = use_dma32;
 
-       if (use_dma_alloc || nid != NUMA_NO_NODE) {
-               for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-                       for (j = 0; j < NR_PAGE_ORDERS; ++j)
-                               ttm_pool_type_init(&pool->caching[i].orders[j],
-                                                  pool, i, j);
+       for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+               for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+                       struct ttm_pool_type *pt;
+
+                       /* Initialize only pool types which are actually used */
+                       pt = ttm_pool_select_type(pool, i, j);
+                       if (pt != &pool->caching[i].orders[j])
+                               continue;
+
+                       ttm_pool_type_init(pt, pool, i, j);
+               }
        }
 }
 EXPORT_SYMBOL(ttm_pool_init);
@@ -599,10 +611,16 @@ void ttm_pool_fini(struct ttm_pool *pool)
 {
        unsigned int i, j;
 
-       if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
-               for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-                       for (j = 0; j < NR_PAGE_ORDERS; ++j)
-                               ttm_pool_type_fini(&pool->caching[i].orders[j]);
+       for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+               for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+                       struct ttm_pool_type *pt;
+
+                       pt = ttm_pool_select_type(pool, i, j);
+                       if (pt != &pool->caching[i].orders[j])
+                               continue;
+
+                       ttm_pool_type_fini(pt);
+               }
        }
 
        /* We removed the pool types from the LRU, but we need to also make sure
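
After this change, ttm_pool_init()/ttm_pool_fini() no longer second-guess which pool types are in use: they ask ttm_pool_select_type() and only touch a slot when the selector maps it back to the pool's own array, so the lookup and init paths can never disagree. A small model of that "ask the selector" pattern, with illustrative types:

#include <stdbool.h>
#include <stdio.h>

#define SLOTS 4

struct pool {
	bool use_local;
	int local[SLOTS];
	int *global[SLOTS];	/* shared fallback, owned elsewhere */
};

static int *select_slot(struct pool *p, int i)
{
	return p->use_local ? &p->local[i] : p->global[i];
}

static void pool_init(struct pool *p)
{
	for (int i = 0; i < SLOTS; i++) {
		int *slot = select_slot(p, i);

		if (slot != &p->local[i])
			continue;	/* global slot: not ours to init */
		*slot = i;		/* stands in for ttm_pool_type_init() */
	}
}

int main(void)
{
	struct pool p = { .use_local = true };

	pool_init(&p);
	printf("%d %d\n", p.local[0], p.local[3]);
	return 0;
}
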
index 2e04f6cb661e4f42eeaaef3c5d7fcdaee501d457..ce6b2fb341d1f8a85bab6f0ed19fd3ccde39757b 100644 (file)
@@ -105,7 +105,6 @@ v3d_irq(int irq, void *arg)
                struct v3d_file_priv *file = v3d->bin_job->base.file->driver_priv;
                u64 runtime = local_clock() - file->start_ns[V3D_BIN];
 
-               file->enabled_ns[V3D_BIN] += local_clock() - file->start_ns[V3D_BIN];
                file->jobs_sent[V3D_BIN]++;
                v3d->queue[V3D_BIN].jobs_sent++;
 
@@ -126,7 +125,6 @@ v3d_irq(int irq, void *arg)
                struct v3d_file_priv *file = v3d->render_job->base.file->driver_priv;
                u64 runtime = local_clock() - file->start_ns[V3D_RENDER];
 
-               file->enabled_ns[V3D_RENDER] += local_clock() - file->start_ns[V3D_RENDER];
                file->jobs_sent[V3D_RENDER]++;
                v3d->queue[V3D_RENDER].jobs_sent++;
 
@@ -147,7 +145,6 @@ v3d_irq(int irq, void *arg)
                struct v3d_file_priv *file = v3d->csd_job->base.file->driver_priv;
                u64 runtime = local_clock() - file->start_ns[V3D_CSD];
 
-               file->enabled_ns[V3D_CSD] += local_clock() - file->start_ns[V3D_CSD];
                file->jobs_sent[V3D_CSD]++;
                v3d->queue[V3D_CSD].jobs_sent++;
 
@@ -195,7 +192,6 @@ v3d_hub_irq(int irq, void *arg)
                struct v3d_file_priv *file = v3d->tfu_job->base.file->driver_priv;
                u64 runtime = local_clock() - file->start_ns[V3D_TFU];
 
-               file->enabled_ns[V3D_TFU] += local_clock() - file->start_ns[V3D_TFU];
                file->jobs_sent[V3D_TFU]++;
                v3d->queue[V3D_TFU].jobs_sent++;
 
index c52c7bf1485b1fa95b1e9ca3f1e05135167a8c5c..717d624e9a052298d5d5070551e909cc65ee0cc5 100644 (file)
@@ -456,8 +456,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
                .no_wait_gpu = false
        };
        u32 j, initial_line = dst_offset / dst_stride;
-       struct vmw_bo_blit_line_data d;
+       struct vmw_bo_blit_line_data d = {0};
        int ret = 0;
+       struct page **dst_pages = NULL;
+       struct page **src_pages = NULL;
 
        /* Buffer objects need to be either pinned or reserved: */
        if (!(dst->pin_count))
@@ -477,12 +479,35 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
                        return ret;
        }
 
+       if (!src->ttm->pages && src->ttm->sg) {
+               src_pages = kvmalloc_array(src->ttm->num_pages,
+                                          sizeof(struct page *), GFP_KERNEL);
+               if (!src_pages)
+                       return -ENOMEM;
+               ret = drm_prime_sg_to_page_array(src->ttm->sg, src_pages,
+                                                src->ttm->num_pages);
+               if (ret)
+                       goto out;
+       }
+       if (!dst->ttm->pages && dst->ttm->sg) {
+               dst_pages = kvmalloc_array(dst->ttm->num_pages,
+                                          sizeof(struct page *), GFP_KERNEL);
+               if (!dst_pages) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               ret = drm_prime_sg_to_page_array(dst->ttm->sg, dst_pages,
+                                                dst->ttm->num_pages);
+               if (ret)
+                       goto out;
+       }
+
        d.mapped_dst = 0;
        d.mapped_src = 0;
        d.dst_addr = NULL;
        d.src_addr = NULL;
-       d.dst_pages = dst->ttm->pages;
-       d.src_pages = src->ttm->pages;
+       d.dst_pages = dst->ttm->pages ? dst->ttm->pages : dst_pages;
+       d.src_pages = src->ttm->pages ? src->ttm->pages : src_pages;
        d.dst_num_pages = PFN_UP(dst->resource->size);
        d.src_num_pages = PFN_UP(src->resource->size);
        d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
@@ -504,6 +529,10 @@ out:
                kunmap_atomic(d.src_addr);
        if (d.dst_addr)
                kunmap_atomic(d.dst_addr);
+       if (src_pages)
+               kvfree(src_pages);
+       if (dst_pages)
+               kvfree(dst_pages);
 
        return ret;
 }
index bfd41ce3c8f4fca1f5a659e4513e08f72ea95966..e5eb21a471a6010aa956c811522956f27b99a096 100644 (file)
@@ -377,7 +377,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
 {
        struct ttm_operation_ctx ctx = {
                .interruptible = params->bo_type != ttm_bo_type_kernel,
-               .no_wait_gpu = false
+               .no_wait_gpu = false,
+               .resv = params->resv,
        };
        struct ttm_device *bdev = &dev_priv->bdev;
        struct drm_device *vdev = &dev_priv->drm;
@@ -394,8 +395,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
 
        vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
        ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
-                                  &vmw_bo->placement, 0, &ctx, NULL,
-                                  NULL, destroy);
+                                  &vmw_bo->placement, 0, &ctx,
+                                  params->sg, params->resv, destroy);
        if (unlikely(ret))
                return ret;
 
index 0d496dc9c6af7a352c0432f50f4dd9be37448b5e..f349642e6190d6933031d08ccd7f353231f0f1da 100644 (file)
@@ -55,6 +55,8 @@ struct vmw_bo_params {
        enum ttm_bo_type bo_type;
        size_t size;
        bool pin;
+       struct dma_resv *resv;
+       struct sg_table *sg;
 };
 
 /**
index 0a304706e01322a6372727a27eb3fd0330471b31..58fb40c93100a84ec8b1dd769f35ab31c00bd0dc 100644 (file)
@@ -1628,6 +1628,7 @@ static const struct drm_driver driver = {
 
        .prime_fd_to_handle = vmw_prime_fd_to_handle,
        .prime_handle_to_fd = vmw_prime_handle_to_fd,
+       .gem_prime_import_sg_table = vmw_prime_import_sg_table,
 
        .fops = &vmwgfx_driver_fops,
        .name = VMWGFX_DRIVER_NAME,
index 12efecc17df664968056906da40cdd46b5665ddf..b019a1a1787af59e3fca37f560db905d7b2b6ab0 100644 (file)
@@ -1130,6 +1130,9 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
                                  struct drm_file *file_priv,
                                  uint32_t handle, uint32_t flags,
                                  int *prime_fd);
+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+                                                struct dma_buf_attachment *attach,
+                                                struct sg_table *table);
 
 /*
  * MemoryOBject management -  vmwgfx_mob.c
index 12787bb9c111d10db997b9db650c6bb1069c26ef..d6bcaf078b1f40bbf75bdfb63fd1e00b7901e20f 100644 (file)
@@ -149,6 +149,38 @@ out_no_bo:
        return ret;
 }
 
+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+                                                struct dma_buf_attachment *attach,
+                                                struct sg_table *table)
+{
+       int ret;
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       struct drm_gem_object *gem = NULL;
+       struct vmw_bo *vbo;
+       struct vmw_bo_params params = {
+               .domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
+               .busy_domain = VMW_BO_DOMAIN_SYS,
+               .bo_type = ttm_bo_type_sg,
+               .size = attach->dmabuf->size,
+               .pin = false,
+               .resv = attach->dmabuf->resv,
+               .sg = table,
+
+       };
+
+       dma_resv_lock(params.resv, NULL);
+
+       ret = vmw_bo_create(dev_priv, &params, &vbo);
+       if (ret != 0)
+               goto out_no_bo;
+
+       vbo->tbo.base.funcs = &vmw_gem_object_funcs;
+
+       gem = &vbo->tbo.base;
+out_no_bo:
+       dma_resv_unlock(params.resv);
+       return gem;
+}
 
 int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
index cd4925346ed45a1c10ae4e9d9b13c0066c71f168..84ae4e10a2ebec20c52a7eb42ea5455a0d22dfa5 100644 (file)
@@ -933,6 +933,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
 int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
                             struct drm_atomic_state *state)
 {
+       struct vmw_private *vmw = vmw_priv(crtc->dev);
        struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
                                                                         crtc);
        struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
@@ -940,9 +941,13 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
        bool has_primary = new_state->plane_mask &
                           drm_plane_mask(crtc->primary);
 
-       /* We always want to have an active plane with an active CRTC */
-       if (has_primary != new_state->enable)
-               return -EINVAL;
+       /*
+        * This is fine in general, but broken userspace might expect
+        * some actual rendering so give a clue as to why it's blank.
+        */
+       if (new_state->enable && !has_primary)
+               drm_dbg_driver(&vmw->drm,
+                              "CRTC without a primary plane will be blank.\n");
 
 
        if (new_state->connector_mask != connector_mask &&
index a94947b588e85f2c764aab60e11a84e59dd2a2ea..19a843da87b789b62279ecb9dccb8b2ddb19fe2f 100644 (file)
@@ -243,10 +243,10 @@ struct vmw_framebuffer_bo {
 
 
 static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
-       DRM_FORMAT_XRGB1555,
-       DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_XRGB1555,
 };
 
 static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
index 2d72a5ee7c0c710339d5d25c0a9376745a90f7af..c99cad444991579f6e665453b74f56cb35de2e15 100644 (file)
@@ -75,8 +75,12 @@ int vmw_prime_fd_to_handle(struct drm_device *dev,
                           int fd, u32 *handle)
 {
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       int ret = ttm_prime_fd_to_handle(tfile, fd, handle);
 
-       return ttm_prime_fd_to_handle(tfile, fd, handle);
+       if (ret)
+               ret = drm_gem_prime_fd_to_handle(dev, file_priv, fd, handle);
+
+       return ret;
 }
 
 int vmw_prime_handle_to_fd(struct drm_device *dev,
@@ -85,5 +89,12 @@ int vmw_prime_handle_to_fd(struct drm_device *dev,
                           int *prime_fd)
 {
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-       return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+       int ret;
+
+       if (handle > VMWGFX_NUM_MOB)
+               ret = ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+       else
+               ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, prime_fd);
+
+       return ret;
 }
index 4d23d0a70bcb7ef4901e9128b84cd7112ae8913a..621d98b376bbbc4b40cef6b9c6759b975610dd56 100644 (file)
@@ -188,13 +188,18 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
        switch (dev_priv->map_mode) {
        case vmw_dma_map_bind:
        case vmw_dma_map_populate:
-               vsgt->sgt = &vmw_tt->sgt;
-               ret = sg_alloc_table_from_pages_segment(
-                       &vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
-                       (unsigned long)vsgt->num_pages << PAGE_SHIFT,
-                       dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
-               if (ret)
-                       goto out_sg_alloc_fail;
+               if (vmw_tt->dma_ttm.page_flags & TTM_TT_FLAG_EXTERNAL) {
+                       vsgt->sgt = vmw_tt->dma_ttm.sg;
+               } else {
+                       vsgt->sgt = &vmw_tt->sgt;
+                       ret = sg_alloc_table_from_pages_segment(&vmw_tt->sgt,
+                               vsgt->pages, vsgt->num_pages, 0,
+                               (unsigned long)vsgt->num_pages << PAGE_SHIFT,
+                               dma_get_max_seg_size(dev_priv->drm.dev),
+                               GFP_KERNEL);
+                       if (ret)
+                               goto out_sg_alloc_fail;
+               }
 
                ret = vmw_ttm_map_for_dma(vmw_tt);
                if (unlikely(ret != 0))
@@ -209,8 +214,9 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
        return 0;
 
 out_map_fail:
-       sg_free_table(vmw_tt->vsgt.sgt);
-       vmw_tt->vsgt.sgt = NULL;
+       drm_warn(&dev_priv->drm, "VSG table map failed!");
+       sg_free_table(vsgt->sgt);
+       vsgt->sgt = NULL;
 out_sg_alloc_fail:
        return ret;
 }
@@ -356,15 +362,17 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
 static int vmw_ttm_populate(struct ttm_device *bdev,
                            struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
-       int ret;
+       bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
 
-       /* TODO: maybe completely drop this ? */
        if (ttm_tt_is_populated(ttm))
                return 0;
 
-       ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
+       if (external && ttm->sg)
+               return drm_prime_sg_to_dma_addr_array(ttm->sg,
+                                                     ttm->dma_address,
+                                                     ttm->num_pages);
 
-       return ret;
+       return ttm_pool_alloc(&bdev->pool, ttm, ctx);
 }
 
 static void vmw_ttm_unpopulate(struct ttm_device *bdev,
@@ -372,6 +380,10 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
 {
        struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
                                                 dma_ttm);
+       bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
+
+       if (external)
+               return;
 
        vmw_ttm_unbind(bdev, ttm);
 
@@ -390,6 +402,7 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
 {
        struct vmw_ttm_tt *vmw_be;
        int ret;
+       bool external = bo->type == ttm_bo_type_sg;
 
        vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
        if (!vmw_be)
@@ -398,7 +411,10 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
        vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
        vmw_be->mob = NULL;
 
-       if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+       if (external)
+               page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+
+       if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent || external)
                ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
                                     ttm_cached);
        else