// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/set_memory.h>
#include <linux/xarray.h>

#include <drm/drm_cache.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_utils.h>

#include "ivpu_drv.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"

static const struct drm_gem_object_funcs ivpu_gem_funcs;

static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action)
{
	ivpu_dbg(vdev, BO,
		 "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
		 action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx ? bo->ctx->id : 0,
		 (bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
		 (bool)bo->base.base.import_attach);
}

/*
 * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
 *
 * This function pins physical memory pages, then maps the physical pages
 * to the IOMMU address space and finally updates the VPU MMU page tables
 * to allow the VPU to translate VPU addresses to IOMMU addresses.
 */
int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int ret = 0;

	mutex_lock(&bo->lock);

	ivpu_dbg_bo(vdev, bo, "pin");
	drm_WARN_ON(&vdev->drm, !bo->ctx);

	if (!bo->mmu_mapped) {
		struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base);

		if (IS_ERR(sgt)) {
			ret = PTR_ERR(sgt);
			ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
			goto unlock;
		}

		ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
					       ivpu_bo_is_snooped(bo));
		if (ret) {
			ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret);
			goto unlock;
		}
		bo->mmu_mapped = true;
	}

unlock:
	mutex_unlock(&bo->lock);

	return ret;
}
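
/*
 * ivpu_bo_alloc_vpu_addr() - reserve a VPU address for the BO.
 *
 * Inserts the BO into the address space of @ctx within @range and records
 * the resulting VPU address. The actual MMU mapping is created later by
 * ivpu_bo_pin().
 */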
static int
ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
		       const struct ivpu_addr_range *range)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
	int idx, ret;

	if (!drm_dev_enter(&vdev->drm, &idx))
		return -ENODEV;

	mutex_lock(&bo->lock);

	ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
	if (!ret) {
		bo->ctx = ctx;
		bo->vpu_addr = bo->mm_node.start;
	} else {
		ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
	}

	ivpu_dbg_bo(vdev, bo, "alloc");

	mutex_unlock(&bo->lock);

	drm_dev_exit(idx);

	return ret;
}
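
/*
 * ivpu_bo_unbind_locked() - undo what ivpu_bo_pin() and
 * ivpu_bo_alloc_vpu_addr() set up: unmap the BO from the VPU MMU, release
 * its VPU address range and, for non-imported BOs, drop the DMA mapping.
 * The caller must hold bo->lock unless the BO refcount has dropped to zero.
 */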
static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
{
	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);

	lockdep_assert(lockdep_is_held(&bo->lock) || !kref_read(&bo->base.base.refcount));

	if (bo->mmu_mapped) {
		drm_WARN_ON(&vdev->drm, !bo->ctx);
		drm_WARN_ON(&vdev->drm, !bo->vpu_addr);
		drm_WARN_ON(&vdev->drm, !bo->base.sgt);
		ivpu_mmu_context_unmap_sgt(vdev, bo->ctx, bo->vpu_addr, bo->base.sgt);
		bo->mmu_mapped = false;
	}

	if (bo->ctx) {
		ivpu_mmu_context_remove_node(bo->ctx, &bo->mm_node);
		bo->ctx = NULL;
	}

	if (bo->base.base.import_attach)
		return;

	dma_resv_lock(bo->base.base.resv, NULL);
	if (bo->base.sgt) {
		dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
		sg_free_table(bo->base.sgt);
		kfree(bo->base.sgt);
		bo->base.sgt = NULL;
	}
	dma_resv_unlock(bo->base.base.resv);
}
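
/*
 * ivpu_bo_unbind_all_bos_from_context() - detach every BO that still
 * belongs to @ctx, e.g. when a file's MMU context is being torn down.
 */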
void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
	struct ivpu_bo *bo;

	if (drm_WARN_ON(&vdev->drm, !ctx))
		return;

	mutex_lock(&vdev->bo_list_lock);
	list_for_each_entry(bo, &vdev->bo_list, bo_list_node) {
		mutex_lock(&bo->lock);
		if (bo->ctx == ctx) {
			ivpu_dbg_bo(vdev, bo, "unbind");
			ivpu_bo_unbind_locked(bo);
		}
		mutex_unlock(&bo->lock);
	}
	mutex_unlock(&vdev->bo_list_lock);
}
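
/*
 * ivpu_gem_create_object() - driver hook used by the shmem helpers;
 * allocates the ivpu_bo wrapper and installs the ivpu GEM callbacks.
 */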
struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size)
{
	struct ivpu_bo *bo;

	if (size == 0 || !PAGE_ALIGNED(size))
		return ERR_PTR(-EINVAL);

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->base.base.funcs = &ivpu_gem_funcs;
	bo->base.pages_mark_dirty_on_put = true; /* VPU can dirty a BO anytime */

	INIT_LIST_HEAD(&bo->bo_list_node);
	mutex_init(&bo->lock);

	return &bo->base.base;
}
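
/*
 * ivpu_bo_alloc() - create a shmem-backed BO and add it to the device-wide
 * BO list. Only cached and write-combined CPU mappings are supported.
 */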
static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags)
{
	struct drm_gem_shmem_object *shmem;
	struct ivpu_bo *bo;

	switch (flags & DRM_IVPU_BO_CACHE_MASK) {
	case DRM_IVPU_BO_CACHED:
	case DRM_IVPU_BO_WC:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	shmem = drm_gem_shmem_create(&vdev->drm, size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	bo = to_ivpu_bo(&shmem->base);
	bo->base.map_wc = flags & DRM_IVPU_BO_WC;
	bo->flags = flags;

	mutex_lock(&vdev->bo_list_lock);
	list_add_tail(&bo->bo_list_node, &vdev->bo_list);
	mutex_unlock(&vdev->bo_list_lock);

	return bo;
}
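
/*
 * ivpu_gem_bo_open() - called when a handle to the BO is created for a DRM
 * file; binds the BO to the file's MMU context and picks the VPU address
 * range based on the BO flags.
 */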
static int ivpu_gem_bo_open(struct drm_gem_object *obj, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_bo *bo = to_ivpu_bo(obj);
	struct ivpu_addr_range *range;

	if (bo->ctx) {
		ivpu_warn(vdev, "Can't add BO to ctx %u: already in ctx %u\n",
			  file_priv->ctx.id, bo->ctx->id);
		return -EALREADY;
	}

	if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
		range = &vdev->hw->ranges.shave;
	else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
		range = &vdev->hw->ranges.dma;
	else
		range = &vdev->hw->ranges.user;

	return ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, range);
}
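
/*
 * ivpu_gem_bo_free() - final put: remove the BO from the device list,
 * unbind it from the VPU and release the shmem backing storage.
 */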
static void ivpu_gem_bo_free(struct drm_gem_object *obj)
{
	struct ivpu_device *vdev = to_ivpu_device(obj->dev);
	struct ivpu_bo *bo = to_ivpu_bo(obj);

	ivpu_dbg_bo(vdev, bo, "free");

	mutex_lock(&vdev->bo_list_lock);
	list_del(&bo->bo_list_node);
	mutex_unlock(&vdev->bo_list_lock);

	drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));

	ivpu_bo_unbind_locked(bo);
	mutex_destroy(&bo->lock);

	drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
	drm_gem_shmem_free(&bo->base);
}
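
/* GEM callbacks: everything except free/open is handled by the shmem helpers */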
static const struct drm_gem_object_funcs ivpu_gem_funcs = {
	.free = ivpu_gem_bo_free,
	.open = ivpu_gem_bo_open,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};
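
/*
 * DRM_IOCTL_IVPU_BO_CREATE - allocate a BO on behalf of userspace and
 * return a GEM handle plus the assigned VPU address.
 */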
int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct drm_ivpu_bo_create *args = data;
	u64 size = PAGE_ALIGN(args->size);
	struct ivpu_bo *bo;
	int ret;

	if (args->flags & ~DRM_IVPU_BO_FLAGS)
		return -EINVAL;

	if (size == 0)
		return -EINVAL;

	bo = ivpu_bo_alloc(vdev, size, args->flags);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)",
			 bo, file_priv->ctx.id, args->size, args->flags);
		return PTR_ERR(bo);
	}

	ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
	if (!ret)
		args->vpu_addr = bo->vpu_addr;

	drm_gem_object_put(&bo->base.base);

	return ret;
}
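
/*
 * ivpu_bo_create() - allocate, map and optionally vmap a kernel-internal BO
 * in the given context and address range. Returns NULL on failure.
 */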
struct ivpu_bo *
ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
	       struct ivpu_addr_range *range, u64 size, u32 flags)
{
	struct iosys_map map;
	struct ivpu_bo *bo;
	int ret;

	if (drm_WARN_ON(&vdev->drm, !range))
		return NULL;

	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->start));
	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->end));
	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));

	bo = ivpu_bo_alloc(vdev, size, flags);
	if (IS_ERR(bo)) {
		ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
			 bo, range->start, size, flags);
		return NULL;
	}

	ret = ivpu_bo_alloc_vpu_addr(bo, ctx, range);
	if (ret)
		goto err_put;

	ret = ivpu_bo_pin(bo);
	if (ret)
		goto err_put;

	if (flags & DRM_IVPU_BO_MAPPABLE) {
		dma_resv_lock(bo->base.base.resv, NULL);
		ret = drm_gem_shmem_vmap(&bo->base, &map);
		dma_resv_unlock(bo->base.base.resv);

		if (ret)
			goto err_put;
	}

	return bo;

err_put:
	drm_gem_object_put(&bo->base.base);
	return NULL;
}
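
/* Convenience wrapper: allocate a BO in the global context and range */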
struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags)
{
	return ivpu_bo_create(vdev, &vdev->gctx, &vdev->hw->ranges.global, size, flags);
}
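
/*
 * ivpu_bo_free() - release a kernel-internal BO created with ivpu_bo_create();
 * undoes the optional vmap and drops the last reference.
 */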
void ivpu_bo_free(struct ivpu_bo *bo)
{
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);

	if (bo->flags & DRM_IVPU_BO_MAPPABLE) {
		dma_resv_lock(bo->base.base.resv, NULL);
		drm_gem_shmem_vunmap(&bo->base, &map);
		dma_resv_unlock(bo->base.base.resv);
	}

	drm_gem_object_put(&bo->base.base);
}
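
/*
 * DRM_IOCTL_IVPU_BO_INFO - report BO flags, mmap offset, VPU address and
 * size for a given GEM handle.
 */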
int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_bo_info *args = data;
	struct drm_gem_object *obj;
	struct ivpu_bo *bo;
	int ret = 0;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	bo = to_ivpu_bo(obj);

	mutex_lock(&bo->lock);
	args->flags = bo->flags;
	args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
	args->vpu_addr = bo->vpu_addr;
	args->size = obj->size;
	mutex_unlock(&bo->lock);

	drm_gem_object_put(obj);
	return ret;
}
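
/*
 * DRM_IOCTL_IVPU_BO_WAIT - wait for all fences attached to the BO to signal
 * and return the status of the job associated with the BO.
 */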
int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_bo_wait *args = data;
	struct drm_gem_object *obj;
	unsigned long timeout;
	long ret;

	timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -EINVAL;

	ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, true, timeout);
	if (ret == 0) {
		ret = -ETIMEDOUT;
	} else if (ret > 0) {
		ret = 0;
		args->job_status = to_ivpu_bo(obj)->job_status;
	}

	drm_gem_object_put(obj);

	return ret;
}
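
/* debugfs helpers: dump one line of state per BO */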
static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
{
	mutex_lock(&bo->lock);

	drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
		   bo, bo->ctx->id, bo->vpu_addr, bo->base.base.size,
		   bo->flags, kref_read(&bo->base.base.refcount));

	if (bo->base.pages)
		drm_printf(p, " has_pages");

	if (bo->mmu_mapped)
		drm_printf(p, " mmu_mapped");

	if (bo->base.base.import_attach)
		drm_printf(p, " imported");

	drm_printf(p, "\n");

	mutex_unlock(&bo->lock);
}

void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_bo *bo;

	drm_printf(p, "%-9s %-3s %-14s %-10s %-10s %-4s %s\n",
		   "bo", "ctx", "vpu_addr", "size", "flags", "refs", "attribs");

	mutex_lock(&vdev->bo_list_lock);
	list_for_each_entry(bo, &vdev->bo_list, bo_list_node)
		ivpu_bo_print_info(bo, p);
	mutex_unlock(&vdev->bo_list_lock);
}

void ivpu_bo_list_print(struct drm_device *dev)
{
	struct drm_printer p = drm_info_printer(dev->dev);

	ivpu_bo_list(dev, &p);
}