// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <drm/drm_file.h>

#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <uapi/drm/ivpu_accel.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"
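
/*
 * A job ID encodes the owning context in bits 31:8 and a per-context job
 * index in bits 7:0, so each context can have at most 256 jobs in flight.
 */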
#define CMD_BUF_IDX	     0
#define JOB_ID_JOB_MASK	     GENMASK(7, 0)
#define JOB_ID_CONTEXT_MASK  GENMASK(31, 8)
#define JOB_MAX_BUFFER_COUNT 65535

static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
{
	ivpu_hw_reg_db_set(vdev, cmdq->db_id);
}
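
/*
 * Allocate a command queue: reserve a doorbell ID, back the queue with a
 * 4 KB write-combined buffer shared with the VPU, and initialize the job
 * queue header. The entry count is the number of job queue entries that
 * fit in the buffer after the header.
 */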
static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv, u16 engine)
{
	struct xa_limit db_xa_limit = {.max = IVPU_MAX_DB, .min = IVPU_MIN_DB};
	struct ivpu_device *vdev = file_priv->vdev;
	struct vpu_job_queue_header *jobq_header;
	struct ivpu_cmdq *cmdq;
	int ret;

	cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);
	if (!cmdq)
		return NULL;

	ret = xa_alloc(&vdev->db_xa, &cmdq->db_id, NULL, db_xa_limit, GFP_KERNEL);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
		goto err_free_cmdq;
	}

	cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
	if (!cmdq->mem)
		goto err_erase_xa;

	cmdq->entry_count = (u32)((ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header)) /
				  sizeof(struct vpu_job_queue_entry));

	cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
	jobq_header = &cmdq->jobq->header;
	jobq_header->engine_idx = engine;
	jobq_header->head = 0;
	jobq_header->tail = 0;
	wmb(); /* Flush WC buffer for jobq->header */

	return cmdq;

err_erase_xa:
	xa_erase(&vdev->db_xa, cmdq->db_id);
err_free_cmdq:
	kfree(cmdq);
	return NULL;
}

static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
	if (!cmdq)
		return;

	ivpu_bo_free(cmdq->mem);
	xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
	kfree(cmdq);
}
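
/*
 * Return the context's command queue for the given engine, allocating it on
 * first use and registering its doorbell with the firmware if that has not
 * been done yet (or was undone by a queue reset). Caller must hold
 * file_priv->lock.
 */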
static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine)
{
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
	int ret;

	lockdep_assert_held(&file_priv->lock);

	if (!cmdq) {
		cmdq = ivpu_cmdq_alloc(file_priv, engine);
		if (!cmdq)
			return NULL;
		file_priv->cmdq[engine] = cmdq;
	}

	if (cmdq->db_registered)
		return cmdq;

	ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
				   cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
	if (ret)
		return NULL;

	cmdq->db_registered = true;

	return cmdq;
}

static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engine)
{
	struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];

	lockdep_assert_held(&file_priv->lock);

	if (cmdq) {
		file_priv->cmdq[engine] = NULL;
		if (cmdq->db_registered)
			ivpu_jsm_unregister_db(file_priv->vdev, cmdq->db_id);

		ivpu_cmdq_free(file_priv, cmdq);
	}
}

void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
{
	int i;

	lockdep_assert_held(&file_priv->lock);

	for (i = 0; i < IVPU_NUM_ENGINES; i++)
		ivpu_cmdq_release_locked(file_priv, i);
}

/*
 * Mark the doorbell as unregistered and reset job queue pointers.
 * This function needs to be called when the VPU hardware is restarted
 * and the FW loses job queue state. The next time the job queue is used,
 * it will be registered again.
 */
static void ivpu_cmdq_reset_locked(struct ivpu_file_priv *file_priv, u16 engine)
{
	struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];

	lockdep_assert_held(&file_priv->lock);

	if (cmdq) {
		cmdq->db_registered = false;
		cmdq->jobq->header.head = 0;
		cmdq->jobq->header.tail = 0;
		wmb(); /* Flush WC buffer for jobq header */
	}
}

static void ivpu_cmdq_reset_all(struct ivpu_file_priv *file_priv)
{
	int i;

	mutex_lock(&file_priv->lock);

	for (i = 0; i < IVPU_NUM_ENGINES; i++)
		ivpu_cmdq_reset_locked(file_priv, i);

	mutex_unlock(&file_priv->lock);
}

void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
{
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;

	mutex_lock(&vdev->context_list_lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
		ivpu_cmdq_reset_all(file_priv);

	mutex_unlock(&vdev->context_list_lock);
}
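
/*
 * Add a job to the tail of the circular job queue. The first wmb() orders
 * filling the entry before publishing the new tail; the second flushes the
 * write-combined mapping so the hardware observes a consistent queue header.
 */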
static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
{
	struct ivpu_device *vdev = job->vdev;
	struct vpu_job_queue_header *header = &cmdq->jobq->header;
	struct vpu_job_queue_entry *entry;
	u32 tail = READ_ONCE(header->tail);
	u32 next_entry = (tail + 1) % cmdq->entry_count;

	/* Check if there is space left in job queue */
	if (next_entry == header->head) {
		ivpu_dbg(vdev, JOB, "Job queue full: ctx %d engine %d db %d head %d tail %d\n",
			 job->file_priv->ctx.id, job->engine_idx, cmdq->db_id, header->head, tail);
		return -EBUSY;
	}

	entry = &cmdq->jobq->job[tail];
	entry->batch_buf_addr = job->cmd_buf_vpu_addr;
	entry->job_id = job->job_id;
	entry->flags = 0;
	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
		entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;
	wmb(); /* Ensure that tail is updated after filling entry */
	header->tail = next_entry;
	wmb(); /* Flush WC buffer for jobq header */

	return 0;
}
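
/* Driver-private fence, signaled when the VPU completes or aborts the job. */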
struct ivpu_fence {
	struct dma_fence base;
	spinlock_t lock; /* protects base */
	struct ivpu_device *vdev;
};

static inline struct ivpu_fence *to_vpu_fence(struct dma_fence *fence)
{
	return container_of(fence, struct ivpu_fence, base);
}

static const char *ivpu_fence_get_driver_name(struct dma_fence *fence)
{
	return DRIVER_NAME;
}

static const char *ivpu_fence_get_timeline_name(struct dma_fence *fence)
{
	struct ivpu_fence *ivpu_fence = to_vpu_fence(fence);

	return dev_name(ivpu_fence->vdev->drm.dev);
}

static const struct dma_fence_ops ivpu_fence_ops = {
	.get_driver_name = ivpu_fence_get_driver_name,
	.get_timeline_name = ivpu_fence_get_timeline_name,
};

static struct dma_fence *ivpu_fence_create(struct ivpu_device *vdev)
{
	struct ivpu_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return NULL;

	fence->vdev = vdev;
	spin_lock_init(&fence->lock);
	dma_fence_init(&fence->base, &ivpu_fence_ops, &fence->lock, dma_fence_context_alloc(1), 1);

	return &fence->base;
}

static void ivpu_job_destroy(struct ivpu_job *job)
{
	struct ivpu_device *vdev = job->vdev;
	u32 i;

	ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d engine %d",
		 job->job_id, job->file_priv->ctx.id, job->engine_idx);

	for (i = 0; i < job->bo_count; i++)
		if (job->bos[i])
			drm_gem_object_put(&job->bos[i]->base.base);

	dma_fence_put(job->done_fence);
	ivpu_file_priv_put(&job->file_priv);
	kfree(job);
}

static struct ivpu_job *
ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
{
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_job *job;

	job = kzalloc(struct_size(job, bos, bo_count), GFP_KERNEL);
	if (!job)
		return NULL;

	job->vdev = vdev;
	job->engine_idx = engine_idx;
	job->bo_count = bo_count;
	job->done_fence = ivpu_fence_create(vdev);
	if (!job->done_fence) {
		ivpu_warn_ratelimited(vdev, "Failed to create a fence\n");
		goto err_free_job;
	}

	job->file_priv = ivpu_file_priv_get(file_priv);

	ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d", file_priv->ctx.id, job->engine_idx);
	return job;

err_free_job:
	kfree(job);
	return NULL;
}
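
/*
 * Remove the job from the submitted-jobs XArray, record its status in the
 * command buffer BO, signal the done-fence and drop the references taken
 * at submit time, including the runtime PM reference.
 */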
static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
{
	struct ivpu_job *job;

	job = xa_erase(&vdev->submitted_jobs_xa, job_id);
	if (!job)
		return -ENOENT;

	if (job->file_priv->has_mmu_faults)
		job_status = DRM_IVPU_JOB_STATUS_ABORTED;

	job->bos[CMD_BUF_IDX]->job_status = job_status;
	dma_fence_signal(job->done_fence);

	ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d engine %d status 0x%x\n",
		 job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status);

	ivpu_job_destroy(job);
	ivpu_stop_job_timeout_detection(vdev);

	ivpu_rpm_put(vdev);
	return 0;
}

void ivpu_jobs_abort_all(struct ivpu_device *vdev)
{
	struct ivpu_job *job;
	unsigned long id;

	xa_for_each(&vdev->submitted_jobs_xa, id, job)
		ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
}
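
/*
 * Push the job to its command queue and ring the doorbell. The job ID is
 * allocated from a per-context range derived from JOB_ID_CONTEXT_MASK, so
 * submission fails with -EBUSY once the context has 256 jobs outstanding.
 */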
static int ivpu_job_submit(struct ivpu_job *job)
{
	struct ivpu_file_priv *file_priv = job->file_priv;
	struct ivpu_device *vdev = job->vdev;
	struct xa_limit job_id_range;
	struct ivpu_cmdq *cmdq;
	int ret;

	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	mutex_lock(&file_priv->lock);

	cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx);
	if (!cmdq) {
		ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d\n",
				      file_priv->ctx.id, job->engine_idx);
		ret = -EINVAL;
		goto err_unlock_file_priv;
	}

	job_id_range.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
	job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK;

	xa_lock(&vdev->submitted_jobs_xa);
	ret = __xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
	if (ret) {
		ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
			 file_priv->ctx.id);
		ret = -EBUSY;
		goto err_unlock_submitted_jobs_xa;
	}

	ret = ivpu_cmdq_push_job(cmdq, job);
	if (ret)
		goto err_erase_xa;

	ivpu_start_job_timeout_detection(vdev);

	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
		cmdq->jobq->header.head = cmdq->jobq->header.tail;
		wmb(); /* Flush WC buffer for jobq header */
	} else {
		ivpu_cmdq_ring_db(vdev, cmdq);
	}

	ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d addr 0x%llx next %d\n",
		 job->job_id, file_priv->ctx.id, job->engine_idx,
		 job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);

	xa_unlock(&vdev->submitted_jobs_xa);

	mutex_unlock(&file_priv->lock);

	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
		ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);

	return 0;

err_erase_xa:
	__xa_erase(&vdev->submitted_jobs_xa, job->job_id);
err_unlock_submitted_jobs_xa:
	xa_unlock(&vdev->submitted_jobs_xa);
err_unlock_file_priv:
	mutex_unlock(&file_priv->lock);
	ivpu_rpm_put(vdev);
	return ret;
}
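
/*
 * Look up and pin every BO referenced by the job, validate the command
 * buffer, then attach the done-fence to each BO's reservation object:
 * WRITE usage on the command buffer, BOOKKEEP on all other buffers.
 */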
static int
ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles,
				u32 buf_count, u32 commands_offset)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct ww_acquire_ctx acquire_ctx;
	enum dma_resv_usage usage;
	struct ivpu_bo *bo;
	int ret;
	u32 i;

	for (i = 0; i < buf_count; i++) {
		struct drm_gem_object *obj = drm_gem_object_lookup(file, buf_handles[i]);

		if (!obj)
			return -ENOENT;

		job->bos[i] = to_ivpu_bo(obj);

		ret = ivpu_bo_pin(job->bos[i]);
		if (ret)
			return ret;
	}

	bo = job->bos[CMD_BUF_IDX];
	if (!dma_resv_test_signaled(bo->base.base.resv, DMA_RESV_USAGE_READ)) {
		ivpu_warn(vdev, "Buffer is already in use\n");
		return -EBUSY;
	}

	if (commands_offset >= ivpu_bo_size(bo)) {
		ivpu_warn(vdev, "Invalid command buffer offset %u\n", commands_offset);
		return -EINVAL;
	}

	job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;

	ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count,
					&acquire_ctx);
	if (ret) {
		ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret);
		return ret;
	}

	for (i = 0; i < buf_count; i++) {
		ret = dma_resv_reserve_fences(job->bos[i]->base.base.resv, 1);
		if (ret) {
			ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
			goto unlock_reservations;
		}
	}

	for (i = 0; i < buf_count; i++) {
		usage = (i == CMD_BUF_IDX) ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_BOOKKEEP;
		dma_resv_add_fence(job->bos[i]->base.base.resv, job->done_fence, usage);
	}

unlock_reservations:
	drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx);

	wmb(); /* Flush write combining buffers */

	return ret;
}
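
/* Handler for the DRM_IVPU_SUBMIT ioctl: validate params, build and submit the job. */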
int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct drm_ivpu_submit *params = data;
	struct ivpu_job *job;
	u32 *buf_handles;
	int idx, ret;

	if (params->engine > DRM_IVPU_ENGINE_COPY)
		return -EINVAL;

	if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
		return -EINVAL;

	if (params->buffer_count == 0 || params->buffer_count > JOB_MAX_BUFFER_COUNT)
		return -EINVAL;

	if (!IS_ALIGNED(params->commands_offset, 8))
		return -EINVAL;

	if (!file_priv->ctx.id)
		return -EINVAL;

	if (file_priv->has_mmu_faults)
		return -EBADFD;

	buf_handles = kcalloc(params->buffer_count, sizeof(u32), GFP_KERNEL);
	if (!buf_handles)
		return -ENOMEM;

	ret = copy_from_user(buf_handles,
			     (void __user *)params->buffers_ptr,
			     params->buffer_count * sizeof(u32));
	if (ret) {
		ret = -EFAULT;
		goto err_free_handles;
	}

	if (!drm_dev_enter(&vdev->drm, &idx)) {
		ret = -ENODEV;
		goto err_free_handles;
	}

	ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n",
		 file_priv->ctx.id, params->buffer_count);

	job = ivpu_job_create(file_priv, params->engine, params->buffer_count);
	if (!job) {
		ivpu_err(vdev, "Failed to create job\n");
		ret = -ENOMEM;
		goto err_exit_dev;
	}

	ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
					      params->commands_offset);
	if (ret) {
		ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
		goto err_destroy_job;
	}

	down_read(&vdev->pm->reset_lock);
	ret = ivpu_job_submit(job);
	up_read(&vdev->pm->reset_lock);
	if (ret)
		goto err_signal_fence;

	drm_dev_exit(idx);
	kfree(buf_handles);
	return ret;

err_signal_fence:
	dma_fence_signal(job->done_fence);
err_destroy_job:
	ivpu_job_destroy(job);
err_exit_dev:
	drm_dev_exit(idx);
err_free_handles:
	kfree(buf_handles);
	return ret;
}
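
/*
 * IPC consumer callback for the JOB_RET channel: completes the job referenced
 * by the JOB_DONE payload and re-arms timeout detection while other jobs are
 * still pending.
 */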
static void
ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
		       struct vpu_jsm_msg *jsm_msg)
{
	struct vpu_ipc_msg_payload_job_done *payload;
	int ret;

	if (!jsm_msg) {
		ivpu_err(vdev, "IPC message has no JSM payload\n");
		return;
	}

	if (jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
		ivpu_err(vdev, "Invalid JSM message result: %d\n", jsm_msg->result);
		return;
	}

	payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
	ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
	if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
		ivpu_start_job_timeout_detection(vdev);
}

void ivpu_job_done_consumer_init(struct ivpu_device *vdev)
{
	ivpu_ipc_consumer_add(vdev, &vdev->job_done_consumer,
			      VPU_IPC_CHAN_JOB_RET, ivpu_job_done_callback);
}

void ivpu_job_done_consumer_fini(struct ivpu_device *vdev)
{
	ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer);
}