// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#include <drm/drm_accel.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

#include "vpu_boot_api.h"
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_pm.h"

#ifndef DRIVER_VERSION_STR
#define DRIVER_VERSION_STR __stringify(DRM_IVPU_DRIVER_MAJOR) "." \
			   __stringify(DRM_IVPU_DRIVER_MINOR)
#endif

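/*
 * Dedicated lockdep class for submitted_jobs_xa's internal xa_lock,
 * assigned in ivpu_dev_init() via lockdep_set_class() so lockdep tracks
 * it separately from the driver's other xarray locks.
 */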
static struct lock_class_key submitted_jobs_xa_lock_class_key;

int ivpu_dbg_mask;
module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");

int ivpu_test_mode;
module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");

u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set NPU frequency");

u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");

bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0644);
MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");

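/*
 * Illustrative usage of the parameters above, assuming the upstream module
 * name intel_vpu (per KBUILD_MODNAME); the values are examples only:
 *   modprobe intel_vpu dbg_mask=0x3 pll_max_ratio=0x50
 */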
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
{
	struct ivpu_device *vdev = file_priv->vdev;

	kref_get(&file_priv->ref);

	ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	return file_priv;
}

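/*
 * Unbind a context from the HW and release its resources. Both callers
 * (file_priv_release() and ivpu_bo_unbind_all_user_contexts()) hold
 * vdev->context_list_lock; file_priv->lock plus the bound flag make the
 * unbind safe to attempt from either path.
 */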
static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
{
	mutex_lock(&file_priv->lock);
	if (file_priv->bound) {
		ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);

		ivpu_cmdq_release_all_locked(file_priv);
		ivpu_jsm_context_release(vdev, file_priv->ctx.id);
		ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
		ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
		file_priv->bound = false;
		drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
	}
	mutex_unlock(&file_priv->lock);
}

static void file_priv_release(struct kref *ref)
{
	struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
	struct ivpu_device *vdev = file_priv->vdev;

	ivpu_dbg(vdev, FILE, "file_priv release: ctx %u bound %d\n",
		 file_priv->ctx.id, (bool)file_priv->bound);

	pm_runtime_get_sync(vdev->drm.dev);
	mutex_lock(&vdev->context_list_lock);
	file_priv_unbind(vdev, file_priv);
	mutex_unlock(&vdev->context_list_lock);
	pm_runtime_put_autosuspend(vdev->drm.dev);

	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
}

void ivpu_file_priv_put(struct ivpu_file_priv **link)
{
	struct ivpu_file_priv *file_priv = *link;
	struct ivpu_device *vdev = file_priv->vdev;

	drm_WARN_ON(&vdev->drm, !file_priv);

	ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	*link = NULL;
	kref_put(&file_priv->ref, file_priv_release);
}

static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
{
	switch (args->index) {
	case DRM_IVPU_CAP_METRIC_STREAMER:
		args->value = 0;
		break;
	case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
		args->value = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

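/*
 * drm_dev_enter()/drm_dev_exit() protect against a racing drm_dev_unplug()
 * from ivpu_remove(): once the device is unplugged the ioctl fails with
 * -ENODEV instead of touching device state.
 */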
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct drm_ivpu_param *args = data;
	int ret = 0;
	int idx;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	switch (args->param) {
	case DRM_IVPU_PARAM_DEVICE_ID:
		args->value = pdev->device;
		break;
	case DRM_IVPU_PARAM_DEVICE_REVISION:
		args->value = pdev->revision;
		break;
	case DRM_IVPU_PARAM_PLATFORM_TYPE:
		args->value = vdev->platform;
		break;
	case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
		args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
		break;
	case DRM_IVPU_PARAM_NUM_CONTEXTS:
		args->value = ivpu_get_context_count(vdev);
		break;
	case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
		args->value = vdev->hw->ranges.user.start;
		break;
	case DRM_IVPU_PARAM_CONTEXT_ID:
		args->value = file_priv->ctx.id;
		break;
	case DRM_IVPU_PARAM_FW_API_VERSION:
		if (args->index < VPU_FW_API_VER_NUM) {
			struct vpu_firmware_header *fw_hdr;

			fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data;
			args->value = fw_hdr->api_version[args->index];
		} else {
			ret = -EINVAL;
		}
		break;
	case DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
		ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value);
		break;
	case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
		args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter);
		break;
	case DRM_IVPU_PARAM_TILE_CONFIG:
		args->value = vdev->hw->tile_fuse;
		break;
	case DRM_IVPU_PARAM_SKU:
		args->value = vdev->hw->sku;
		break;
	case DRM_IVPU_PARAM_CAPABILITIES:
		ret = ivpu_get_capabilities(vdev, args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	drm_dev_exit(idx);
	return ret;
}

static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_param *args = data;
	int ret = 0;

	switch (args->param) {
	default:
		ret = -EINVAL;
	}

	return ret;
}

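/*
 * Every open() of the accel node gets its own ivpu_file_priv with a private
 * MMU context; the context id (SSID) is allocated from context_xa within
 * [IVPU_USER_CONTEXT_MIN_SSID, IVPU_USER_CONTEXT_MAX_SSID].
 */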
static int ivpu_open(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_file_priv *file_priv;
	u32 ctx_id;
	int idx, ret;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv) {
		ret = -ENOMEM;
		goto err_dev_exit;
	}

	file_priv->vdev = vdev;
	file_priv->bound = true;
	kref_init(&file_priv->ref);
	mutex_init(&file_priv->lock);

	mutex_lock(&vdev->context_list_lock);

	ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
			   vdev->context_xa_limit, GFP_KERNEL);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
		goto err_unlock;
	}

	ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
	if (ret)
		goto err_xa_erase;

	mutex_unlock(&vdev->context_list_lock);
	drm_dev_exit(idx);

	file->driver_priv = file_priv;

	ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
		 ctx_id, current->comm, task_pid_nr(current));

	return 0;

err_xa_erase:
	xa_erase_irq(&vdev->context_xa, ctx_id);
err_unlock:
	mutex_unlock(&vdev->context_list_lock);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
err_dev_exit:
	drm_dev_exit(idx);
	return ret;
}

static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);

	ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
		 file_priv->ctx.id, current->comm, task_pid_nr(current));

	ivpu_file_priv_put(&file_priv);
}

static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
};

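/*
 * Poll for the firmware boot-complete message on the IPC boot channel.
 * The device IRQ is still disabled here (it is requested with
 * IRQF_NO_AUTOEN and only enabled by ivpu_boot() after this wait
 * succeeds), so ivpu_ipc_irq_handler() is invoked manually on each
 * loop iteration.
 */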
static int ivpu_wait_for_ready(struct ivpu_device *vdev)
{
	struct ivpu_ipc_consumer cons;
	struct ivpu_ipc_hdr ipc_hdr;
	unsigned long timeout;
	int ret;

	if (ivpu_test_mode & IVPU_TEST_MODE_FW_TEST)
		return 0;

	ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG, NULL);

	timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
	while (1) {
		ivpu_ipc_irq_handler(vdev, NULL);
		ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
		if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
			break;

		cond_resched();
	}

	ivpu_ipc_consumer_del(vdev, &cons);

	if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
		ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n",
			 ipc_hdr.data_addr);
		return -EIO;
	}

	if (!ret)
		ivpu_dbg(vdev, PM, "NPU ready message received successfully\n");

	return ret;
}

/**
 * ivpu_boot() - Start VPU firmware
 * @vdev: VPU device
 *
 * This function is paired with ivpu_shutdown() but it doesn't power up the
 * VPU because power up has to be called very early in ivpu_probe().
 */
int ivpu_boot(struct ivpu_device *vdev)
{
	int ret;

	/* Update boot params located at first 4KB of FW memory */
	ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));

	ret = ivpu_hw_boot_fw(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to start the firmware: %d\n", ret);
		return ret;
	}

	ret = ivpu_wait_for_ready(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
		ivpu_hw_diagnose_failure(vdev);
		ivpu_mmu_evtq_dump(vdev);
		ivpu_fw_log_dump(vdev);
		return ret;
	}

	ivpu_hw_irq_clear(vdev);
	enable_irq(vdev->irq);
	ivpu_hw_irq_enable(vdev);
	ivpu_ipc_enable(vdev);
	return 0;
}

void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
	ivpu_hw_irq_disable(vdev);
	disable_irq(vdev->irq);
	ivpu_ipc_disable(vdev);
	ivpu_mmu_disable(vdev);
}

int ivpu_shutdown(struct ivpu_device *vdev)
{
	int ret;

	/* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
	pci_save_state(to_pci_dev(vdev->drm.dev));

	ret = ivpu_hw_power_down(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);

	pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);

	return ret;
}

static const struct file_operations ivpu_fops = {
	.owner = THIS_MODULE,
	DRM_ACCEL_FOPS,
};

static const struct drm_driver driver = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,

	.open = ivpu_open,
	.postclose = ivpu_postclose,

	.gem_create_object = ivpu_gem_create_object,
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,

	.ioctls = ivpu_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
	.fops = &ivpu_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRM_IVPU_DRIVER_MAJOR,
	.minor = DRM_IVPU_DRIVER_MINOR,
};

static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
{
	struct ivpu_device *vdev = arg;

	return ivpu_ipc_irq_thread_handler(vdev);
}

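/*
 * The IRQ is requested with IRQF_NO_AUTOEN, so the line stays disabled
 * until ivpu_boot() calls enable_irq() once the firmware has signalled
 * readiness.
 */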
static int ivpu_irq_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret < 0) {
		ivpu_err(vdev, "Failed to allocate a MSI IRQ: %d\n", ret);
		return ret;
	}

	vdev->irq = pci_irq_vector(pdev, 0);

	ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler,
					ivpu_irq_thread_handler, IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
	if (ret)
		ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);

	return ret;
}

static int ivpu_pci_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct resource *bar0 = &pdev->resource[0];
	struct resource *bar4 = &pdev->resource[4];
	int ret;

	ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0);
	vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0);
	if (IS_ERR(vdev->regv)) {
		ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv);
		return PTR_ERR(vdev->regv);
	}

	ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4);
	vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4);
	if (IS_ERR(vdev->regb)) {
		ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb);
		return PTR_ERR(vdev->regb);
	}

	ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
	if (ret) {
		ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
		return ret;
	}
	dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);

	/* Clear any pending errors */
	pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);

	/* NPU does not require 10 ms D3hot delay */
	pdev->d3hot_delay = 0;

	ret = pcim_enable_device(pdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret);
		return ret;
	}

	pci_set_master(pdev);

	return 0;
}

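/*
 * Bring-up order: PCI and IRQ setup, HW info (buttress registers are
 * readable before power up), power up, MMU global and reserved contexts,
 * firmware init, IPC, PM, then firmware boot. The error labels below
 * unwind in exactly the reverse order.
 */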
static int ivpu_dev_init(struct ivpu_device *vdev)
{
	int ret;

	vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL);
	if (!vdev->hw)
		return -ENOMEM;

	vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL);
	if (!vdev->mmu)
		return -ENOMEM;

	vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL);
	if (!vdev->fw)
		return -ENOMEM;

	vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL);
	if (!vdev->ipc)
		return -ENOMEM;

	vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL);
	if (!vdev->pm)
		return -ENOMEM;

	if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) {
		vdev->hw->ops = &ivpu_hw_40xx_ops;
		vdev->hw->dma_bits = 48;
	} else {
		vdev->hw->ops = &ivpu_hw_37xx_ops;
		vdev->hw->dma_bits = 38;
	}

	vdev->platform = IVPU_PLATFORM_INVALID;
	vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
	vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
	atomic64_set(&vdev->unique_id_counter, 0);
	xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
	xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
	lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
	INIT_LIST_HEAD(&vdev->bo_list);

	ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_pci_init(vdev);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_irq_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Init basic HW info based on buttress registers which are accessible before power up */
	ret = ivpu_hw_info_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Power up early so the rest of init code can access VPU registers */
	ret = ivpu_hw_power_up(vdev);
	if (ret)
		goto err_shutdown;

	ret = ivpu_mmu_global_context_init(vdev);
	if (ret)
		goto err_shutdown;

	ret = ivpu_mmu_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_mmu_reserved_context_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_fw_init(vdev);
	if (ret)
		goto err_mmu_rctx_fini;

	ret = ivpu_ipc_init(vdev);
	if (ret)
		goto err_fw_fini;

	ivpu_pm_init(vdev);

	ret = ivpu_boot(vdev);
	if (ret)
		goto err_ipc_fini;

	ivpu_job_done_consumer_init(vdev);
	ivpu_pm_enable(vdev);

	return 0;

err_ipc_fini:
	ivpu_ipc_fini(vdev);
err_fw_fini:
	ivpu_fw_fini(vdev);
err_mmu_rctx_fini:
	ivpu_mmu_reserved_context_fini(vdev);
err_mmu_gctx_fini:
	ivpu_mmu_global_context_fini(vdev);
err_shutdown:
	ivpu_shutdown(vdev);
err_xa_destroy:
	xa_destroy(&vdev->db_xa);
	xa_destroy(&vdev->submitted_jobs_xa);
	xa_destroy(&vdev->context_xa);
	return ret;
}

static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
{
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;

	mutex_lock(&vdev->context_list_lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
		file_priv_unbind(vdev, file_priv);

	mutex_unlock(&vdev->context_list_lock);
}

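/*
 * Teardown mirrors ivpu_dev_init(): stop PM and power down first, abort
 * outstanding jobs and unbind any remaining user contexts, then tear down
 * IPC, firmware and MMU state. All xarrays are expected to be empty by the
 * time they are destroyed.
 */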
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
	ivpu_pm_disable(vdev);
	ivpu_prepare_for_reset(vdev);
	ivpu_shutdown(vdev);

	ivpu_jobs_abort_all(vdev);
	ivpu_job_done_consumer_fini(vdev);
	ivpu_pm_cancel_recovery(vdev);
	ivpu_bo_unbind_all_user_contexts(vdev);

	ivpu_ipc_fini(vdev);
	ivpu_fw_fini(vdev);
	ivpu_mmu_reserved_context_fini(vdev);
	ivpu_mmu_global_context_fini(vdev);

	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->db_xa));
	xa_destroy(&vdev->db_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
	xa_destroy(&vdev->submitted_jobs_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
	xa_destroy(&vdev->context_xa);
}

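/* MTL, ARL and LNL refer to Meteor Lake, Arrow Lake and Lunar Lake */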
static struct pci_device_id ivpu_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
	{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);

static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ivpu_device *vdev;
	int ret;

	vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	pci_set_drvdata(pdev, vdev);

	ret = ivpu_dev_init(vdev);
	if (ret)
		return ret;

	ivpu_debugfs_init(vdev);

	ret = drm_dev_register(&vdev->drm, 0);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret);
		ivpu_dev_fini(vdev);
	}

	return ret;
}

static void ivpu_remove(struct pci_dev *pdev)
{
	struct ivpu_device *vdev = pci_get_drvdata(pdev);

	drm_dev_unplug(&vdev->drm);
	ivpu_dev_fini(vdev);
}

static const struct dev_pm_ops ivpu_drv_pci_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb)
	SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb, ivpu_pm_runtime_resume_cb, NULL)
};

static const struct pci_error_handlers ivpu_drv_pci_err = {
	.reset_prepare = ivpu_pm_reset_prepare_cb,
	.reset_done = ivpu_pm_reset_done_cb,
};

static struct pci_driver ivpu_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ivpu_pci_ids,
	.probe = ivpu_probe,
	.remove = ivpu_remove,
	.driver = {
		.pm = &ivpu_drv_pci_pm,
	},
	.err_handler = &ivpu_drv_pci_err,
};

module_pci_driver(ivpu_pci_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(DRIVER_VERSION_STR);