accel/ivpu: Use GEM shmem helper for all buffers
drivers/accel/ivpu/ivpu_drv.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_accel.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

#include "vpu_boot_api.h"
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_pm.h"

#ifndef DRIVER_VERSION_STR
#define DRIVER_VERSION_STR __stringify(DRM_IVPU_DRIVER_MAJOR) "." \
                           __stringify(DRM_IVPU_DRIVER_MINOR)
#endif

static struct lock_class_key submitted_jobs_xa_lock_class_key;

int ivpu_dbg_mask;
module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");

int ivpu_test_mode;
module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");

u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set VPU frequency");

u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set VPU frequency");

bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0644);
MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");

struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
{
        struct ivpu_device *vdev = file_priv->vdev;

        kref_get(&file_priv->ref);

        ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n",
                 file_priv->ctx.id, kref_read(&file_priv->ref));

        return file_priv;
}

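/*
 * Look up a context by id and take a reference. An entry may still be
 * visible in context_xa while file_priv_release() is running, which is why
 * the lookup below relies on kref_get_unless_zero().
 */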
struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id)
{
        struct ivpu_file_priv *file_priv;

        xa_lock_irq(&vdev->context_xa);
        file_priv = xa_load(&vdev->context_xa, id);
        /* file_priv may still be in context_xa during file_priv_release() */
        if (file_priv && !kref_get_unless_zero(&file_priv->ref))
                file_priv = NULL;
        xa_unlock_irq(&vdev->context_xa);

        if (file_priv)
                ivpu_dbg(vdev, KREF, "file_priv get by id: ctx %u refcount %u\n",
                         file_priv->ctx.id, kref_read(&file_priv->ref));

        return file_priv;
}

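/*
 * Called when the last reference is dropped: release command queues, FW/JSM
 * context state, BOs and the MMU context, then remove the context_xa entry
 * and free the file_priv.
 */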
static void file_priv_release(struct kref *ref)
{
        struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
        struct ivpu_device *vdev = file_priv->vdev;

        ivpu_dbg(vdev, FILE, "file_priv release: ctx %u\n", file_priv->ctx.id);

        ivpu_cmdq_release_all(file_priv);
        ivpu_jsm_context_release(vdev, file_priv->ctx.id);
        ivpu_bo_remove_all_bos_from_context(vdev, &file_priv->ctx);
        ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
        drm_WARN_ON(&vdev->drm, xa_erase_irq(&vdev->context_xa, file_priv->ctx.id) != file_priv);
        mutex_destroy(&file_priv->lock);
        kfree(file_priv);
}

void ivpu_file_priv_put(struct ivpu_file_priv **link)
{
        struct ivpu_file_priv *file_priv = *link;
        struct ivpu_device *vdev = file_priv->vdev;

        drm_WARN_ON(&vdev->drm, !file_priv);

        ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
                 file_priv->ctx.id, kref_read(&file_priv->ref));

        *link = NULL;
        kref_put(&file_priv->ref, file_priv_release);
}

static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
{
        switch (args->index) {
        case DRM_IVPU_CAP_METRIC_STREAMER:
                args->value = 0;
                break;
        case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
                args->value = 1;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

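/*
 * Report the current PLL frequency. ivpu_rpm_get_if_active() takes a runtime
 * PM reference only if the device is already awake; when it is suspended,
 * a clock rate of 0 is reported rather than waking the device for a read.
 */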
static int ivpu_get_core_clock_rate(struct ivpu_device *vdev, u64 *clk_rate)
{
        int ret;

        ret = ivpu_rpm_get_if_active(vdev);
        if (ret < 0)
                return ret;

        *clk_rate = ret ? ivpu_hw_reg_pll_freq_get(vdev) : 0;

        if (ret)
                ivpu_rpm_put(vdev);

        return 0;
}

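/*
 * drm_dev_enter()/drm_dev_exit() bracket the ioctl so that it fails with
 * -ENODEV instead of touching hardware after the device has been unplugged.
 */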
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct ivpu_file_priv *file_priv = file->driver_priv;
        struct ivpu_device *vdev = file_priv->vdev;
        struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
        struct drm_ivpu_param *args = data;
        int ret = 0;
        int idx;

        if (!drm_dev_enter(dev, &idx))
                return -ENODEV;

        switch (args->param) {
        case DRM_IVPU_PARAM_DEVICE_ID:
                args->value = pdev->device;
                break;
        case DRM_IVPU_PARAM_DEVICE_REVISION:
                args->value = pdev->revision;
                break;
        case DRM_IVPU_PARAM_PLATFORM_TYPE:
                args->value = vdev->platform;
                break;
        case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
                ret = ivpu_get_core_clock_rate(vdev, &args->value);
                break;
        case DRM_IVPU_PARAM_NUM_CONTEXTS:
                args->value = ivpu_get_context_count(vdev);
                break;
        case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
                args->value = vdev->hw->ranges.user.start;
                break;
        case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
                args->value = file_priv->priority;
                break;
        case DRM_IVPU_PARAM_CONTEXT_ID:
                args->value = file_priv->ctx.id;
                break;
        case DRM_IVPU_PARAM_FW_API_VERSION:
                if (args->index < VPU_FW_API_VER_NUM) {
                        struct vpu_firmware_header *fw_hdr;

                        fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data;
                        args->value = fw_hdr->api_version[args->index];
                } else {
                        ret = -EINVAL;
                }
                break;
        case DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
                ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value);
                break;
        case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
                args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter);
                break;
        case DRM_IVPU_PARAM_TILE_CONFIG:
                args->value = vdev->hw->tile_fuse;
                break;
        case DRM_IVPU_PARAM_SKU:
                args->value = vdev->hw->sku;
                break;
        case DRM_IVPU_PARAM_CAPABILITIES:
                ret = ivpu_get_capabilities(vdev, args);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        drm_dev_exit(idx);
        return ret;
}

static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct ivpu_file_priv *file_priv = file->driver_priv;
        struct drm_ivpu_param *args = data;
        int ret = 0;

        switch (args->param) {
        case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
                if (args->value <= DRM_IVPU_CONTEXT_PRIORITY_REALTIME)
                        file_priv->priority = args->value;
                else
                        ret = -EINVAL;
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}

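/*
 * Context creation is two-step: xa_alloc_irq() first reserves the id with a
 * NULL entry, and xa_store_irq() publishes the file_priv only once it is
 * fully initialized, so concurrent lookups never see a half-built context.
 */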
static int ivpu_open(struct drm_device *dev, struct drm_file *file)
{
        struct ivpu_device *vdev = to_ivpu_device(dev);
        struct ivpu_file_priv *file_priv;
        u32 ctx_id;
        void *old;
        int ret;

        ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, NULL, vdev->context_xa_limit, GFP_KERNEL);
        if (ret) {
                ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
                return ret;
        }

        file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
        if (!file_priv) {
                ret = -ENOMEM;
                goto err_xa_erase;
        }

        file_priv->vdev = vdev;
        file_priv->priority = DRM_IVPU_CONTEXT_PRIORITY_NORMAL;
        kref_init(&file_priv->ref);
        mutex_init(&file_priv->lock);

        ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
        if (ret)
                goto err_mutex_destroy;

        old = xa_store_irq(&vdev->context_xa, ctx_id, file_priv, GFP_KERNEL);
        if (xa_is_err(old)) {
                ret = xa_err(old);
                ivpu_err(vdev, "Failed to store context %u: %d\n", ctx_id, ret);
                goto err_ctx_fini;
        }

        ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
                 ctx_id, current->comm, task_pid_nr(current));

        file->driver_priv = file_priv;
        return 0;

err_ctx_fini:
        ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
err_mutex_destroy:
        mutex_destroy(&file_priv->lock);
        kfree(file_priv);
err_xa_erase:
        xa_erase_irq(&vdev->context_xa, ctx_id);
        return ret;
}

static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
{
        struct ivpu_file_priv *file_priv = file->driver_priv;
        struct ivpu_device *vdev = to_ivpu_device(dev);

        ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
                 file_priv->ctx.id, current->comm, task_pid_nr(current));

        ivpu_file_priv_put(&file_priv);
}

static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
        DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0),
        DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0),
        DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0),
        DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
        DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
        DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
};

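/*
 * Poll for the boot handshake: the FW posts a "ready" message on the boot
 * IPC channel. Interrupts are not enabled until after this succeeds (see
 * ivpu_boot()), so ivpu_ipc_irq_handler() is invoked directly in the loop.
 */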
static int ivpu_wait_for_ready(struct ivpu_device *vdev)
{
        struct ivpu_ipc_consumer cons;
        struct ivpu_ipc_hdr ipc_hdr;
        unsigned long timeout;
        int ret;

        if (ivpu_test_mode & IVPU_TEST_MODE_FW_TEST)
                return 0;

        ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG);

        timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
        while (1) {
                ret = ivpu_ipc_irq_handler(vdev);
                if (ret)
                        break;
                ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
                if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
                        break;

                cond_resched();
        }

        ivpu_ipc_consumer_del(vdev, &cons);

        if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
                ivpu_err(vdev, "Invalid VPU ready message: 0x%x\n",
                         ipc_hdr.data_addr);
                return -EIO;
        }

        if (!ret)
                ivpu_info(vdev, "VPU ready message received successfully\n");
        else
                ivpu_hw_diagnose_failure(vdev);

        return ret;
}

/**
 * ivpu_boot() - Start VPU firmware
 * @vdev: VPU device
 *
 * This function is paired with ivpu_shutdown() but it doesn't power up the
 * VPU because power up has to be called very early in ivpu_probe().
 */
int ivpu_boot(struct ivpu_device *vdev)
{
        int ret;

        /* Update boot params located at first 4KB of FW memory */
        ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));

        ret = ivpu_hw_boot_fw(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to start the firmware: %d\n", ret);
                return ret;
        }

        ret = ivpu_wait_for_ready(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
                return ret;
        }

        ivpu_hw_irq_clear(vdev);
        enable_irq(vdev->irq);
        ivpu_hw_irq_enable(vdev);
        ivpu_ipc_enable(vdev);
        ivpu_job_done_thread_enable(vdev);
        return 0;
}

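/* Quiesce the device, roughly mirroring ivpu_boot() in reverse order. */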
int ivpu_shutdown(struct ivpu_device *vdev)
{
        int ret;

        ivpu_hw_irq_disable(vdev);
        disable_irq(vdev->irq);
        ivpu_ipc_disable(vdev);
        ivpu_mmu_disable(vdev);
        ivpu_job_done_thread_disable(vdev);

        ret = ivpu_hw_power_down(vdev);
        if (ret)
                ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);

        return ret;
}

static const struct file_operations ivpu_fops = {
        .owner = THIS_MODULE,
        DRM_ACCEL_FOPS,
};

static const struct drm_driver driver = {
        .driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,

        .open = ivpu_open,
        .postclose = ivpu_postclose,

        .gem_create_object = ivpu_gem_create_object,
        .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,

        .ioctls = ivpu_drm_ioctls,
        .num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
        .fops = &ivpu_fops,

        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
        .major = DRM_IVPU_DRIVER_MAJOR,
        .minor = DRM_IVPU_DRIVER_MINOR,
};

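/*
 * The IRQ is requested with IRQF_NO_AUTOEN, so it stays disabled until
 * ivpu_boot() calls enable_irq(); before that the boot path polls the IPC.
 */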
static int ivpu_irq_init(struct ivpu_device *vdev)
{
        struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
        int ret;

        ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
        if (ret < 0) {
                ivpu_err(vdev, "Failed to allocate a MSI IRQ: %d\n", ret);
                return ret;
        }

        vdev->irq = pci_irq_vector(pdev, 0);

        ret = devm_request_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler,
                               IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
        if (ret)
                ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);

        return ret;
}

static int ivpu_pci_init(struct ivpu_device *vdev)
{
        struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
        struct resource *bar0 = &pdev->resource[0];
        struct resource *bar4 = &pdev->resource[4];
        int ret;

        ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0);
        vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0);
        if (IS_ERR(vdev->regv)) {
                ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv);
                return PTR_ERR(vdev->regv);
        }

        ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4);
        vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4);
        if (IS_ERR(vdev->regb)) {
                ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb);
                return PTR_ERR(vdev->regb);
        }

        ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
        if (ret) {
                ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
                return ret;
        }
        dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);

        /* Clear any pending errors */
        pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);

        /* VPU 37XX does not require the 10 ms D3hot delay */
        if (ivpu_hw_gen(vdev) == IVPU_HW_37XX)
                pdev->d3hot_delay = 0;

        ret = pcim_enable_device(pdev);
        if (ret) {
                ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret);
                return ret;
        }

        pci_set_master(pdev);

        return 0;
}

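/*
 * Bring-up order matters: PCI and IRQ setup come first, basic HW info is
 * read from buttress registers that work before power up, the device is then
 * powered early so MMU/FW/IPC init can access VPU registers, and ivpu_boot()
 * runs last. The error labels unwind in exact reverse order.
 */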
static int ivpu_dev_init(struct ivpu_device *vdev)
{
        int ret;

        vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL);
        if (!vdev->hw)
                return -ENOMEM;

        vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL);
        if (!vdev->mmu)
                return -ENOMEM;

        vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL);
        if (!vdev->fw)
                return -ENOMEM;

        vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL);
        if (!vdev->ipc)
                return -ENOMEM;

        vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL);
        if (!vdev->pm)
                return -ENOMEM;

        if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) {
                vdev->hw->ops = &ivpu_hw_40xx_ops;
                vdev->hw->dma_bits = 48;
        } else {
                vdev->hw->ops = &ivpu_hw_37xx_ops;
                vdev->hw->dma_bits = 38;
        }

        vdev->platform = IVPU_PLATFORM_INVALID;
        vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
        vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
        atomic64_set(&vdev->unique_id_counter, 0);
        xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
        xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
        lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
        INIT_LIST_HEAD(&vdev->bo_list);

        ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
        if (ret)
                goto err_xa_destroy;

        ret = ivpu_pci_init(vdev);
        if (ret)
                goto err_xa_destroy;

        ret = ivpu_irq_init(vdev);
        if (ret)
                goto err_xa_destroy;

        /* Init basic HW info based on buttress registers which are accessible before power up */
        ret = ivpu_hw_info_init(vdev);
        if (ret)
                goto err_xa_destroy;

        /* Power up early so the rest of init code can access VPU registers */
        ret = ivpu_hw_power_up(vdev);
        if (ret)
                goto err_power_down;

        ret = ivpu_mmu_global_context_init(vdev);
        if (ret)
                goto err_power_down;

        ret = ivpu_mmu_init(vdev);
        if (ret)
                goto err_mmu_gctx_fini;

        ret = ivpu_mmu_reserved_context_init(vdev);
        if (ret)
                goto err_mmu_gctx_fini;

        ret = ivpu_fw_init(vdev);
        if (ret)
                goto err_mmu_rctx_fini;

        ret = ivpu_ipc_init(vdev);
        if (ret)
                goto err_fw_fini;

        ivpu_pm_init(vdev);

        ret = ivpu_job_done_thread_init(vdev);
        if (ret)
                goto err_ipc_fini;

        ret = ivpu_boot(vdev);
        if (ret)
                goto err_job_done_thread_fini;

        ivpu_pm_enable(vdev);

        return 0;

err_job_done_thread_fini:
        ivpu_job_done_thread_fini(vdev);
err_ipc_fini:
        ivpu_ipc_fini(vdev);
err_fw_fini:
        ivpu_fw_fini(vdev);
err_mmu_rctx_fini:
        ivpu_mmu_reserved_context_fini(vdev);
err_mmu_gctx_fini:
        ivpu_mmu_global_context_fini(vdev);
err_power_down:
        ivpu_hw_power_down(vdev);
        if (IVPU_WA(d3hot_after_power_off))
                pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
err_xa_destroy:
        xa_destroy(&vdev->submitted_jobs_xa);
        xa_destroy(&vdev->context_xa);
        return ret;
}

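/* Teardown mirror of ivpu_dev_init(), in reverse initialization order. */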
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
        ivpu_pm_disable(vdev);
        ivpu_shutdown(vdev);
        if (IVPU_WA(d3hot_after_power_off))
                pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
        ivpu_job_done_thread_fini(vdev);
        ivpu_pm_cancel_recovery(vdev);

        ivpu_ipc_fini(vdev);
        ivpu_fw_fini(vdev);
        ivpu_mmu_reserved_context_fini(vdev);
        ivpu_mmu_global_context_fini(vdev);

        drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
        xa_destroy(&vdev->submitted_jobs_xa);
        drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
        xa_destroy(&vdev->context_xa);
}

static struct pci_device_id ivpu_pci_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
        { }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);

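/*
 * devm_drm_dev_alloc() ties the DRM device's lifetime to the PCI device, so
 * the drmm_kzalloc()-managed state set up in ivpu_dev_init() is released
 * automatically once the last reference to the DRM device is dropped.
 */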
static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct ivpu_device *vdev;
        int ret;

        vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm);
        if (IS_ERR(vdev))
                return PTR_ERR(vdev);

        pci_set_drvdata(pdev, vdev);

        ret = ivpu_dev_init(vdev);
        if (ret)
                return ret;

        ivpu_debugfs_init(vdev);

        ret = drm_dev_register(&vdev->drm, 0);
        if (ret) {
                dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret);
                ivpu_dev_fini(vdev);
        }

        return ret;
}

static void ivpu_remove(struct pci_dev *pdev)
{
        struct ivpu_device *vdev = pci_get_drvdata(pdev);

        drm_dev_unplug(&vdev->drm);
        ivpu_dev_fini(vdev);
}

static const struct dev_pm_ops ivpu_drv_pci_pm = {
        SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb)
        SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb, ivpu_pm_runtime_resume_cb, NULL)
};

static const struct pci_error_handlers ivpu_drv_pci_err = {
        .reset_prepare = ivpu_pm_reset_prepare_cb,
        .reset_done = ivpu_pm_reset_done_cb,
};

static struct pci_driver ivpu_pci_driver = {
        .name = KBUILD_MODNAME,
        .id_table = ivpu_pci_ids,
        .probe = ivpu_probe,
        .remove = ivpu_remove,
        .driver = {
                .pm = &ivpu_drv_pci_pm,
        },
        .err_handler = &ivpu_drv_pci_err,
};

module_pci_driver(ivpu_pci_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(DRIVER_VERSION_STR);