habanalabs: replace WARN/WARN_ON with dev_crit in driver
drivers/misc/habanalabs/common/device.c
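As a hedged illustration of the pattern this patch applies (the dev_crit() call sites in the file below are the authoritative changes), an assertion such as:

        WARN(1, "CS active cnt %d is not 0 during reset\n", hdev->cs_active_cnt);

becomes a device-scoped critical message that does not dump a stack trace:

        dev_crit(hdev->dev, "CS active cnt %d is not 0 during reset\n",
                hdev->cs_active_cnt);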
// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#define pr_fmt(fmt)                     "habanalabs: " fmt

#include "habanalabs.h"

#include <linux/pci.h>
#include <linux/hwmon.h>
#include <uapi/misc/habanalabs.h>

enum hl_device_status hl_device_status(struct hl_device *hdev)
{
        enum hl_device_status status;

        if (atomic_read(&hdev->in_reset))
                status = HL_DEVICE_STATUS_IN_RESET;
        else if (hdev->needs_reset)
                status = HL_DEVICE_STATUS_NEEDS_RESET;
        else if (hdev->disabled)
                status = HL_DEVICE_STATUS_MALFUNCTION;
        else
                status = HL_DEVICE_STATUS_OPERATIONAL;

        return status;
}

bool hl_device_operational(struct hl_device *hdev,
                enum hl_device_status *status)
{
        enum hl_device_status current_status;

        current_status = hl_device_status(hdev);
        if (status)
                *status = current_status;

        switch (current_status) {
        case HL_DEVICE_STATUS_IN_RESET:
        case HL_DEVICE_STATUS_MALFUNCTION:
        case HL_DEVICE_STATUS_NEEDS_RESET:
                return false;
        case HL_DEVICE_STATUS_OPERATIONAL:
        default:
                return true;
        }
}

static void hpriv_release(struct kref *ref)
{
        struct hl_fpriv *hpriv;
        struct hl_device *hdev;

        hpriv = container_of(ref, struct hl_fpriv, refcount);

        hdev = hpriv->hdev;

        put_pid(hpriv->taskpid);

        hl_debugfs_remove_file(hpriv);

        mutex_destroy(&hpriv->restore_phase_mutex);

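        /*
         * Removing this process from the open-files list and clearing
         * compute_ctx are done under fpriv_list_lock so that concurrent
         * open and reset paths observe a consistent state.
         */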
        mutex_lock(&hdev->fpriv_list_lock);
        list_del(&hpriv->dev_node);
        hdev->compute_ctx = NULL;
        mutex_unlock(&hdev->fpriv_list_lock);

        kfree(hpriv);
}

void hl_hpriv_get(struct hl_fpriv *hpriv)
{
        kref_get(&hpriv->refcount);
}

void hl_hpriv_put(struct hl_fpriv *hpriv)
{
        kref_put(&hpriv->refcount, hpriv_release);
}

/*
 * hl_device_release - release function for habanalabs device
 *
 * @inode: pointer to inode structure
 * @filp: pointer to file structure
 *
 * Called when a process closes a habanalabs device
 */
static int hl_device_release(struct inode *inode, struct file *filp)
{
        struct hl_fpriv *hpriv = filp->private_data;

        hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr);
        hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);

        filp->private_data = NULL;

        hl_hpriv_put(hpriv);

        return 0;
}

static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
{
        struct hl_fpriv *hpriv = filp->private_data;
        struct hl_device *hdev;

        filp->private_data = NULL;

        hdev = hpriv->hdev;

        mutex_lock(&hdev->fpriv_list_lock);
        list_del(&hpriv->dev_node);
        mutex_unlock(&hdev->fpriv_list_lock);

        kfree(hpriv);

        return 0;
}

/*
 * hl_mmap - mmap function for habanalabs device
 *
 * @filp: pointer to file structure
 * @vma: pointer to vm_area_struct of the process
 *
 * Called when a process does an mmap on a habanalabs device. Call the
 * device's mmap function at the end of the common code.
 */
static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct hl_fpriv *hpriv = filp->private_data;
        unsigned long vm_pgoff;

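        /*
         * The mmap offset encodes the buffer type in its high bits (see
         * HL_MMAP_TYPE_MASK); strip the type bits so the type-specific
         * handler sees a plain page offset.
         */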
        vm_pgoff = vma->vm_pgoff;
        vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);

        switch (vm_pgoff & HL_MMAP_TYPE_MASK) {
        case HL_MMAP_TYPE_CB:
                return hl_cb_mmap(hpriv, vma);
        }

        return -EINVAL;
}

static const struct file_operations hl_ops = {
        .owner = THIS_MODULE,
        .open = hl_device_open,
        .release = hl_device_release,
        .mmap = hl_mmap,
        .unlocked_ioctl = hl_ioctl,
        .compat_ioctl = hl_ioctl
};

static const struct file_operations hl_ctrl_ops = {
        .owner = THIS_MODULE,
        .open = hl_device_open_ctrl,
        .release = hl_device_release_ctrl,
        .unlocked_ioctl = hl_ioctl_control,
        .compat_ioctl = hl_ioctl_control
};

static void device_release_func(struct device *dev)
{
        kfree(dev);
}

/*
 * device_init_cdev - Initialize cdev and device for habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 * @hclass: pointer to the class object of the device
 * @minor: minor number of the specific device
 * @fops: file operations to install for this device
 * @name: name of the device as it will appear in the filesystem
 * @cdev: pointer to the char device object that will be initialized
 * @dev: pointer to the device object that will be initialized
 *
 * Initialize a cdev and a Linux device for a habanalabs device.
 */
static int device_init_cdev(struct hl_device *hdev, struct class *hclass,
                                int minor, const struct file_operations *fops,
                                char *name, struct cdev *cdev,
                                struct device **dev)
{
        cdev_init(cdev, fops);
        cdev->owner = THIS_MODULE;

        *dev = kzalloc(sizeof(**dev), GFP_KERNEL);
        if (!*dev)
                return -ENOMEM;

        device_initialize(*dev);
        (*dev)->devt = MKDEV(hdev->major, minor);
        (*dev)->class = hclass;
        (*dev)->release = device_release_func;
        dev_set_drvdata(*dev, hdev);
        dev_set_name(*dev, "%s", name);

        return 0;
}

static int device_cdev_sysfs_add(struct hl_device *hdev)
{
        int rc;

        rc = cdev_device_add(&hdev->cdev, hdev->dev);
        if (rc) {
                dev_err(hdev->dev,
                        "failed to add a char device to the system\n");
                return rc;
        }

        rc = cdev_device_add(&hdev->cdev_ctrl, hdev->dev_ctrl);
        if (rc) {
                dev_err(hdev->dev,
                        "failed to add a control char device to the system\n");
                goto delete_cdev_device;
        }

        /* hl_sysfs_init() must be done after adding the device to the system */
        rc = hl_sysfs_init(hdev);
        if (rc) {
                dev_err(hdev->dev, "failed to initialize sysfs\n");
                goto delete_ctrl_cdev_device;
        }

        hdev->cdev_sysfs_created = true;

        return 0;

delete_ctrl_cdev_device:
        cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
delete_cdev_device:
        cdev_device_del(&hdev->cdev, hdev->dev);
        return rc;
}

static void device_cdev_sysfs_del(struct hl_device *hdev)
{
        if (!hdev->cdev_sysfs_created)
                goto put_devices;

        hl_sysfs_fini(hdev);
        cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
        cdev_device_del(&hdev->cdev, hdev->dev);

put_devices:
        put_device(hdev->dev);
        put_device(hdev->dev_ctrl);
}

static void device_hard_reset_pending(struct work_struct *work)
{
        struct hl_device_reset_work *device_reset_work =
                container_of(work, struct hl_device_reset_work,
                                reset_work.work);
        struct hl_device *hdev = device_reset_work->hdev;
        int rc;

        rc = hl_device_reset(hdev, true, true);
        if ((rc == -EBUSY) && !hdev->device_fini_pending) {
                dev_info(hdev->dev,
                        "Could not reset device. Will try again in %u seconds\n",
                        HL_PENDING_RESET_PER_SEC);

                queue_delayed_work(device_reset_work->wq,
                        &device_reset_work->reset_work,
                        msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000));
        }
}

/*
 * device_early_init - do some early initialization for the habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Install the relevant function pointers and call the early_init function,
 * if such a function exists
 */
static int device_early_init(struct hl_device *hdev)
{
        int i, rc;
        char workq_name[32];

        switch (hdev->asic_type) {
        case ASIC_GOYA:
                goya_set_asic_funcs(hdev);
                strlcpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name));
                break;
        case ASIC_GAUDI:
                gaudi_set_asic_funcs(hdev);
                strlcpy(hdev->asic_name, "GAUDI", sizeof(hdev->asic_name));
                break;
        default:
                dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
                        hdev->asic_type);
                return -EINVAL;
        }

        rc = hdev->asic_funcs->early_init(hdev);
        if (rc)
                return rc;

        rc = hl_asid_init(hdev);
        if (rc)
                goto early_fini;

        if (hdev->asic_prop.completion_queues_count) {
                hdev->cq_wq = kcalloc(hdev->asic_prop.completion_queues_count,
                                sizeof(*hdev->cq_wq),
                                GFP_KERNEL);
                if (!hdev->cq_wq) {
                        rc = -ENOMEM;
                        goto asid_fini;
                }
        }

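        /*
         * One single-threaded workqueue per completion queue, so the work
         * items that free the jobs of a given CQ are serialized with respect
         * to each other.
         */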
        for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
                snprintf(workq_name, sizeof(workq_name), "hl-free-jobs-%u",
                                (u32) i);
                hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);
                if (!hdev->cq_wq[i]) {
                        dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
                        rc = -ENOMEM;
                        goto free_cq_wq;
                }
        }

        hdev->eq_wq = alloc_workqueue("hl-events", WQ_UNBOUND, 0);
        if (!hdev->eq_wq) {
                dev_err(hdev->dev, "Failed to allocate EQ workqueue\n");
                rc = -ENOMEM;
                goto free_cq_wq;
        }

        hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info),
                                        GFP_KERNEL);
        if (!hdev->hl_chip_info) {
                rc = -ENOMEM;
                goto free_eq_wq;
        }

        hdev->idle_busy_ts_arr = kmalloc_array(HL_IDLE_BUSY_TS_ARR_SIZE,
                                        sizeof(struct hl_device_idle_busy_ts),
                                        (GFP_KERNEL | __GFP_ZERO));
        if (!hdev->idle_busy_ts_arr) {
                rc = -ENOMEM;
                goto free_chip_info;
        }

        rc = hl_mmu_if_set_funcs(hdev);
        if (rc)
                goto free_idle_busy_ts_arr;

        hl_cb_mgr_init(&hdev->kernel_cb_mgr);

        hdev->device_reset_work.wq =
                        create_singlethread_workqueue("hl_device_reset");
        if (!hdev->device_reset_work.wq) {
                rc = -ENOMEM;
                dev_err(hdev->dev, "Failed to create device reset WQ\n");
                goto free_cb_mgr;
        }

        INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work,
                        device_hard_reset_pending);
        hdev->device_reset_work.hdev = hdev;
        hdev->device_fini_pending = 0;

        mutex_init(&hdev->send_cpu_message_lock);
        mutex_init(&hdev->debug_lock);
        INIT_LIST_HEAD(&hdev->cs_mirror_list);
        spin_lock_init(&hdev->cs_mirror_lock);
        INIT_LIST_HEAD(&hdev->fpriv_list);
        mutex_init(&hdev->fpriv_list_lock);
        atomic_set(&hdev->in_reset, 0);

        return 0;

free_cb_mgr:
        hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
free_idle_busy_ts_arr:
        kfree(hdev->idle_busy_ts_arr);
free_chip_info:
        kfree(hdev->hl_chip_info);
free_eq_wq:
        destroy_workqueue(hdev->eq_wq);
free_cq_wq:
        for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
                if (hdev->cq_wq[i])
                        destroy_workqueue(hdev->cq_wq[i]);
        kfree(hdev->cq_wq);
asid_fini:
        hl_asid_fini(hdev);
early_fini:
        if (hdev->asic_funcs->early_fini)
                hdev->asic_funcs->early_fini(hdev);

        return rc;
}

/*
 * device_early_fini - finalize all that was done in device_early_init
 *
 * @hdev: pointer to habanalabs device structure
 *
 */
static void device_early_fini(struct hl_device *hdev)
{
        int i;

        mutex_destroy(&hdev->debug_lock);
        mutex_destroy(&hdev->send_cpu_message_lock);

        mutex_destroy(&hdev->fpriv_list_lock);

        hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);

        kfree(hdev->idle_busy_ts_arr);
        kfree(hdev->hl_chip_info);

        destroy_workqueue(hdev->eq_wq);
        destroy_workqueue(hdev->device_reset_work.wq);

        for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
                destroy_workqueue(hdev->cq_wq[i]);
        kfree(hdev->cq_wq);

        hl_asid_fini(hdev);

        if (hdev->asic_funcs->early_fini)
                hdev->asic_funcs->early_fini(hdev);
}

static void set_freq_to_low_job(struct work_struct *work)
{
        struct hl_device *hdev = container_of(work, struct hl_device,
                                                work_freq.work);

        mutex_lock(&hdev->fpriv_list_lock);

        if (!hdev->compute_ctx)
                hl_device_set_frequency(hdev, PLL_LOW);

        mutex_unlock(&hdev->fpriv_list_lock);

        schedule_delayed_work(&hdev->work_freq,
                        usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
}

static void hl_device_heartbeat(struct work_struct *work)
{
        struct hl_device *hdev = container_of(work, struct hl_device,
                                                work_heartbeat.work);

        if (!hl_device_operational(hdev, NULL))
                goto reschedule;

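        /* send_heartbeat() returns 0 if the device answered the heartbeat */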
        if (!hdev->asic_funcs->send_heartbeat(hdev))
                goto reschedule;

        dev_err(hdev->dev, "Device heartbeat failed!\n");
        hl_device_reset(hdev, true, false);

        return;

reschedule:
        schedule_delayed_work(&hdev->work_heartbeat,
                        usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
}

/*
 * device_late_init - perform late initialization for the habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Handle initialization that either needs the device H/W queues to be active
 * or must happen after the rest of the initialization is finished
 */
static int device_late_init(struct hl_device *hdev)
{
        int rc;

        if (hdev->asic_funcs->late_init) {
                rc = hdev->asic_funcs->late_init(hdev);
                if (rc) {
                        dev_err(hdev->dev,
                                "failed late initialization for the H/W\n");
                        return rc;
                }
        }

        hdev->high_pll = hdev->asic_prop.high_pll;

        /* force setting to low frequency */
        hdev->curr_pll_profile = PLL_LOW;

        if (hdev->pm_mng_profile == PM_AUTO)
                hdev->asic_funcs->set_pll_profile(hdev, PLL_LOW);
        else
                hdev->asic_funcs->set_pll_profile(hdev, PLL_LAST);

        INIT_DELAYED_WORK(&hdev->work_freq, set_freq_to_low_job);
        schedule_delayed_work(&hdev->work_freq,
                        usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));

        if (hdev->heartbeat) {
                INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
                schedule_delayed_work(&hdev->work_heartbeat,
                                usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
        }

        hdev->late_init_done = true;

        return 0;
}

/*
 * device_late_fini - finalize all that was done in device_late_init
 *
 * @hdev: pointer to habanalabs device structure
 *
 */
static void device_late_fini(struct hl_device *hdev)
{
        if (!hdev->late_init_done)
                return;

        cancel_delayed_work_sync(&hdev->work_freq);
        if (hdev->heartbeat)
                cancel_delayed_work_sync(&hdev->work_heartbeat);

        if (hdev->asic_funcs->late_fini)
                hdev->asic_funcs->late_fini(hdev);

        hdev->late_init_done = false;
}

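/*
 * hl_device_utilization - estimate how busy the device was recently
 *
 * @hdev: pointer to habanalabs device structure
 * @period_ms: sampling period, in milliseconds, to look back at
 *
 * Walks the idle/busy timestamp ring buffer backwards from the most recent
 * entry and accumulates the busy time that overlaps the requested period.
 * Returns the busy time as a percentage of the period.
 */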
uint32_t hl_device_utilization(struct hl_device *hdev, uint32_t period_ms)
{
        struct hl_device_idle_busy_ts *ts;
        ktime_t zero_ktime, curr = ktime_get();
        u32 overlap_cnt = 0, last_index = hdev->idle_busy_ts_idx;
        s64 period_us, last_start_us, last_end_us, last_busy_time_us,
                total_busy_time_us = 0, total_busy_time_ms;

        zero_ktime = ktime_set(0, 0);
        period_us = period_ms * USEC_PER_MSEC;
        ts = &hdev->idle_busy_ts_arr[last_index];

        /* Check the case where the device is currently idle */
        if (!ktime_compare(ts->busy_to_idle_ts, zero_ktime) &&
                        !ktime_compare(ts->idle_to_busy_ts, zero_ktime)) {

                last_index--;
                /* Handle wrap-around in case idle_busy_ts_idx was 0 */
                if (last_index > HL_IDLE_BUSY_TS_ARR_SIZE)
                        last_index = HL_IDLE_BUSY_TS_ARR_SIZE - 1;

                ts = &hdev->idle_busy_ts_arr[last_index];
        }
        while (overlap_cnt < HL_IDLE_BUSY_TS_ARR_SIZE) {
                /* Check if we are in the last-sample case, i.e. if the sample
                 * began before the sampling period. This could be a real
                 * sample or 0, so we need to handle both cases
                 */
                last_start_us = ktime_to_us(
                                ktime_sub(curr, ts->idle_to_busy_ts));

                if (last_start_us > period_us) {

                        /* First check two cases:
                         * 1. If the device is currently busy
                         * 2. If the device was idle during the whole sampling
                         *    period
                         */

                        if (!ktime_compare(ts->busy_to_idle_ts, zero_ktime)) {
                                /* Check if the device is currently busy */
                                if (ktime_compare(ts->idle_to_busy_ts,
                                                zero_ktime))
                                        return 100;

                                /* We either didn't have any activity or we
                                 * reached an entry which is 0. Either way,
                                 * exit and return what was accumulated so far
                                 */
                                break;
                        }

                        /* If the sample has finished, check that it is relevant */
                        last_end_us = ktime_to_us(
                                        ktime_sub(curr, ts->busy_to_idle_ts));

                        if (last_end_us > period_us)
                                break;

                        /* It is relevant, so add it, but with adjustment */
                        last_busy_time_us = ktime_to_us(
                                                ktime_sub(ts->busy_to_idle_ts,
                                                ts->idle_to_busy_ts));
                        total_busy_time_us += last_busy_time_us -
                                        (last_start_us - period_us);
                        break;
                }

                /* Check if the sample is finished or still open */
                if (ktime_compare(ts->busy_to_idle_ts, zero_ktime))
                        last_busy_time_us = ktime_to_us(
                                                ktime_sub(ts->busy_to_idle_ts,
                                                ts->idle_to_busy_ts));
                else
                        last_busy_time_us = ktime_to_us(
                                        ktime_sub(curr, ts->idle_to_busy_ts));

                total_busy_time_us += last_busy_time_us;

                last_index--;
                /* Handle wrap-around in case idle_busy_ts_idx was 0 */
                if (last_index > HL_IDLE_BUSY_TS_ARR_SIZE)
                        last_index = HL_IDLE_BUSY_TS_ARR_SIZE - 1;

                ts = &hdev->idle_busy_ts_arr[last_index];

                overlap_cnt++;
        }

        total_busy_time_ms = DIV_ROUND_UP_ULL(total_busy_time_us,
                                                USEC_PER_MSEC);

        return DIV_ROUND_UP_ULL(total_busy_time_ms * 100, period_ms);
}

/*
 * hl_device_set_frequency - set the frequency of the device
 *
 * @hdev: pointer to habanalabs device structure
 * @freq: the new frequency value
 *
 * Change the frequency if needed. This function has no protection against
 * concurrency, therefore it is assumed that the calling function has protected
 * itself against the case of calling this function from multiple threads with
 * different values
 *
 * Returns 0 if no change was done, otherwise returns 1
 */
int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq)
{
        if ((hdev->pm_mng_profile == PM_MANUAL) ||
                        (hdev->curr_pll_profile == freq))
                return 0;

        dev_dbg(hdev->dev, "Changing device frequency to %s\n",
                freq == PLL_HIGH ? "high" : "low");

        hdev->asic_funcs->set_pll_profile(hdev, freq);

        hdev->curr_pll_profile = freq;

        return 1;
}

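/*
 * hl_device_set_debug_mode - enable/disable device debug mode
 *
 * @hdev: pointer to habanalabs device structure
 * @enable: true to enter debug mode, false to leave it
 *
 * Debug mode keeps clock gating disabled so the CoreSight components stay
 * accessible. Returns 0 on success or -EFAULT if the requested transition
 * does not match the current state.
 */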
int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
{
        int rc = 0;

        mutex_lock(&hdev->debug_lock);

        if (!enable) {
                if (!hdev->in_debug) {
                        dev_err(hdev->dev,
                                "Failed to disable debug mode because device was not in debug mode\n");
                        rc = -EFAULT;
                        goto out;
                }

                if (!hdev->hard_reset_pending)
                        hdev->asic_funcs->halt_coresight(hdev);

                hdev->in_debug = 0;

                if (!hdev->hard_reset_pending)
                        hdev->asic_funcs->set_clock_gating(hdev);

                goto out;
        }

        if (hdev->in_debug) {
                dev_err(hdev->dev,
                        "Failed to enable debug mode because device is already in debug mode\n");
                rc = -EFAULT;
                goto out;
        }

        hdev->asic_funcs->disable_clock_gating(hdev);
        hdev->in_debug = 1;

out:
        mutex_unlock(&hdev->debug_lock);

        return rc;
}

/*
 * hl_device_suspend - initiate device suspend
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int hl_device_suspend(struct hl_device *hdev)
{
        int rc;

        pci_save_state(hdev->pdev);

        /* Block future CS/VM/JOB completion operations */
        rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
        if (rc) {
                dev_err(hdev->dev, "Can't suspend while in reset\n");
                return -EIO;
        }

        /* This blocks all other operations that are not blocked by in_reset */
        hdev->disabled = true;

        /*
         * Flush anyone that is inside the critical section of enqueue
         * jobs to the H/W
         */
        hdev->asic_funcs->hw_queues_lock(hdev);
        hdev->asic_funcs->hw_queues_unlock(hdev);

        /* Flush processes that are sending message to CPU */
        mutex_lock(&hdev->send_cpu_message_lock);
        mutex_unlock(&hdev->send_cpu_message_lock);

        rc = hdev->asic_funcs->suspend(hdev);
        if (rc)
                dev_err(hdev->dev,
                        "Failed to disable PCI access of device CPU\n");

        /* Shut down the device */
        pci_disable_device(hdev->pdev);
        pci_set_power_state(hdev->pdev, PCI_D3hot);

        return 0;
}

/*
 * hl_device_resume - initiate device resume
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int hl_device_resume(struct hl_device *hdev)
{
        int rc;

        pci_set_power_state(hdev->pdev, PCI_D0);
        pci_restore_state(hdev->pdev);
        rc = pci_enable_device_mem(hdev->pdev);
        if (rc) {
                dev_err(hdev->dev,
                        "Failed to enable PCI device in resume\n");
                return rc;
        }

        pci_set_master(hdev->pdev);

        rc = hdev->asic_funcs->resume(hdev);
        if (rc) {
                dev_err(hdev->dev, "Failed to resume device after suspend\n");
                goto disable_device;
        }

        hdev->disabled = false;
        atomic_set(&hdev->in_reset, 0);

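        /* A full (hard) reset is needed to bring the H/W back to a working
         * state after the D3hot -> D0 transition
         */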
        rc = hl_device_reset(hdev, true, false);
        if (rc) {
                dev_err(hdev->dev, "Failed to reset device during resume\n");
                goto disable_device;
        }

        return 0;

disable_device:
        pci_clear_master(hdev->pdev);
        pci_disable_device(hdev->pdev);

        return rc;
}

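/*
 * device_kill_open_processes - kill processes that hold the device open
 *
 * @hdev: pointer to habanalabs device structure
 * @timeout: seconds to wait for processes to exit; 0 selects the default
 *           reset wait period
 *
 * Returns 0 if all processes exited, -EBUSY if the caller should try again
 * later (processes were signalled but have not exited yet), or -ETIME after
 * the maximum number of kill trials has been reached.
 */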
static int device_kill_open_processes(struct hl_device *hdev, u32 timeout)
{
        struct hl_fpriv *hpriv;
        struct task_struct *task = NULL;
        u32 pending_cnt;

        /* Give the user time to close the FD, and processes that are inside
         * hl_device_open time to finish
         */
        if (!list_empty(&hdev->fpriv_list))
                ssleep(1);

        if (timeout) {
                pending_cnt = timeout;
        } else {
                if (hdev->process_kill_trial_cnt) {
                        /* Processes have already been killed */
                        pending_cnt = 1;
                        goto wait_for_processes;
                } else {
                        /* Wait a small period after process kill */
                        pending_cnt = HL_PENDING_RESET_PER_SEC;
                }
        }

        mutex_lock(&hdev->fpriv_list_lock);

        /* This section must be protected because we are dereferencing
         * pointers that are freed if the process exits
         */
        list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
                task = get_pid_task(hpriv->taskpid, PIDTYPE_PID);
                if (task) {
                        dev_info(hdev->dev, "Killing user process pid=%d\n",
                                task_pid_nr(task));
                        send_sig(SIGKILL, task, 1);
                        usleep_range(1000, 10000);

                        put_task_struct(task);
                }
        }

        mutex_unlock(&hdev->fpriv_list_lock);

        /*
         * We killed the open processes, but that doesn't mean they are closed.
         * They could be running a long cleanup phase in the driver, e.g. MMU
         * unmappings, or running another long teardown flow even before our
         * cleanup.
         * Therefore we need to wait again to make sure they are closed before
         * continuing with the reset.
         */

wait_for_processes:
        while ((!list_empty(&hdev->fpriv_list)) && (pending_cnt)) {
                dev_dbg(hdev->dev,
                        "Waiting for all unmap operations to finish before hard reset\n");

                pending_cnt--;

                ssleep(1);
        }

        /* All processes exited successfully */
        if (list_empty(&hdev->fpriv_list))
                return 0;

        /* Give up waiting for processes to exit */
        if (hdev->process_kill_trial_cnt == HL_PENDING_RESET_MAX_TRIALS)
                return -ETIME;

        hdev->process_kill_trial_cnt++;

        return -EBUSY;
}

/*
 * hl_device_reset - reset the device
 *
 * @hdev: pointer to habanalabs device structure
 * @hard_reset: should we do hard reset to all engines or just reset the
 *              compute/dma engines
 * @from_hard_reset_thread: is the caller the hard-reset thread
 *
 * Block future CS and wait for pending CS to be enqueued
 * Call ASIC H/W fini
 * Flush all completions
 * Re-initialize all internal data structures
 * Call ASIC H/W init, late_init
 * Test queues
 * Enable device
 *
 * Returns 0 for success or an error on failure.
 */
int hl_device_reset(struct hl_device *hdev, bool hard_reset,
                        bool from_hard_reset_thread)
{
        int i, rc;

        if (!hdev->init_done) {
                dev_err(hdev->dev,
                        "Can't reset before initialization is done\n");
                return 0;
        }

        if ((!hard_reset) && (!hdev->supports_soft_reset)) {
                dev_dbg(hdev->dev, "Doing hard-reset instead of soft-reset\n");
                hard_reset = true;
        }

        /* Re-entry of reset thread */
        if (from_hard_reset_thread && hdev->process_kill_trial_cnt)
                goto kill_processes;

        /*
         * Prevent concurrency in this function - only one reset should be
         * done at any given time. Only need to perform this if we didn't
         * get from the dedicated hard reset thread
         */
        if (!from_hard_reset_thread) {
                /* Block future CS/VM/JOB completion operations */
                rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
                if (rc)
                        return 0;

                if (hard_reset) {
                        /* Disable PCI access from the device F/W so it won't
                         * send us additional interrupts. We disable MSI/MSI-X
                         * at the halt_engines function and we can't have the
                         * F/W sending us interrupts after that. We need to
                         * disable the access here because if the device is
                         * marked disabled, the message won't be sent. Also, in
                         * case of heartbeat, the device CPU is marked as
                         * disabled so this message won't be sent
                         */
                        if (hl_fw_send_pci_access_msg(hdev,
                                        CPUCP_PACKET_DISABLE_PCI_ACCESS))
                                dev_warn(hdev->dev,
                                        "Failed to disable PCI access by F/W\n");
                }

                /* This also blocks future CS/VM/JOB completion operations */
                hdev->disabled = true;

                /* Flush anyone that is inside the critical section of enqueue
                 * jobs to the H/W
                 */
                hdev->asic_funcs->hw_queues_lock(hdev);
                hdev->asic_funcs->hw_queues_unlock(hdev);

                /* Flush anyone that is inside device open */
                mutex_lock(&hdev->fpriv_list_lock);
                mutex_unlock(&hdev->fpriv_list_lock);

                dev_err(hdev->dev, "Going to RESET device!\n");
        }

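/*
 * The flow below runs twice if a soft-reset fails: the error path sets
 * hard_reset to true and jumps back to this label to escalate to a
 * hard reset.
 */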
again:
        if ((hard_reset) && (!from_hard_reset_thread)) {
                hdev->hard_reset_pending = true;

                hdev->process_kill_trial_cnt = 0;

                /*
                 * Because the reset function can't run from interrupt or
                 * from heartbeat work, we need to call the reset function
                 * from a dedicated work
                 */
                queue_delayed_work(hdev->device_reset_work.wq,
                        &hdev->device_reset_work.reset_work, 0);

                return 0;
        }

        if (hard_reset) {
                device_late_fini(hdev);

                /*
                 * Now that the heartbeat thread is closed, flush processes
                 * which are sending messages to CPU
                 */
                mutex_lock(&hdev->send_cpu_message_lock);
                mutex_unlock(&hdev->send_cpu_message_lock);
        }

        /*
         * Halt the engines and disable interrupts so we won't get any more
         * completions from H/W and we won't have any accesses from the
         * H/W to the host machine
         */
        hdev->asic_funcs->halt_engines(hdev, hard_reset);

        /* Go over all the queues, release all CS and their jobs */
        hl_cs_rollback_all(hdev);

kill_processes:
        if (hard_reset) {
                /* Kill processes here after CS rollback. This is because the
                 * process can't really exit until all its CSs are done, which
                 * is what we do in cs rollback
                 */
                rc = device_kill_open_processes(hdev, 0);

                if (rc == -EBUSY) {
                        if (hdev->device_fini_pending) {
                                dev_crit(hdev->dev,
                                        "Failed to kill all open processes, stopping hard reset\n");
                                goto out_err;
                        }

                        /* signal reset thread to reschedule */
                        return rc;
                }

                if (rc) {
                        dev_crit(hdev->dev,
                                "Failed to kill all open processes, stopping hard reset\n");
                        goto out_err;
                }

                /* Flush the Event queue workers to make sure no other thread is
                 * reading or writing to registers during the reset
                 */
                flush_workqueue(hdev->eq_wq);
        }

        /* Reset the H/W. It will be in idle state after this returns */
        hdev->asic_funcs->hw_fini(hdev, hard_reset);

        if (hard_reset) {
                /* Release kernel context */
                if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
                        hdev->kernel_ctx = NULL;
                hl_vm_fini(hdev);
                hl_mmu_fini(hdev);
                hl_eq_reset(hdev, &hdev->event_queue);
        }

        /* Re-initialize PI,CI to 0 in all queues (hw queue, cq) */
        hl_hw_queue_reset(hdev, hard_reset);
        for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
                hl_cq_reset(hdev, &hdev->completion_queue[i]);

        hdev->idle_busy_ts_idx = 0;
        hdev->idle_busy_ts_arr[0].busy_to_idle_ts = ktime_set(0, 0);
        hdev->idle_busy_ts_arr[0].idle_to_busy_ts = ktime_set(0, 0);

        if (hdev->cs_active_cnt)
                dev_crit(hdev->dev, "CS active cnt %d is not 0 during reset\n",
                        hdev->cs_active_cnt);

        mutex_lock(&hdev->fpriv_list_lock);

        /* Make sure the context switch phase will run again */
        if (hdev->compute_ctx) {
                atomic_set(&hdev->compute_ctx->thread_ctx_switch_token, 1);
                hdev->compute_ctx->thread_ctx_switch_wait_token = 0;
        }

        mutex_unlock(&hdev->fpriv_list_lock);

        /* Finished tear-down, starting to re-initialize */

        if (hard_reset) {
                hdev->device_cpu_disabled = false;
                hdev->hard_reset_pending = false;

                if (hdev->kernel_ctx) {
                        dev_crit(hdev->dev,
                                "kernel ctx was alive during hard reset, something is terribly wrong\n");
                        rc = -EBUSY;
                        goto out_err;
                }

                rc = hl_mmu_init(hdev);
                if (rc) {
                        dev_err(hdev->dev,
                                "Failed to initialize MMU S/W after hard reset\n");
                        goto out_err;
                }

                /* Allocate the kernel context */
                hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx),
                                                GFP_KERNEL);
                if (!hdev->kernel_ctx) {
                        rc = -ENOMEM;
                        hl_mmu_fini(hdev);
                        goto out_err;
                }

                hdev->compute_ctx = NULL;

                rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
                if (rc) {
                        dev_err(hdev->dev,
                                "failed to init kernel ctx in hard reset\n");
                        kfree(hdev->kernel_ctx);
                        hdev->kernel_ctx = NULL;
                        hl_mmu_fini(hdev);
                        goto out_err;
                }
        }

        /* Device is now enabled as part of the initialization requires
         * communication with the device firmware to get information that
         * is required for the initialization itself
         */
        hdev->disabled = false;

        rc = hdev->asic_funcs->hw_init(hdev);
        if (rc) {
                dev_err(hdev->dev,
                        "failed to initialize the H/W after reset\n");
                goto out_err;
        }

        /* Check that the communication with the device is working */
        rc = hdev->asic_funcs->test_queues(hdev);
        if (rc) {
                dev_err(hdev->dev,
                        "Failed to detect if device is alive after reset\n");
                goto out_err;
        }

        if (hard_reset) {
                rc = device_late_init(hdev);
                if (rc) {
                        dev_err(hdev->dev,
                                "Failed late init after hard reset\n");
                        goto out_err;
                }

                rc = hl_vm_init(hdev);
                if (rc) {
                        dev_err(hdev->dev,
                                "Failed to init memory module after hard reset\n");
                        goto out_err;
                }

                hl_set_max_power(hdev);
        } else {
                rc = hdev->asic_funcs->soft_reset_late_init(hdev);
                if (rc) {
                        dev_err(hdev->dev,
                                "Failed late init after soft reset\n");
                        goto out_err;
                }
        }

        atomic_set(&hdev->in_reset, 0);
        hdev->needs_reset = false;

        if (hard_reset)
                hdev->hard_reset_cnt++;
        else
                hdev->soft_reset_cnt++;

        dev_warn(hdev->dev, "Successfully finished resetting the device\n");

        return 0;

out_err:
        hdev->disabled = true;

        if (hard_reset) {
                dev_err(hdev->dev,
                        "Failed to reset! Device is NOT usable\n");
                hdev->hard_reset_cnt++;
        } else {
                dev_err(hdev->dev,
                        "Failed to do soft-reset, trying hard reset\n");
                hdev->soft_reset_cnt++;
                hard_reset = true;
                goto again;
        }

        atomic_set(&hdev->in_reset, 0);

        return rc;
}

/*
 * hl_device_init - main initialization function for habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Allocate an id for the device, do early initialization and then call the
 * ASIC specific initialization functions. Finally, create the cdev and the
 * Linux device to expose it to the user
 */
int hl_device_init(struct hl_device *hdev, struct class *hclass)
{
        int i, rc, cq_cnt, cq_ready_cnt;
        char *name;
        bool add_cdev_sysfs_on_err = false;

        name = kasprintf(GFP_KERNEL, "hl%d", hdev->id / 2);
        if (!name) {
                rc = -ENOMEM;
                goto out_disabled;
        }

        /* Initialize cdev and device structures */
        rc = device_init_cdev(hdev, hclass, hdev->id, &hl_ops, name,
                                &hdev->cdev, &hdev->dev);

        kfree(name);

        if (rc)
                goto out_disabled;

        name = kasprintf(GFP_KERNEL, "hl_controlD%d", hdev->id / 2);
        if (!name) {
                rc = -ENOMEM;
                goto free_dev;
        }

        /* Initialize cdev and device structures for control device */
        rc = device_init_cdev(hdev, hclass, hdev->id_control, &hl_ctrl_ops,
                                name, &hdev->cdev_ctrl, &hdev->dev_ctrl);

        kfree(name);

        if (rc)
                goto free_dev;

        /* Initialize ASIC function pointers and perform early init */
        rc = device_early_init(hdev);
        if (rc)
                goto free_dev_ctrl;

        /*
         * Start calling ASIC initialization. First S/W then H/W and finally
         * late init
         */
        rc = hdev->asic_funcs->sw_init(hdev);
        if (rc)
                goto early_fini;

        /*
         * Initialize the H/W queues. Must be done before hw_init, because
         * there the addresses of the kernel queue are being written to the
         * registers of the device
         */
        rc = hl_hw_queues_create(hdev);
        if (rc) {
                dev_err(hdev->dev, "failed to initialize kernel queues\n");
                goto sw_fini;
        }

        cq_cnt = hdev->asic_prop.completion_queues_count;

        /*
         * Initialize the completion queues. Must be done before hw_init,
         * because there the addresses of the completion queues are being
         * passed as arguments to request_irq
         */
        if (cq_cnt) {
                hdev->completion_queue = kcalloc(cq_cnt,
                                sizeof(*hdev->completion_queue),
                                GFP_KERNEL);

                if (!hdev->completion_queue) {
                        dev_err(hdev->dev,
                                "failed to allocate completion queues\n");
                        rc = -ENOMEM;
                        goto hw_queues_destroy;
                }
        }

        for (i = 0, cq_ready_cnt = 0 ; i < cq_cnt ; i++, cq_ready_cnt++) {
                rc = hl_cq_init(hdev, &hdev->completion_queue[i],
                                hdev->asic_funcs->get_queue_id_for_cq(hdev, i));
                if (rc) {
                        dev_err(hdev->dev,
                                "failed to initialize completion queue\n");
                        goto cq_fini;
                }
                hdev->completion_queue[i].cq_idx = i;
        }

        /*
         * Initialize the event queue. Must be done before hw_init,
         * because there the address of the event queue is being
         * passed as argument to request_irq
         */
        rc = hl_eq_init(hdev, &hdev->event_queue);
        if (rc) {
                dev_err(hdev->dev, "failed to initialize event queue\n");
                goto cq_fini;
        }

        /* MMU S/W must be initialized before kernel context is created */
        rc = hl_mmu_init(hdev);
        if (rc) {
                dev_err(hdev->dev, "Failed to initialize MMU S/W structures\n");
                goto eq_fini;
        }

        /* Allocate the kernel context */
        hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx), GFP_KERNEL);
        if (!hdev->kernel_ctx) {
                rc = -ENOMEM;
                goto mmu_fini;
        }

        hdev->compute_ctx = NULL;

        hl_debugfs_add_device(hdev);

        /* debugfs nodes are created in hl_ctx_init so it must be called after
         * hl_debugfs_add_device.
         */
        rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
        if (rc) {
                dev_err(hdev->dev, "failed to initialize kernel context\n");
                kfree(hdev->kernel_ctx);
                goto remove_device_from_debugfs;
        }

        rc = hl_cb_pool_init(hdev);
        if (rc) {
                dev_err(hdev->dev, "failed to initialize CB pool\n");
                goto release_ctx;
        }

        /*
         * From this point, in case of an error, add char devices and create
         * sysfs nodes as part of the error flow, to allow debugging.
         */
        add_cdev_sysfs_on_err = true;

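        /* From here on, a fatal H/W error zeroes rc before jumping to
         * out_disabled: the probe still succeeds and the device stays
         * registered (in MALFUNCTION state) so that it can be debugged
         */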
        /* Device is now enabled as part of the initialization requires
         * communication with the device firmware to get information that
         * is required for the initialization itself
         */
        hdev->disabled = false;

        rc = hdev->asic_funcs->hw_init(hdev);
        if (rc) {
                dev_err(hdev->dev, "failed to initialize the H/W\n");
                rc = 0;
                goto out_disabled;
        }

        /* Check that the communication with the device is working */
        rc = hdev->asic_funcs->test_queues(hdev);
        if (rc) {
                dev_err(hdev->dev, "Failed to detect if device is alive\n");
                rc = 0;
                goto out_disabled;
        }

        rc = device_late_init(hdev);
        if (rc) {
                dev_err(hdev->dev, "Failed late initialization\n");
                rc = 0;
                goto out_disabled;
        }

        dev_info(hdev->dev, "Found %s device with %lluGB DRAM\n",
                hdev->asic_name,
                hdev->asic_prop.dram_size / 1024 / 1024 / 1024);

        rc = hl_vm_init(hdev);
        if (rc) {
                dev_err(hdev->dev, "Failed to initialize memory module\n");
                rc = 0;
                goto out_disabled;
        }

        /*
         * Expose devices and sysfs nodes to user.
         * From here there is no need to add char devices and create sysfs nodes
         * in case of an error.
         */
        add_cdev_sysfs_on_err = false;
        rc = device_cdev_sysfs_add(hdev);
        if (rc) {
                dev_err(hdev->dev,
                        "Failed to add char devices and sysfs nodes\n");
                rc = 0;
                goto out_disabled;
        }

        /* Need to call this again because the max power might change,
         * depending on card type for certain ASICs
         */
        hl_set_max_power(hdev);

        /*
         * hl_hwmon_init() must be called after device_late_init(), because only
         * there we get the information from the device about which
         * hwmon-related sensors the device supports.
         * Furthermore, it must be done after adding the device to the system.
         */
        rc = hl_hwmon_init(hdev);
        if (rc) {
                dev_err(hdev->dev, "Failed to initialize hwmon\n");
                rc = 0;
                goto out_disabled;
        }

        dev_notice(hdev->dev,
                "Successfully added device to habanalabs driver\n");

        hdev->init_done = true;

        return 0;

release_ctx:
        if (hl_ctx_put(hdev->kernel_ctx) != 1)
                dev_err(hdev->dev,
                        "kernel ctx is still alive on initialization failure\n");
remove_device_from_debugfs:
        hl_debugfs_remove_device(hdev);
mmu_fini:
        hl_mmu_fini(hdev);
eq_fini:
        hl_eq_fini(hdev, &hdev->event_queue);
cq_fini:
        for (i = 0 ; i < cq_ready_cnt ; i++)
                hl_cq_fini(hdev, &hdev->completion_queue[i]);
        kfree(hdev->completion_queue);
hw_queues_destroy:
        hl_hw_queues_destroy(hdev);
sw_fini:
        hdev->asic_funcs->sw_fini(hdev);
early_fini:
        device_early_fini(hdev);
free_dev_ctrl:
        put_device(hdev->dev_ctrl);
free_dev:
        put_device(hdev->dev);
out_disabled:
        hdev->disabled = true;
        if (add_cdev_sysfs_on_err)
                device_cdev_sysfs_add(hdev);
        if (hdev->pdev)
                dev_err(&hdev->pdev->dev,
                        "Failed to initialize hl%d. Device is NOT usable !\n",
                        hdev->id / 2);
        else
                pr_err("Failed to initialize hl%d. Device is NOT usable !\n",
                        hdev->id / 2);

        return rc;
}

/*
 * hl_device_fini - main tear-down function for habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Destroy the device, call ASIC fini functions and release the id
 */
void hl_device_fini(struct hl_device *hdev)
{
        ktime_t timeout;
        int i, rc;

        dev_info(hdev->dev, "Removing device\n");

        hdev->device_fini_pending = 1;
        flush_delayed_work(&hdev->device_reset_work.reset_work);

        /*
         * This function is competing with the reset function, so try to
         * take the reset atomic and if we are already in middle of reset,
         * wait until reset function is finished. Reset function is designed
         * to always finish. However, in Gaudi, because of all the network
         * ports, the hard reset could take between 10-30 seconds
         */

        timeout = ktime_add_us(ktime_get(),
                                HL_HARD_RESET_MAX_TIMEOUT * 1000 * 1000);
        rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
        while (rc) {
                usleep_range(50, 200);
                rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
                if (ktime_compare(ktime_get(), timeout) > 0) {
                        dev_crit(hdev->dev,
                                "Failed to remove device because reset function did not finish\n");
                        return;
                }
        }

        /* Disable PCI access from the device F/W so it won't send us
         * additional interrupts. We disable MSI/MSI-X at the halt_engines
         * function and we can't have the F/W sending us interrupts after that.
         * We need to disable the access here because if the device is marked
         * disabled, the message won't be sent. Also, in case of heartbeat, the
         * device CPU is marked as disabled so this message won't be sent
         */
        hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);

        /* Mark device as disabled */
        hdev->disabled = true;

        /* Flush anyone that is inside the critical section of enqueue
         * jobs to the H/W
         */
        hdev->asic_funcs->hw_queues_lock(hdev);
        hdev->asic_funcs->hw_queues_unlock(hdev);

        /* Flush anyone that is inside device open */
        mutex_lock(&hdev->fpriv_list_lock);
        mutex_unlock(&hdev->fpriv_list_lock);

        hdev->hard_reset_pending = true;

        hl_hwmon_fini(hdev);

        device_late_fini(hdev);

        /*
         * Halt the engines and disable interrupts so we won't get any more
         * completions from H/W and we won't have any accesses from the
         * H/W to the host machine
         */
        hdev->asic_funcs->halt_engines(hdev, true);

        /* Go over all the queues, release all CS and their jobs */
        hl_cs_rollback_all(hdev);

        /* Kill processes here after CS rollback. This is because the process
         * can't really exit until all its CSs are done, which is what we
         * do in cs rollback
         */
        dev_info(hdev->dev,
                "Waiting for all processes to exit (timeout of %u seconds)\n",
                HL_PENDING_RESET_LONG_SEC);

        rc = device_kill_open_processes(hdev, HL_PENDING_RESET_LONG_SEC);
        if (rc)
                dev_crit(hdev->dev, "Failed to kill all open processes\n");

        hl_cb_pool_fini(hdev);

        /* Reset the H/W. It will be in idle state after this returns */
        hdev->asic_funcs->hw_fini(hdev, true);

        /* Release kernel context */
        if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))
                dev_err(hdev->dev, "kernel ctx is still alive\n");

        hl_debugfs_remove_device(hdev);

        hl_vm_fini(hdev);

        hl_mmu_fini(hdev);

        hl_eq_fini(hdev, &hdev->event_queue);

        for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
                hl_cq_fini(hdev, &hdev->completion_queue[i]);
        kfree(hdev->completion_queue);

        hl_hw_queues_destroy(hdev);

        /* Call ASIC S/W finalize function */
        hdev->asic_funcs->sw_fini(hdev);

        device_early_fini(hdev);

        /* Hide devices and sysfs nodes from user */
        device_cdev_sysfs_del(hdev);

        pr_info("removed device successfully\n");
}

/*
 * MMIO register access helper functions.
 */

/*
 * hl_rreg - Read an MMIO register
 *
 * @hdev: pointer to habanalabs device structure
 * @reg: MMIO register offset (in bytes)
 *
 * Returns the value of the MMIO register we are asked to read
 *
 */
inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
{
        return readl(hdev->rmmio + reg);
}

/*
 * hl_wreg - Write to an MMIO register
 *
 * @hdev: pointer to habanalabs device structure
 * @reg: MMIO register offset (in bytes)
 * @val: 32-bit value
 *
 * Writes the 32-bit value into the MMIO register
 *
 */
inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)
{
        writel(val, hdev->rmmio + reg);
}