// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "drm/drm_drv.h"

#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"
#include "msm_gpu_trace.h"
#include "adreno/adreno_gpu.h"

#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/devcoredump.h>
#include <linux/reset.h>
#include <linux/sched/task.h>
/*
 * Power Management:
 */

static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}
static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}
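/*
 * Note: the 'vdd' and 'vddcx' regulators are optional (e.g. not described
 * in DT on all targets); msm_gpu_init() below leaves the pointers NULL in
 * that case, hence the NULL checks in the enable/disable paths above.
 */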
static int enable_clk(struct msm_gpu *gpu)
{
	if (gpu->core_clk && gpu->fast_rate)
		clk_set_rate(gpu->core_clk, gpu->fast_rate);

	/* Set the RBBM timer rate to 19.2MHz */
	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 19200000);

	return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
}
static int disable_clk(struct msm_gpu *gpu)
{
	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);

	/*
	 * Set the clock to a deliberately low rate. On older targets the clock
	 * speed had to be non-zero to avoid problems. On newer targets this
	 * will be rounded down to zero anyway so it all works out.
	 */
	if (gpu->core_clk)
		clk_set_rate(gpu->core_clk, 27000000);

	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 0);

	return 0;
}
static int enable_axi(struct msm_gpu *gpu)
{
	return clk_prepare_enable(gpu->ebi1_clk);
}

static int disable_axi(struct msm_gpu *gpu)
{
	clk_disable_unprepare(gpu->ebi1_clk);
	return 0;
}
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);
	trace_msm_gpu_resume(0);

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	msm_devfreq_resume(gpu);

	gpu->needs_hw_init = true;

	return 0;
}
int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);
	trace_msm_gpu_suspend(0);

	msm_devfreq_suspend(gpu);

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	gpu->suspend_count++;

	return 0;
}
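/*
 * Illustrative sketch (not part of this file): msm_gpu_pm_resume() /
 * msm_gpu_pm_suspend() are typically reached via the driver's runtime-PM
 * callbacks, roughly like below. dev_to_gpu() and the exact wiring are
 * assumed here for illustration (they live elsewhere, e.g. in
 * adreno_device.c) and may differ between kernel versions:
 *
 *	static int gpu_runtime_resume(struct device *dev)
 *	{
 *		struct msm_gpu *gpu = dev_to_gpu(dev);
 *
 *		return gpu->funcs->pm_resume(gpu);
 *	}
 */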
void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
		struct drm_printer *p)
{
	drm_printf(p, "drm-driver:\t%s\n", gpu->dev->driver->name);
	drm_printf(p, "drm-client-id:\t%u\n", ctx->seqno);
	drm_printf(p, "drm-engine-gpu:\t%llu ns\n", ctx->elapsed_ns);
	drm_printf(p, "drm-cycles-gpu:\t%llu\n", ctx->cycles);
	drm_printf(p, "drm-maxfreq-gpu:\t%u Hz\n", gpu->fast_rate);
}
int msm_gpu_hw_init(struct msm_gpu *gpu)
{
	int ret = 0;

	WARN_ON(!mutex_is_locked(&gpu->lock));

	if (!gpu->needs_hw_init)
		return 0;

	disable_irq(gpu->irq);
	ret = gpu->funcs->hw_init(gpu);
	if (!ret)
		gpu->needs_hw_init = false;
	enable_irq(gpu->irq);

	return ret;
}
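/*
 * Note: msm_gpu_hw_init() masks the GPU interrupt across the hw_init()
 * call, presumably so that an IRQ raised by partially initialized hardware
 * is not serviced before the driver's state is consistent.
 */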
#ifdef CONFIG_DEV_COREDUMP
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
		size_t count, void *data, size_t datalen)
{
	struct msm_gpu *gpu = data;
	struct drm_print_iterator iter;
	struct drm_printer p;
	struct msm_gpu_state *state;

	state = msm_gpu_crashstate_get(gpu);
	if (!state)
		return 0;

	iter.data = buffer;
	iter.offset = 0;
	iter.start = offset;
	iter.remain = count;

	p = drm_coredump_printer(&iter);

	drm_printf(&p, "---\n");
	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
	drm_printf(&p, "time: %lld.%09ld\n",
		state->time.tv_sec, state->time.tv_nsec);
	if (state->comm)
		drm_printf(&p, "comm: %s\n", state->comm);
	if (state->cmd)
		drm_printf(&p, "cmdline: %s\n", state->cmd);

	gpu->funcs->show(gpu, state, &p);

	msm_gpu_crashstate_put(gpu);

	return count - iter.remain;
}
static void msm_gpu_devcoredump_free(void *data)
{
	struct msm_gpu *gpu = data;

	msm_gpu_crashstate_put(gpu);
}
static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
		struct msm_gem_object *obj, u64 iova, bool full)
{
	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];

	/* Don't record write only objects */
	state_bo->size = obj->base.size;
	state_bo->iova = iova;

	BUILD_BUG_ON(sizeof(state_bo->name) != sizeof(obj->name));

	memcpy(state_bo->name, obj->name, sizeof(state_bo->name));

	if (full) {
		void *ptr;

		state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
		if (!state_bo->data)
			goto out;

		msm_gem_lock(&obj->base);
		ptr = msm_gem_get_vaddr_active(&obj->base);
		msm_gem_unlock(&obj->base);
		if (IS_ERR(ptr)) {
			kvfree(state_bo->data);
			state_bo->data = NULL;
			goto out;
		}

		memcpy(state_bo->data, ptr, obj->base.size);
		msm_gem_put_vaddr(&obj->base);
	}
out:
	state->nr_bos++;
}
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
	struct msm_gpu_state *state;

	/* Check if the target supports capturing crash state */
	if (!gpu->funcs->gpu_state_get)
		return;

	/* Only save one crash state at a time */
	if (gpu->crashstate)
		return;

	state = gpu->funcs->gpu_state_get(gpu);
	if (IS_ERR_OR_NULL(state))
		return;

	/* Fill in the additional crash state information */
	state->comm = kstrdup(comm, GFP_KERNEL);
	state->cmd = kstrdup(cmd, GFP_KERNEL);
	state->fault_info = gpu->fault_info;

	if (submit) {
		int i;

		state->bos = kcalloc(submit->nr_bos,
			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);

		for (i = 0; state->bos && i < submit->nr_bos; i++) {
			msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
						  submit->bos[i].iova,
						  should_dump(submit, i));
		}
	}

	/* Set the active crash state to be dumped on failure */
	gpu->crashstate = state;

	/* FIXME: Release the crashstate if this errors out? */
	dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
		msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
}
#endif
/*
 * Hangcheck detection for locked gpu:
 */
static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
	struct msm_gem_submit *submit;
	unsigned long flags;

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_for_each_entry(submit, &ring->submits, node) {
		if (submit->seqno == fence) {
			spin_unlock_irqrestore(&ring->submit_lock, flags);
			return submit;
		}
	}
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	return NULL;
}
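/*
 * Callers below pass ring->memptrs->fence + 1, i.e. the seqno one past the
 * last fence the GPU has signalled, to locate the first submit that has not
 * yet completed (the presumed offender on a hang or iova fault).
 */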
static void retire_submits(struct msm_gpu *gpu);
static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **cmd)
{
	struct msm_file_private *ctx = submit->queue->ctx;
	struct task_struct *task;

	/* Note that kstrdup will return NULL if argument is NULL: */
	*comm = kstrdup(ctx->comm, GFP_KERNEL);
	*cmd = kstrdup(ctx->cmdline, GFP_KERNEL);

	task = get_pid_task(submit->pid, PIDTYPE_PID);
	if (!task)
		return;

	if (!*comm)
		*comm = kstrdup(task->comm, GFP_KERNEL);

	if (!*cmd)
		*cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);

	put_task_struct(task);
}
static void recover_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;
	int i;

	mutex_lock(&gpu->lock);

	DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
	if (submit) {
		/* Increment the fault counts */
		submit->queue->faults++;
		if (submit->aspace)
			submit->aspace->faults++;

		get_comm_cmdline(submit, &comm, &cmd);

		if (comm && cmd) {
			DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
				gpu->name, comm, cmd);

			msm_rd_dump_submit(priv->hangrd, submit,
				"offending task: %s (%s)", comm, cmd);
		} else {
			msm_rd_dump_submit(priv->hangrd, submit, NULL);
		}
	} else {
		/*
		 * We couldn't attribute this fault to any particular context,
		 * so increment the global fault count instead.
		 */
		gpu->global_faults++;
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);

	kfree(cmd);
	kfree(comm);

	/*
	 * Update all the rings with the latest and greatest fence.. this
	 * needs to happen after msm_rd_dump_submit() to ensure that the
	 * bo's referenced by the offending submit are still around.
	 */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		uint32_t fence = ring->memptrs->fence;

		/*
		 * For the current (faulting?) ring/submit advance the fence by
		 * one more to clear the faulting submit
		 */
		if (ring == cur_ring)
			ring->memptrs->fence = ++fence;

		msm_update_fence(ring->fctx, fence);
	}

	if (msm_gpu_active(gpu)) {
		/* retire completed submits, plus the one that hung: */
		retire_submits(gpu);

		gpu->funcs->recover(gpu);

		/*
		 * Replay all remaining submits starting with highest priority
		 * ring
		 */
		for (i = 0; i < gpu->nr_rings; i++) {
			struct msm_ringbuffer *ring = gpu->rb[i];
			unsigned long flags;

			spin_lock_irqsave(&ring->submit_lock, flags);
			list_for_each_entry(submit, &ring->submits, node)
				gpu->funcs->submit(gpu, submit);
			spin_unlock_irqrestore(&ring->submit_lock, flags);
		}
	}

	pm_runtime_put(&gpu->pdev->dev);

	mutex_unlock(&gpu->lock);

	msm_gpu_retire(gpu);
}
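/*
 * fault_worker below handles GPU iova faults reported by the SMMU, as
 * opposed to recover_worker above, which handles hangs flagged by the
 * hangcheck timer: the fault path only captures crash state and resumes
 * translation, rather than resetting the GPU and replaying submits.
 */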
static void fault_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;

	mutex_lock(&gpu->lock);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
	if (submit && submit->fault_dumped)
		goto resume_smmu;

	if (submit) {
		get_comm_cmdline(submit, &comm, &cmd);

		/*
		 * When we get GPU iova faults, we can get 1000s of them,
		 * but we really only want to log the first one.
		 */
		submit->fault_dumped = true;
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
	pm_runtime_put_sync(&gpu->pdev->dev);

	kfree(cmd);
	kfree(comm);

resume_smmu:
	memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
	gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);

	mutex_unlock(&gpu->lock);
}
static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;

	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + msecs_to_jiffies(priv->hangcheck_period)));
}
static bool made_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	if (ring->hangcheck_progress_retries >= DRM_MSM_HANGCHECK_PROGRESS_RETRIES)
		return false;

	if (!gpu->funcs->progress)
		return false;

	if (!gpu->funcs->progress(gpu, ring))
		return false;

	ring->hangcheck_progress_retries++;
	return true;
}
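/*
 * made_progress() lets a slow-but-alive ring survive up to
 * DRM_MSM_HANGCHECK_PROGRESS_RETRIES extra hangcheck periods: as long as
 * the target's progress() hook reports forward progress, the ring is not
 * declared hung even though its fence has not advanced.
 */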
static void hangcheck_handler(struct timer_list *t)
{
	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
	struct drm_device *dev = gpu->dev;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
	uint32_t fence = ring->memptrs->fence;

	if (fence != ring->hangcheck_fence) {
		/* some progress has been made.. ya! */
		ring->hangcheck_fence = fence;
		ring->hangcheck_progress_retries = 0;
	} else if (fence_before(fence, ring->fctx->last_fence) &&
			!made_progress(gpu, ring)) {
		/* no progress and not done.. hung! */
		ring->hangcheck_fence = fence;
		ring->hangcheck_progress_retries = 0;
		DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
				gpu->name, ring->id);
		DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
				gpu->name, fence);
		DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
				gpu->name, ring->fctx->last_fence);

		kthread_queue_work(gpu->worker, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (fence_after(ring->fctx->last_fence, ring->hangcheck_fence))
		hangcheck_timer_reset(gpu);

	/* workaround for missing irq: */
	msm_gpu_retire(gpu);
}
/*
 * Performance Counters:
 */

/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
	int i, n = min(ncntrs, gpu->num_perfcntrs);

	/* read current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

	/* update cntrs with deltas since the last sample: */
	for (i = 0; i < n; i++)
		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

	/* save current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		gpu->last_cntrs[i] = current_cntrs[i];

	return n;
}
static void update_sw_cntrs(struct msm_gpu *gpu)
{
	ktime_t time;
	uint32_t elapsed;
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	if (!gpu->perfcntr_active)
		goto out;

	time = ktime_get();
	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

	gpu->totaltime += elapsed;
	if (gpu->last_sample.active)
		gpu->activetime += elapsed;

	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = time;

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}
void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
	unsigned long flags;

	pm_runtime_get_sync(&gpu->pdev->dev);

	spin_lock_irqsave(&gpu->perf_lock, flags);
	/* we could dynamically enable/disable perfcntr registers too.. */
	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = ktime_get();
	gpu->activetime = gpu->totaltime = 0;
	gpu->perfcntr_active = true;
	update_hw_cntrs(gpu, 0, NULL);
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
	gpu->perfcntr_active = false;
	pm_runtime_put_sync(&gpu->pdev->dev);
}
/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;

	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}
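/*
 * Illustrative sketch (not part of this file): a profiler built on these
 * helpers would bracket a measurement window roughly like this, deriving a
 * busy percentage from the returned active/total times. Variable names
 * below are hypothetical:
 *
 *	msm_gpu_perfcntr_start(gpu);
 *	...
 *	nr = msm_gpu_perfcntr_sample(gpu, &active, &total, ncntrs, cntrs);
 *	if (nr >= 0 && total)
 *		busy_pct = (u64)active * 100 / total;
 *	...
 *	msm_gpu_perfcntr_stop(gpu);
 */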
/*
 * Cmdstream submission/retirement:
 */
static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		struct msm_gem_submit *submit)
{
	int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
	volatile struct msm_gpu_submit_stats *stats;
	u64 elapsed, clock = 0, cycles;
	unsigned long flags;

	stats = &ring->memptrs->stats[index];
	/* Convert 19.2MHz alwayson ticks to nanoseconds for elapsed time */
	elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
	do_div(elapsed, 192);
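	/*
	 * Worked example: one 19.2MHz tick is 1e9 / 19.2e6 = ~52.083ns,
	 * which is exactly 10000 / 192, hence the multiply-then-divide
	 * above to stay in integer arithmetic.
	 */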
	cycles = stats->cpcycles_end - stats->cpcycles_start;

	/* Calculate the clock frequency from the number of CP cycles */
	if (elapsed) {
		clock = cycles * 1000;
		do_div(clock, elapsed);
	}

	submit->queue->ctx->elapsed_ns += elapsed;
	submit->queue->ctx->cycles += cycles;

	trace_msm_gpu_submit_retired(submit, elapsed, clock,
		stats->alwayson_start, stats->alwayson_end);

	msm_submit_retire(submit);

	pm_runtime_mark_last_busy(&gpu->pdev->dev);

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_del(&submit->node);
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	/* Update devfreq on transition from active->idle: */
	mutex_lock(&gpu->active_lock);
	gpu->active_submits--;
	WARN_ON(gpu->active_submits < 0);
	if (!gpu->active_submits) {
		msm_devfreq_idle(gpu);
		pm_runtime_put_autosuspend(&gpu->pdev->dev);
	}

	mutex_unlock(&gpu->active_lock);

	msm_gem_submit_put(submit);
}
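/*
 * Note: the active_submits decrement above pairs with the increment in
 * msm_gpu_submit() below; devfreq notifications and the autosuspend pm
 * reference track the transitions between zero and non-zero in-flight
 * submits.
 */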
static void retire_submits(struct msm_gpu *gpu)
{
	int i;

	/* Retire the commits starting with highest priority */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		while (true) {
			struct msm_gem_submit *submit = NULL;
			unsigned long flags;

			spin_lock_irqsave(&ring->submit_lock, flags);
			submit = list_first_entry_or_null(&ring->submits,
					struct msm_gem_submit, node);
			spin_unlock_irqrestore(&ring->submit_lock, flags);

			/*
			 * If no submit, we are done.  If submit->fence hasn't
			 * been signalled, then later submits are not signalled
			 * either, so we are also done.
			 */
			if (submit && dma_fence_is_signaled(submit->hw_fence)) {
				retire_submit(gpu, ring, submit);
			} else {
				break;
			}
		}
	}

	wake_up_all(&gpu->retire_event);
}
static void retire_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);

	retire_submits(gpu);
}
/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++)
		msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence);

	kthread_queue_work(gpu->worker, &gpu->retire_work);
	update_sw_cntrs(gpu);
}
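/*
 * Note: per the comment above, msm_gpu_retire() is intended to be callable
 * from irq context: it only signals fences and queues retire_work; the
 * heavier retirement work (list manipulation, devfreq, pm) runs later in
 * retire_worker().
 */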
/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = submit->ring;
	unsigned long flags;

	WARN_ON(!mutex_is_locked(&gpu->lock));

	pm_runtime_get_sync(&gpu->pdev->dev);

	msm_gpu_hw_init(gpu);

	submit->seqno = submit->hw_fence->seqno;

	msm_rd_dump_submit(priv->rd, submit, NULL);

	update_sw_cntrs(gpu);

	/*
	 * ring->submits holds a ref to the submit, to deal with the case
	 * that a submit completes before msm_ioctl_gem_submit() returns.
	 */
	msm_gem_submit_get(submit);

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_add_tail(&submit->node, &ring->submits);
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	/* Update devfreq on transition from idle->active: */
	mutex_lock(&gpu->active_lock);
	if (!gpu->active_submits) {
		pm_runtime_get(&gpu->pdev->dev);
		msm_devfreq_active(gpu);
	}
	gpu->active_submits++;
	mutex_unlock(&gpu->active_lock);

	gpu->funcs->submit(gpu, submit);
	gpu->cur_ctx_seqno = submit->queue->ctx->seqno;

	pm_runtime_put(&gpu->pdev->dev);
	hangcheck_timer_reset(gpu);
}
/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;

	return gpu->funcs->irq(gpu);
}
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
	int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);

	if (ret < 1) {
		gpu->nr_clocks = 0;
		return ret;
	}

	gpu->nr_clocks = ret;

	gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "core");

	gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "rbbmtimer");

	return 0;
}
/* Return a new address space for a msm_drm_private instance */
struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
{
	struct msm_gem_address_space *aspace = NULL;

	if (!gpu)
		return NULL;

	/*
	 * If the target doesn't support private address spaces then return
	 * the global one
	 */
	if (gpu->funcs->create_private_address_space) {
		aspace = gpu->funcs->create_private_address_space(gpu);
		if (!IS_ERR(aspace))
			aspace->pid = get_pid(task_pid(task));
	}

	if (IS_ERR_OR_NULL(aspace))
		aspace = msm_gem_address_space_get(gpu->aspace);

	return aspace;
}
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config)
{
	struct msm_drm_private *priv = drm->dev_private;
	int i, ret, nr_rings = config->nr_rings;
	void *memptrs;
	uint64_t memptrs_iova;

	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;

	gpu->worker = kthread_create_worker(0, "gpu-worker");
	if (IS_ERR(gpu->worker)) {
		ret = PTR_ERR(gpu->worker);
		gpu->worker = NULL;
		goto fail;
	}

	sched_set_fifo_low(gpu->worker->task);

	mutex_init(&gpu->active_lock);
	mutex_init(&gpu->lock);
	init_waitqueue_head(&gpu->retire_event);
	kthread_init_work(&gpu->retire_work, retire_worker);
	kthread_init_work(&gpu->recover_work, recover_worker);
	kthread_init_work(&gpu->fault_work, fault_worker);

	priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;

	/*
	 * If progress detection is supported, halve the hangcheck timer
	 * duration, as it takes two iterations of the hangcheck handler
	 * to detect a hang.
	 */
	if (funcs->progress)
		priv->hangcheck_period /= 2;

	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);

	spin_lock_init(&gpu->perf_lock);

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, config->ioname);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, "gpu-irq", gpu);
	if (ret) {
		DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	ret = get_clocks(pdev, gpu);
	if (ret)
		goto fail;

	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	gpu->cx_collapse = devm_reset_control_get_optional_exclusive(&pdev->dev,
			"cx_collapse");

	gpu->pdev = pdev;
	platform_set_drvdata(pdev, &gpu->adreno_smmu);

	msm_devfreq_init(gpu);

	gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);

	if (gpu->aspace == NULL)
		DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
	else if (IS_ERR(gpu->aspace)) {
		ret = PTR_ERR(gpu->aspace);
		goto fail;
	}

	memptrs = msm_gem_kernel_new(drm,
		sizeof(struct msm_rbmemptrs) * nr_rings,
		check_apriv(gpu, MSM_BO_WC), gpu->aspace, &gpu->memptrs_bo,
		&memptrs_iova);

	if (IS_ERR(memptrs)) {
		ret = PTR_ERR(memptrs);
		DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
		goto fail;
	}

	msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");

	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
			ARRAY_SIZE(gpu->rb));
		nr_rings = ARRAY_SIZE(gpu->rb);
	}

	/* Create ringbuffer(s): */
	for (i = 0; i < nr_rings; i++) {
		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);

		if (IS_ERR(gpu->rb[i])) {
			ret = PTR_ERR(gpu->rb[i]);
			DRM_DEV_ERROR(drm->dev,
				"could not create ringbuffer %d: %d\n", i, ret);
			goto fail;
		}

		memptrs += sizeof(struct msm_rbmemptrs);
		memptrs_iova += sizeof(struct msm_rbmemptrs);
	}

	gpu->nr_rings = nr_rings;

	refcount_set(&gpu->sysprof_active, 1);

	return 0;

fail:
	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);

	platform_set_drvdata(pdev, NULL);
	return ret;
}
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	int i;

	DBG("%s", gpu->name);

	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);

	if (!IS_ERR_OR_NULL(gpu->aspace)) {
		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
		msm_gem_address_space_put(gpu->aspace);
	}

	if (gpu->worker)
		kthread_destroy_worker(gpu->worker);

	msm_devfreq_cleanup(gpu);

	platform_set_drvdata(gpu->pdev, NULL);
}