/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"

#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/devcoredump.h>

/*
 * Power Management:
 */

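/*
 * The devfreq callbacks below let the devfreq governor (simple_ondemand)
 * scale the GPU clock: the governor polls get_dev_status() for busy vs.
 * total time and then calls target() with a recommended frequency.
 */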
static int msm_devfreq_target(struct device *dev, unsigned long *freq,
		u32 flags)
{
	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
	struct dev_pm_opp *opp;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	if (gpu->funcs->gpu_set_freq)
		gpu->funcs->gpu_set_freq(gpu, (u64)*freq);
	else
		clk_set_rate(gpu->core_clk, *freq);

	dev_pm_opp_put(opp);

	return 0;
}

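/*
 * Note for get_dev_status(): the busy figure comes from the hardware via
 * gpu_busy(), while total_time is the wall-clock interval since the
 * previous sample; the governor only uses their ratio.
 */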
static int msm_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *status)
{
	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
	ktime_t time;

	if (gpu->funcs->gpu_get_freq)
		status->current_frequency = gpu->funcs->gpu_get_freq(gpu);
	else
		status->current_frequency = clk_get_rate(gpu->core_clk);

	status->busy_time = gpu->funcs->gpu_busy(gpu);

	time = ktime_get();
	status->total_time = ktime_us_delta(time, gpu->devfreq.time);
	gpu->devfreq.time = time;

	return 0;
}

static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
{
	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));

	if (gpu->funcs->gpu_get_freq)
		*freq = gpu->funcs->gpu_get_freq(gpu);
	else
		*freq = clk_get_rate(gpu->core_clk);

	return 0;
}

static struct devfreq_dev_profile msm_devfreq_profile = {
	.polling_ms = 10,
	.target = msm_devfreq_target,
	.get_dev_status = msm_devfreq_get_dev_status,
	.get_cur_freq = msm_devfreq_get_cur_freq,
};

static void msm_devfreq_init(struct msm_gpu *gpu)
{
	/* We need target support to do devfreq */
	if (!gpu->funcs->gpu_busy)
		return;

	msm_devfreq_profile.initial_freq = gpu->fast_rate;

	/*
	 * Don't set the freq_table or max_state and let devfreq build the table
	 * from the OPP table.
	 */

	gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
			&msm_devfreq_profile, "simple_ondemand", NULL);

	if (IS_ERR(gpu->devfreq.devfreq)) {
		dev_err(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
		gpu->devfreq.devfreq = NULL;
	}

	devfreq_suspend_device(gpu->devfreq.devfreq);
}

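/*
 * Power-up ordering: rails first, then core clocks, then the AXI/bus
 * clock; msm_gpu_pm_suspend() tears things down in the reverse order.
 */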
static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}

static int enable_clk(struct msm_gpu *gpu)
{
	if (gpu->core_clk && gpu->fast_rate)
		clk_set_rate(gpu->core_clk, gpu->fast_rate);

	/* Set the RBBM timer rate to 19.2MHz */
	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 19200000);

	return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
}

static int disable_clk(struct msm_gpu *gpu)
{
	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);

	/*
	 * Set the clock to a deliberately low rate. On older targets the clock
	 * speed had to be non zero to avoid problems. On newer targets this
	 * will be rounded down to zero anyway so it all works out.
	 */
	if (gpu->core_clk)
		clk_set_rate(gpu->core_clk, 27000000);

	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 0);

	return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_prepare_enable(gpu->ebi1_clk);
	return 0;
}

static int disable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_disable_unprepare(gpu->ebi1_clk);
	return 0;
}

void msm_gpu_resume_devfreq(struct msm_gpu *gpu)
{
	gpu->devfreq.busy_cycles = 0;
	gpu->devfreq.time = ktime_get();

	devfreq_resume_device(gpu->devfreq.devfreq);
}

int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	msm_gpu_resume_devfreq(gpu);

	gpu->needs_hw_init = true;

	return 0;
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	devfreq_suspend_device(gpu->devfreq.devfreq);

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	return 0;
}

int msm_gpu_hw_init(struct msm_gpu *gpu)
{
	int ret;

	WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));

	if (!gpu->needs_hw_init)
		return 0;

	disable_irq(gpu->irq);
	ret = gpu->funcs->hw_init(gpu);
	if (!ret)
		gpu->needs_hw_init = false;
	enable_irq(gpu->irq);

	return ret;
}

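/*
 * Crash state capture, exported to userspace through devcoredump: the
 * read callback serializes the most recently captured crash state via a
 * drm_coredump_printer.
 */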
#ifdef CONFIG_DEV_COREDUMP
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
		size_t count, void *data, size_t datalen)
{
	struct msm_gpu *gpu = data;
	struct drm_print_iterator iter;
	struct drm_printer p;
	struct msm_gpu_state *state;

	state = msm_gpu_crashstate_get(gpu);
	if (!state)
		return 0;

	iter.data = buffer;
	iter.offset = 0;
	iter.start = offset;
	iter.remain = count;

	p = drm_coredump_printer(&iter);

	drm_printf(&p, "---\n");
	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
	drm_printf(&p, "time: %lld.%09ld\n",
		state->time.tv_sec, state->time.tv_nsec);
	if (state->comm)
		drm_printf(&p, "comm: %s\n", state->comm);
	if (state->cmd)
		drm_printf(&p, "cmdline: %s\n", state->cmd);

	gpu->funcs->show(gpu, state, &p);

	msm_gpu_crashstate_put(gpu);

	return count - iter.remain;
}

static void msm_gpu_devcoredump_free(void *data)
{
	struct msm_gpu *gpu = data;

	msm_gpu_crashstate_put(gpu);
}

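/*
 * Snapshot one BO from the hanging submit into the crash state; buffer
 * contents are only copied for BOs the GPU could have read from.
 */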
static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
		struct msm_gem_object *obj, u64 iova, u32 flags)
{
	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];

	/* Don't record write only objects */

	state_bo->size = obj->base.size;
	state_bo->iova = iova;

	/* Only store the data for buffer objects marked for read */
	if ((flags & MSM_SUBMIT_BO_READ)) {
		void *ptr;

		state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
		if (!state_bo->data)
			return;

		ptr = msm_gem_get_vaddr_active(&obj->base);
		if (IS_ERR(ptr)) {
			kvfree(state_bo->data);
			return;
		}

		memcpy(state_bo->data, ptr, obj->base.size);
		msm_gem_put_vaddr(&obj->base);
	}

	state->nr_bos++;
}

static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
	struct msm_gpu_state *state;

	/* Check if the target supports capturing crash state */
	if (!gpu->funcs->gpu_state_get)
		return;

	/* Only save one crash state at a time */
	if (gpu->crashstate)
		return;

	state = gpu->funcs->gpu_state_get(gpu);
	if (IS_ERR_OR_NULL(state))
		return;

	/* Fill in the additional crash state information */
	state->comm = kstrdup(comm, GFP_KERNEL);
	state->cmd = kstrdup(cmd, GFP_KERNEL);

	if (submit) {
		int i;

		state->bos = kcalloc(submit->nr_bos,
			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);

		for (i = 0; state->bos && i < submit->nr_bos; i++)
			msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
				submit->bos[i].iova, submit->bos[i].flags);
	}

	/* Set the active crash state to be dumped on failure */
	gpu->crashstate = state;

	/* FIXME: Release the crashstate if this errors out? */
	dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
		msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
}
#endif

/*
 * Hangcheck detection for locked gpu:
 */

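/*
 * update_fences() signals the dma-fence of every submit on the ring whose
 * sequence number is at or below the fence value the GPU has written back
 * to memory.
 */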
static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		uint32_t fence)
{
	struct msm_gem_submit *submit;

	list_for_each_entry(submit, &ring->submits, node) {
		if (submit->seqno > fence)
			break;

		msm_update_fence(submit->ring->fctx,
			submit->fence->seqno);
	}
}

static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
	struct msm_gem_submit *submit;

	WARN_ON(!mutex_is_locked(&ring->gpu->dev->struct_mutex));

	list_for_each_entry(submit, &ring->submits, node)
		if (submit->seqno == fence)
			return submit;

	return NULL;
}

static void retire_submits(struct msm_gpu *gpu);

static void recover_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;
	int i;

	mutex_lock(&dev->struct_mutex);

	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
	if (submit) {
		struct task_struct *task;

		task = get_pid_task(submit->pid, PIDTYPE_PID);
		if (task) {
			comm = kstrdup(task->comm, GFP_KERNEL);

			/*
			 * So slightly annoying, in other paths like
			 * mmap'ing gem buffers, mmap_sem is acquired
			 * before struct_mutex, which means we can't
			 * hold struct_mutex across the call to
			 * get_cmdline().  But submits are retired
			 * from the same in-order workqueue, so we can
			 * safely drop the lock here without worrying
			 * about the submit going away.
			 */
			mutex_unlock(&dev->struct_mutex);
			cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
			put_task_struct(task);
			mutex_lock(&dev->struct_mutex);
		}

		if (comm && cmd) {
			dev_err(dev->dev, "%s: offending task: %s (%s)\n",
				gpu->name, comm, cmd);

			msm_rd_dump_submit(priv->hangrd, submit,
				"offending task: %s (%s)", comm, cmd);
		} else
			msm_rd_dump_submit(priv->hangrd, submit, NULL);
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
	pm_runtime_put_sync(&gpu->pdev->dev);

	kfree(cmd);
	kfree(comm);

	/*
	 * Update all the rings with the latest and greatest fence.. this
	 * needs to happen after msm_rd_dump_submit() to ensure that the
	 * bo's referenced by the offending submit are still around.
	 */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		uint32_t fence = ring->memptrs->fence;

		/*
		 * For the current (faulting?) ring/submit advance the fence by
		 * one more to clear the faulting submit
		 */
		if (ring == cur_ring)
			fence++;

		update_fences(gpu, ring, fence);
	}

	if (msm_gpu_active(gpu)) {
		/* retire completed submits, plus the one that hung: */
		retire_submits(gpu);

		pm_runtime_get_sync(&gpu->pdev->dev);
		gpu->funcs->recover(gpu);
		pm_runtime_put_sync(&gpu->pdev->dev);

		/*
		 * Replay all remaining submits starting with highest priority
		 * ring
		 */
		for (i = 0; i < gpu->nr_rings; i++) {
			struct msm_ringbuffer *ring = gpu->rb[i];

			list_for_each_entry(submit, &ring->submits, node)
				gpu->funcs->submit(gpu, submit, NULL);
		}
	}

	mutex_unlock(&dev->struct_mutex);

	msm_gpu_retire(gpu);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);
	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}

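/*
 * The hangcheck timer fires periodically while work is pending; if the
 * completed fence has not advanced since the last check and submits are
 * still outstanding, the GPU is considered hung and recover_work runs.
 */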
static void hangcheck_handler(struct timer_list *t)
{
	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
	uint32_t fence = ring->memptrs->fence;

	if (fence != ring->hangcheck_fence) {
		/* some progress has been made.. ya! */
		ring->hangcheck_fence = fence;
	} else if (fence < ring->seqno) {
		/* no progress and not done.. hung! */
		ring->hangcheck_fence = fence;
		dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
				gpu->name, ring->id);
		dev_err(dev->dev, "%s:     completed fence: %u\n",
				gpu->name, fence);
		dev_err(dev->dev, "%s:     submitted fence: %u\n",
				gpu->name, ring->seqno);

		queue_work(priv->wq, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (ring->seqno > ring->hangcheck_fence)
		hangcheck_timer_reset(gpu);

	/* workaround for missing irq: */
	queue_work(priv->wq, &gpu->retire_work);
}

/*
 * Performance Counters:
 */

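/*
 * Two kinds of counters are tracked under perf_lock: hardware perfcounter
 * registers, sampled as deltas in update_hw_cntrs(), and software
 * active/total time accounting in update_sw_cntrs().
 */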
/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
	int i, n = min(ncntrs, gpu->num_perfcntrs);

	/* read current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

	/* update cntrs: */
	for (i = 0; i < n; i++)
		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

	/* save current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		gpu->last_cntrs[i] = current_cntrs[i];

	return n;
}

static void update_sw_cntrs(struct msm_gpu *gpu)
{
	ktime_t time;
	uint32_t elapsed;
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	if (!gpu->perfcntr_active)
		goto out;

	time = ktime_get();
	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

	gpu->totaltime += elapsed;
	if (gpu->last_sample.active)
		gpu->activetime += elapsed;

	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = time;

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

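/*
 * Profiling protocol: msm_gpu_perfcntr_start() zeroes the counters and
 * holds a runtime-PM reference, msm_gpu_perfcntr_sample() reads and
 * clears them, and msm_gpu_perfcntr_stop() drops the reference.
 */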
void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
	unsigned long flags;

	pm_runtime_get_sync(&gpu->pdev->dev);

	spin_lock_irqsave(&gpu->perf_lock, flags);
	/* we could dynamically enable/disable perfcntr registers too.. */
	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = ktime_get();
	gpu->activetime = gpu->totaltime = 0;
	gpu->perfcntr_active = true;
	update_hw_cntrs(gpu, 0, NULL);
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
	gpu->perfcntr_active = false;
	pm_runtime_put_sync(&gpu->pdev->dev);
}

/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;

	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}

/*
 * Cmdstream submission/retirement:
 */

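/*
 * Retirement runs from the retire_worker workqueue: fences are updated
 * from the writeback values in memptrs, then every signaled submit is
 * unwound (BOs moved to inactive, references and runtime-PM dropped).
 */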
static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		/* move to inactive: */
		msm_gem_move_to_inactive(&msm_obj->base);
		msm_gem_put_iova(&msm_obj->base, gpu->aspace);
		drm_gem_object_put(&msm_obj->base);
	}

	pm_runtime_mark_last_busy(&gpu->pdev->dev);
	pm_runtime_put_autosuspend(&gpu->pdev->dev);
	msm_gem_submit_free(submit);
}

static void retire_submits(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	struct msm_gem_submit *submit, *tmp;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Retire the commits starting with highest priority */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
			if (dma_fence_is_signaled(submit->fence))
				retire_submit(gpu, submit);
		}
	}
}

static void retire_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
	struct drm_device *dev = gpu->dev;
	int i;

	for (i = 0; i < gpu->nr_rings; i++)
		update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);

	mutex_lock(&dev->struct_mutex);
	retire_submits(gpu);
	mutex_unlock(&dev->struct_mutex);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;
	queue_work(priv->wq, &gpu->retire_work);
	update_sw_cntrs(gpu);
}

/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = submit->ring;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pm_runtime_get_sync(&gpu->pdev->dev);

	msm_gpu_hw_init(gpu);

	submit->seqno = ++ring->seqno;

	list_add_tail(&submit->node, &ring->submits);

	msm_rd_dump_submit(priv->rd, submit, NULL);

	update_sw_cntrs(gpu);

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint64_t iova;

		/* can't happen yet.. but when we add 2d support we'll have
		 * to deal w/ cross-ring synchronization:
		 */
		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));

		/* submit takes a reference to the bo and iova until retired: */
		drm_gem_object_get(&msm_obj->base);
		msm_gem_get_iova(&msm_obj->base,
				submit->gpu->aspace, &iova);

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
	}

	gpu->funcs->submit(gpu, submit, ctx);
	priv->lastctx = ctx;

	hangcheck_timer_reset(gpu);
}

/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;
	return gpu->funcs->irq(gpu);
}

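/*
 * Clocks come from the device tree as a bulk set; the "core" and
 * "rbbmtimer" clocks are additionally looked up by name so their rates
 * can be managed individually.
 */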
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
	int ret = msm_clk_bulk_get(&pdev->dev, &gpu->grp_clks);

	if (ret < 1) {
		gpu->nr_clocks = 0;
		return ret;
	}

	gpu->nr_clocks = ret;

	gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "core");

	gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "rbbmtimer");

	return 0;
}

static struct msm_gem_address_space *
msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
		uint64_t va_start, uint64_t va_end)
{
	struct iommu_domain *iommu;
	struct msm_gem_address_space *aspace;
	int ret;

	/*
	 * Setup IOMMU.. eventually we will (I think) do this once per context
	 * and have separate page tables per context.  For now, to keep things
	 * simple and to get something working, just use a single address space:
	 */
	iommu = iommu_domain_alloc(&platform_bus_type);
	if (!iommu)
		return NULL;

	iommu->geometry.aperture_start = va_start;
	iommu->geometry.aperture_end = va_end;

	dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);

	aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
	if (IS_ERR(aspace)) {
		dev_err(gpu->dev->dev, "failed to init iommu: %ld\n",
			PTR_ERR(aspace));
		iommu_domain_free(iommu);
		return ERR_CAST(aspace);
	}

	ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
	if (ret) {
		msm_gem_address_space_put(aspace);
		return ERR_PTR(ret);
	}

	return aspace;
}

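/*
 * msm_gpu_init() wires up everything the common layer needs: MMIO
 * registers, the IRQ, clocks, regulators, devfreq, the GPU address
 * space, the shared memptrs buffer, and finally the ringbuffer(s).
 */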
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config)
{
	int i, ret, nr_rings = config->nr_rings;
	void *memptrs;
	uint64_t memptrs_iova;

	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;

	INIT_LIST_HEAD(&gpu->active_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);

	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);

	spin_lock_init(&gpu->perf_lock);

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, config->ioname, name);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq_byname(pdev, config->irqname);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		dev_err(drm->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, gpu->name, gpu);
	if (ret) {
		dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	ret = get_clocks(pdev, gpu);
	if (ret)
		goto fail;

	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	gpu->pdev = pdev;
	platform_set_drvdata(pdev, gpu);

	msm_devfreq_init(gpu);

	gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
		config->va_start, config->va_end);

	if (gpu->aspace == NULL)
		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
	else if (IS_ERR(gpu->aspace)) {
		ret = PTR_ERR(gpu->aspace);
		goto fail;
	}

	memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs_bo),
		MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
		&memptrs_iova);

	if (IS_ERR(memptrs)) {
		ret = PTR_ERR(memptrs);
		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
		goto fail;
	}

	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
			ARRAY_SIZE(gpu->rb));
		nr_rings = ARRAY_SIZE(gpu->rb);
	}

	/* Create ringbuffer(s): */
	for (i = 0; i < nr_rings; i++) {
		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);

		if (IS_ERR(gpu->rb[i])) {
			ret = PTR_ERR(gpu->rb[i]);
			dev_err(drm->dev,
				"could not create ringbuffer %d: %d\n", i, ret);
			goto fail;
		}

		memptrs += sizeof(struct msm_rbmemptrs);
		memptrs_iova += sizeof(struct msm_rbmemptrs);
	}

	gpu->nr_rings = nr_rings;

	return 0;

fail:
	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	if (gpu->memptrs_bo) {
		msm_gem_put_vaddr(gpu->memptrs_bo);
		msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
		drm_gem_object_put_unlocked(gpu->memptrs_bo);
	}

	platform_set_drvdata(pdev, NULL);
	return ret;
}

void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	int i;

	DBG("%s", gpu->name);

	WARN_ON(!list_empty(&gpu->active_list));

	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	if (gpu->memptrs_bo) {
		msm_gem_put_vaddr(gpu->memptrs_bo);
		msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
		drm_gem_object_put_unlocked(gpu->memptrs_bo);
	}

	if (!IS_ERR_OR_NULL(gpu->aspace)) {
		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
				NULL, 0);
		msm_gem_address_space_put(gpu->aspace);
	}
}