/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"

/*
 * Power Management:
 */

#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
static void bs_init(struct msm_gpu *gpu)
{
	if (gpu->bus_scale_table) {
		gpu->bsc = msm_bus_scale_register_client(gpu->bus_scale_table);
		DBG("bus scale client: %08x", gpu->bsc);
	}
}

static void bs_fini(struct msm_gpu *gpu)
{
	if (gpu->bsc) {
		msm_bus_scale_unregister_client(gpu->bsc);
		gpu->bsc = 0;
	}
}

static void bs_set(struct msm_gpu *gpu, int idx)
{
	if (gpu->bsc) {
		DBG("set bus scaling: %d", idx);
		msm_bus_scale_client_update_request(gpu->bsc, idx);
	}
}
#else
static void bs_init(struct msm_gpu *gpu) {}
static void bs_fini(struct msm_gpu *gpu) {}
static void bs_set(struct msm_gpu *gpu, int idx) {}
#endif

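/*
 * Bring up the (optional) GPU power rails: 'gpu_reg' (vdd) is enabled
 * before 'gpu_cx' (vddcx); a rail that was not found at init is skipped.
 */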
static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}

static int enable_clk(struct msm_gpu *gpu)
{
	int i;

	if (gpu->core_clk && gpu->fast_rate)
		clk_set_rate(gpu->core_clk, gpu->fast_rate);

	/* Set the RBBM timer rate to 19.2MHz */
	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 19200000);

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_prepare(gpu->grp_clks[i]);

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_enable(gpu->grp_clks[i]);

	return 0;
}

static int disable_clk(struct msm_gpu *gpu)
{
	int i;

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_disable(gpu->grp_clks[i]);

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_unprepare(gpu->grp_clks[i]);

	/*
	 * Set the clock to a deliberately low rate. On older targets the clock
	 * speed had to be non-zero to avoid problems. On newer targets this
	 * will be rounded down to zero anyway so it all works out.
	 */
	if (gpu->core_clk)
		clk_set_rate(gpu->core_clk, 27000000);

	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 0);

	return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_prepare_enable(gpu->ebi1_clk);
	if (gpu->bus_freq)
		bs_set(gpu, gpu->bus_freq);
	return 0;
}

static int disable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_disable_unprepare(gpu->ebi1_clk);
	if (gpu->bus_freq)
		bs_set(gpu, 0);
	return 0;
}

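/*
 * Resume powers things up in dependency order: power rails, then core
 * clocks, then the AXI/bus interface; suspend below reverses the order.
 * Resume also sets needs_hw_init so the next submission re-initializes
 * the hardware via msm_gpu_hw_init().
 */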
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	gpu->needs_hw_init = true;

	return 0;
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	return 0;
}

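/*
 * Caller must hold struct_mutex.  The IRQ is disabled across ->hw_init()
 * so the handler never runs against a partially initialized GPU;
 * needs_hw_init is only cleared on success.
 */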
int msm_gpu_hw_init(struct msm_gpu *gpu)
{
	int ret = 0;

	WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));

	if (!gpu->needs_hw_init)
		return 0;

	disable_irq(gpu->irq);
	ret = gpu->funcs->hw_init(gpu);
	if (!ret)
		gpu->needs_hw_init = false;
	enable_irq(gpu->irq);

	return ret;
}

/*
 * Hangcheck detection for locked gpu:
 */

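/*
 * A periodic timer compares the most recently completed fence with the
 * value seen on the previous tick.  No progress while work is still
 * outstanding is treated as a lockup, and recover_work is queued to
 * reset the GPU and replay the submits queued after the offending one.
 */
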
static void retire_submits(struct msm_gpu *gpu);

static void recover_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_gem_submit *submit;
	uint32_t fence = gpu->funcs->last_fence(gpu);

	/* mark the hung submit's fence as completed, so waiters are released: */
	msm_update_fence(gpu->fctx, fence + 1);

	mutex_lock(&dev->struct_mutex);

	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
	list_for_each_entry(submit, &gpu->submit_list, node) {
		if (submit->fence->seqno == (fence + 1)) {
			struct task_struct *task;

			rcu_read_lock();
			task = pid_task(submit->pid, PIDTYPE_PID);
			if (task) {
				dev_err(dev->dev, "%s: offending task: %s\n",
						gpu->name, task->comm);
			}
			rcu_read_unlock();
			break;
		}
	}

	if (msm_gpu_active(gpu)) {
		/* retire completed submits, plus the one that hung: */
		retire_submits(gpu);

		pm_runtime_get_sync(&gpu->pdev->dev);
		gpu->funcs->recover(gpu);
		pm_runtime_put_sync(&gpu->pdev->dev);

		/* replay the remaining submits after the one that hung: */
		list_for_each_entry(submit, &gpu->submit_list, node) {
			gpu->funcs->submit(gpu, submit, NULL);
		}
	}

	mutex_unlock(&dev->struct_mutex);

	msm_gpu_retire(gpu);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);
	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}

static void hangcheck_handler(unsigned long data)
{
	struct msm_gpu *gpu = (struct msm_gpu *)data;
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	uint32_t fence = gpu->funcs->last_fence(gpu);

	if (fence != gpu->hangcheck_fence) {
		/* some progress has been made.. ya! */
		gpu->hangcheck_fence = fence;
	} else if (fence < gpu->fctx->last_fence) {
		/* no progress and not done.. hung! */
		gpu->hangcheck_fence = fence;
		dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
				gpu->name);
		dev_err(dev->dev, "%s: completed fence: %u\n",
				gpu->name, fence);
		dev_err(dev->dev, "%s: submitted fence: %u\n",
				gpu->name, gpu->fctx->last_fence);
		queue_work(priv->wq, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (gpu->fctx->last_fence > gpu->hangcheck_fence)
		hangcheck_timer_reset(gpu);

	/* workaround for missing irq: */
	queue_work(priv->wq, &gpu->retire_work);
}

/*
 * Performance Counters:
 */

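/*
 * Hardware counters are sampled as deltas between reads, under
 * perf_lock; the sw counters (activetime/totaltime, in usecs) are
 * accumulated by update_sw_cntrs() whenever a submit or retire changes
 * whether the GPU is active.
 */
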
/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
	int i, n = min(ncntrs, gpu->num_perfcntrs);

	/* read current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

	/* update cntrs: */
	for (i = 0; i < n; i++)
		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

	/* save current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		gpu->last_cntrs[i] = current_cntrs[i];

	return n;
}

static void update_sw_cntrs(struct msm_gpu *gpu)
{
	ktime_t time;
	uint32_t elapsed;
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	if (!gpu->perfcntr_active)
		goto out;

	time = ktime_get();
	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

	gpu->totaltime += elapsed;
	if (gpu->last_sample.active)
		gpu->activetime += elapsed;

	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = time;

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
	unsigned long flags;

	pm_runtime_get_sync(&gpu->pdev->dev);

	spin_lock_irqsave(&gpu->perf_lock, flags);
	/* we could dynamically enable/disable perfcntr registers too.. */
	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = ktime_get();
	gpu->activetime = gpu->totaltime = 0;
	gpu->perfcntr_active = true;
	update_hw_cntrs(gpu, 0, NULL);
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
	gpu->perfcntr_active = false;
	pm_runtime_put_sync(&gpu->pdev->dev);
}

/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;

	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}

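/*
 * A minimal usage sketch (hypothetical caller, for illustration only):
 *
 *	uint32_t active, total, cntrs[4];
 *	int n;
 *
 *	msm_gpu_perfcntr_start(gpu);
 *	...	(let some work run)
 *	n = msm_gpu_perfcntr_sample(gpu, &active, &total,
 *			ARRAY_SIZE(cntrs), cntrs);
 *	if (n >= 0)
 *		...	(n counter deltas are now valid in cntrs[])
 *	msm_gpu_perfcntr_stop(gpu);
 */
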
/*
 * Cmdstream submission/retirement:
 */

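/*
 * A submit holds a reference on each bo, an iova pin, and a pm_runtime
 * vote (all taken in msm_gpu_submit()) until retire_submit() releases
 * them once the submit's fence has signaled.
 */
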
static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		/* move to inactive: */
		msm_gem_move_to_inactive(&msm_obj->base);
		msm_gem_put_iova(&msm_obj->base, gpu->aspace);
		drm_gem_object_unreference(&msm_obj->base);
	}

	pm_runtime_mark_last_busy(&gpu->pdev->dev);
	pm_runtime_put_autosuspend(&gpu->pdev->dev);
	msm_gem_submit_free(submit);
}

static void retire_submits(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	while (!list_empty(&gpu->submit_list)) {
		struct msm_gem_submit *submit;

		submit = list_first_entry(&gpu->submit_list,
				struct msm_gem_submit, node);

		if (dma_fence_is_signaled(submit->fence)) {
			retire_submit(gpu, submit);
		} else {
			break;
		}
	}
}

static void retire_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
	struct drm_device *dev = gpu->dev;
	uint32_t fence = gpu->funcs->last_fence(gpu);

	msm_update_fence(gpu->fctx, fence);

	mutex_lock(&dev->struct_mutex);
	retire_submits(gpu);
	mutex_unlock(&dev->struct_mutex);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;
	queue_work(priv->wq, &gpu->retire_work);
	update_sw_cntrs(gpu);
}

/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pm_runtime_get_sync(&gpu->pdev->dev);

	msm_gpu_hw_init(gpu);

	list_add_tail(&submit->node, &gpu->submit_list);

	msm_rd_dump_submit(submit);

	update_sw_cntrs(gpu);

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint64_t iova;

		/* can't happen yet.. but when we add 2d support we'll have
		 * to deal w/ cross-ring synchronization:
		 */
		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));

		/* submit takes a reference to the bo and iova until retired: */
		drm_gem_object_reference(&msm_obj->base);
		msm_gem_get_iova(&msm_obj->base,
				submit->gpu->aspace, &iova);

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
	}

	gpu->funcs->submit(gpu, submit, ctx);
	priv->lastctx = ctx;

	hangcheck_timer_reset(gpu);
}

/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;
	return gpu->funcs->irq(gpu);
}

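/* flatten devm_clk_get() errors to NULL so optional clocks are skipped */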
static struct clk *get_clock(struct device *dev, const char *name)
{
	struct clk *clk = devm_clk_get(dev, name);

	return IS_ERR(clk) ? NULL : clk;
}

static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
	struct device *dev = &pdev->dev;
	struct property *prop;
	const char *name;
	int i = 0;

	gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
	if (gpu->nr_clocks < 1) {
		gpu->nr_clocks = 0;
		return 0;
	}

	gpu->grp_clks = devm_kcalloc(dev, gpu->nr_clocks, sizeof(struct clk *),
		GFP_KERNEL);
	if (!gpu->grp_clks)
		return -ENOMEM;

	of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
		gpu->grp_clks[i] = get_clock(dev, name);

		/* Remember the key clocks that we need to control later */
		if (!strcmp(name, "core") || !strcmp(name, "core_clk"))
			gpu->core_clk = gpu->grp_clks[i];
		else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk"))
			gpu->rbbmtimer_clk = gpu->grp_clks[i];

		++i;
	}

	return 0;
}

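/*
 * For reference, a hypothetical DT fragment this parser would accept
 * (names are illustrative; only "core"/"core_clk" and
 * "rbbmtimer"/"rbbmtimer_clk" get special handling):
 *
 *	gpu {
 *		clocks = <&gcc 1>, <&gcc 2>, <&gcc 3>;
 *		clock-names = "core", "iface", "rbbmtimer";
 *	};
 */
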
static struct msm_gem_address_space *
msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
		uint64_t va_start, uint64_t va_end)
{
	struct iommu_domain *iommu;
	struct msm_gem_address_space *aspace;
	int ret;

	/*
	 * Setup IOMMU.. eventually we will (I think) do this once per context
	 * and have separate page tables per context.  For now, to keep things
	 * simple and to get something working, just use a single address space:
	 */
	iommu = iommu_domain_alloc(&platform_bus_type);
	if (!iommu)
		return NULL;

	iommu->geometry.aperture_start = va_start;
	iommu->geometry.aperture_end = va_end;

	dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);

	aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
	if (IS_ERR(aspace)) {
		dev_err(gpu->dev->dev, "failed to init iommu: %ld\n",
				PTR_ERR(aspace));
		iommu_domain_free(iommu);
		return ERR_CAST(aspace);
	}

	ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
	if (ret) {
		msm_gem_address_space_put(aspace);
		return ERR_PTR(ret);
	}

	return aspace;
}

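/*
 * Note the three-way return convention above: NULL means no IOMMU is
 * available (the caller falls back to a VRAM carveout), an ERR_PTR is a
 * real failure, and anything else is a usable address space.
 */
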
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config)
{
	int ret;

	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;
	gpu->fctx = msm_fence_context_alloc(drm, name);
	if (IS_ERR(gpu->fctx)) {
		ret = PTR_ERR(gpu->fctx);
		gpu->fctx = NULL;
		goto fail;
	}

	INIT_LIST_HEAD(&gpu->active_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);

	INIT_LIST_HEAD(&gpu->submit_list);

	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
			(unsigned long)gpu);

	spin_lock_init(&gpu->perf_lock);

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, config->ioname, name);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq_byname(pdev, config->irqname);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		dev_err(drm->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, gpu->name, gpu);
	if (ret) {
		dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	ret = get_clocks(pdev, gpu);
	if (ret)
		goto fail;

	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	gpu->pdev = pdev;
	platform_set_drvdata(pdev, gpu);

	bs_init(gpu);

	gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
		config->va_start, config->va_end);

	if (gpu->aspace == NULL)
		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
	else if (IS_ERR(gpu->aspace)) {
		ret = PTR_ERR(gpu->aspace);
		goto fail;
	}

	/* Create ringbuffer: */
	gpu->rb = msm_ringbuffer_new(gpu, config->ringsz);
	if (IS_ERR(gpu->rb)) {
		ret = PTR_ERR(gpu->rb);
		gpu->rb = NULL;
		dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
		goto fail;
	}

	return 0;

fail:
	platform_set_drvdata(pdev, NULL);
	return ret;
}

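/*
 * Teardown mirrors msm_gpu_init(); the caller is expected to have idled
 * the GPU first (hence the WARN_ON if active_list is not empty).
 */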
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);

	WARN_ON(!list_empty(&gpu->active_list));

	bs_fini(gpu);

	if (gpu->rb) {
		if (gpu->rb_iova)
			msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
		msm_ringbuffer_destroy(gpu->rb);
	}

	if (!IS_ERR_OR_NULL(gpu->aspace)) {
		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
				NULL, 0);
		msm_gem_address_space_put(gpu->aspace);
	}
}