/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/component.h>
#include <linux/fence.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
#include "etnaviv_dump.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_iommu_v2.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "cmdstream.xml.h"
static const struct platform_device_id gpu_ids[] = {
	{ .name = "etnaviv-gpu,2d" },
	{ },
};
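
/*
 * dump_core: when true, the first detected GPU hang produces a core dump
 * for post-mortem analysis; recover_worker() clears the flag so only the
 * first hang is captured until it is re-armed.  Being a regular module
 * parameter (mode 0600), it can be re-armed at runtime through the usual
 * /sys/module/<module>/parameters/dump_core interface.
 */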
static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
{
	switch (param) {
	case ETNAVIV_PARAM_GPU_MODEL:
		*value = gpu->identity.model;
		break;

	case ETNAVIV_PARAM_GPU_REVISION:
		*value = gpu->identity.revision;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_0:
		*value = gpu->identity.features;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_1:
		*value = gpu->identity.minor_features0;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_2:
		*value = gpu->identity.minor_features1;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_3:
		*value = gpu->identity.minor_features2;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_4:
		*value = gpu->identity.minor_features3;
		break;

	case ETNAVIV_PARAM_GPU_STREAM_COUNT:
		*value = gpu->identity.stream_count;
		break;

	case ETNAVIV_PARAM_GPU_REGISTER_MAX:
		*value = gpu->identity.register_max;
		break;

	case ETNAVIV_PARAM_GPU_THREAD_COUNT:
		*value = gpu->identity.thread_count;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
		*value = gpu->identity.vertex_cache_size;
		break;

	case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
		*value = gpu->identity.shader_core_count;
		break;

	case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
		*value = gpu->identity.pixel_pipes;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
		*value = gpu->identity.vertex_output_buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
		*value = gpu->identity.buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
		*value = gpu->identity.instruction_count;
		break;

	case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
		*value = gpu->identity.num_constants;
		break;

	default:
		DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
		return -EINVAL;
	}

	return 0;
}
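
/*
 * These identity values are what userspace sees: the DRM_ETNAVIV_GET_PARAM
 * ioctl (in etnaviv_drv.c) is expected to route its queries through
 * etnaviv_gpu_get_param() above.
 */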
static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		u32 specs[2];

		specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
		specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);

		gpu->identity.stream_count =
			(specs[0] & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK)
				>> VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT;
		gpu->identity.register_max =
			(specs[0] & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK)
				>> VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT;
		gpu->identity.thread_count =
			(specs[0] & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK)
				>> VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT;
		gpu->identity.vertex_cache_size =
			(specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK)
				>> VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT;
		gpu->identity.shader_core_count =
			(specs[0] & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK)
				>> VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT;
		gpu->identity.pixel_pipes =
			(specs[0] & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK)
				>> VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT;
		gpu->identity.vertex_output_buffer_size =
			(specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK)
				>> VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT;

		gpu->identity.buffer_size =
			(specs[1] & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK)
				>> VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT;
		gpu->identity.instruction_count =
			(specs[1] & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK)
				>> VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT;
		gpu->identity.num_constants =
			(specs[1] & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK)
				>> VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT;
	}
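
	/*
	 * Older cores predate the SPECS registers or leave individual
	 * fields at zero, so the remainder of this function backfills
	 * conservative defaults keyed off the probed model/revision.
	 */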
	/* Fill in the stream count if not specified */
	if (gpu->identity.stream_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.stream_count = 4;
		else
			gpu->identity.stream_count = 1;
	}

	/* Convert the register max value */
	if (gpu->identity.register_max)
		gpu->identity.register_max = 1 << gpu->identity.register_max;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.register_max = 32;
	else
		gpu->identity.register_max = 64;

	/* Convert thread count */
	if (gpu->identity.thread_count)
		gpu->identity.thread_count = 1 << gpu->identity.thread_count;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.thread_count = 64;
	else if (gpu->identity.model == chipModel_GC500 ||
		 gpu->identity.model == chipModel_GC530)
		gpu->identity.thread_count = 128;
	else
		gpu->identity.thread_count = 256;

	if (gpu->identity.vertex_cache_size == 0)
		gpu->identity.vertex_cache_size = 8;

	if (gpu->identity.shader_core_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.shader_core_count = 2;
		else
			gpu->identity.shader_core_count = 1;
	}

	if (gpu->identity.pixel_pipes == 0)
		gpu->identity.pixel_pipes = 1;

	/* Convert vertex buffer size */
	if (gpu->identity.vertex_output_buffer_size) {
		gpu->identity.vertex_output_buffer_size =
			1 << gpu->identity.vertex_output_buffer_size;
	} else if (gpu->identity.model == chipModel_GC400) {
		if (gpu->identity.revision < 0x4000)
			gpu->identity.vertex_output_buffer_size = 512;
		else if (gpu->identity.revision < 0x4200)
			gpu->identity.vertex_output_buffer_size = 256;
		else
			gpu->identity.vertex_output_buffer_size = 128;
	} else {
		gpu->identity.vertex_output_buffer_size = 512;
	}

	switch (gpu->identity.instruction_count) {
	case 0:
		if ((gpu->identity.model == chipModel_GC2000 &&
		     gpu->identity.revision == 0x5108) ||
		    gpu->identity.model == chipModel_GC880)
			gpu->identity.instruction_count = 512;
		else
			gpu->identity.instruction_count = 256;
		break;

	case 1:
		gpu->identity.instruction_count = 1024;
		break;

	case 2:
		gpu->identity.instruction_count = 2048;
		break;

	default:
		gpu->identity.instruction_count = 256;
		break;
	}

	if (gpu->identity.num_constants == 0)
		gpu->identity.num_constants = 168;
}
static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
{
	u32 chipIdentity;

	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);

	/* Special case for older graphics cores. */
	if (((chipIdentity & VIVS_HI_CHIP_IDENTITY_FAMILY__MASK)
	     >> VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT) == 0x01) {
		gpu->identity.model = chipModel_GC500;
		gpu->identity.revision =
			(chipIdentity & VIVS_HI_CHIP_IDENTITY_REVISION__MASK)
			>> VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT;
	} else {
		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);

		/*
		 * !!!! HACK ALERT !!!!
		 * Because people change device IDs without letting software
		 * know about it - here is the hack to make it all look the
		 * same.  Only for GC400 family.
		 */
		if ((gpu->identity.model & 0xff00) == 0x0400 &&
		    gpu->identity.model != chipModel_GC420) {
			gpu->identity.model = gpu->identity.model & 0x0400;
		}

		/* Another special case */
		if (gpu->identity.model == chipModel_GC300 &&
		    gpu->identity.revision == 0x2201) {
			u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);

			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
				/*
				 * This IP has an ECO; put the correct
				 * revision in it.
				 */
				gpu->identity.revision = 0x1051;
			}
		}
	}

	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
		 gpu->identity.model, gpu->identity.revision);

	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);

	/* Disable fast clear on GC700. */
	if (gpu->identity.model == chipModel_GC700)
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;

	if ((gpu->identity.model == chipModel_GC500 &&
	     gpu->identity.revision < 2) ||
	    (gpu->identity.model == chipModel_GC300 &&
	     gpu->identity.revision < 0x2000)) {
		/*
		 * GC500 rev 1.x and GC300 rev < 2.0 don't have these
		 * registers.
		 */
		gpu->identity.minor_features0 = 0;
		gpu->identity.minor_features1 = 0;
		gpu->identity.minor_features2 = 0;
		gpu->identity.minor_features3 = 0;
	} else {
		gpu->identity.minor_features0 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);
	}

	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		gpu->identity.minor_features1 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
		gpu->identity.minor_features2 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
		gpu->identity.minor_features3 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
	}

	/* GC600 idle register reports zero bits where modules aren't present */
	if (gpu->identity.model == chipModel_GC600) {
		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
				 VIVS_HI_IDLE_STATE_RA |
				 VIVS_HI_IDLE_STATE_SE |
				 VIVS_HI_IDLE_STATE_PA |
				 VIVS_HI_IDLE_STATE_SH |
				 VIVS_HI_IDLE_STATE_PE |
				 VIVS_HI_IDLE_STATE_DE |
				 VIVS_HI_IDLE_STATE_FE;
	} else {
		gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
	}

	etnaviv_hw_specs(gpu);
}
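
/*
 * The core clock FSCALE divider is assumed to only latch a new value
 * while the FSCALE_CMD_LOAD bit is set, hence the write with the load
 * bit followed by a second write without it below.
 */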
static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
{
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
}
static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
{
	u32 control, idle;
	unsigned long timeout;
	bool failed = true;

	/* We hope that the GPU resets in under one second */
	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_is_after_jiffies(timeout)) {
		control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
			  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

		/* enable clock */
		etnaviv_gpu_load_clock(gpu, control);

		/* Wait for stable clock.  Vivante's code waited for 1ms */
		usleep_range(1000, 10000);

		/* isolate the GPU. */
		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* set soft reset. */
		control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* wait for reset. */
		msleep(1);

		/* reset soft reset bit. */
		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* reset GPU isolation. */
		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* read idle register. */
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		/* try resetting again if FE is not idle */
		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
			dev_dbg(gpu->dev, "FE is not idle\n");
			continue;
		}

		/* read reset register. */
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		/* is the GPU idle? */
		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
			dev_dbg(gpu->dev, "GPU is not idle\n");
			continue;
		}

		failed = false;
		break;
	}

	if (failed) {
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");

		return -EBUSY;
	}

	/* We rely on the GPU running, so program the clock */
	control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

	/* enable clock */
	etnaviv_gpu_load_clock(gpu, control);

	return 0;
}
static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
	u16 prefetch;

	if (gpu->identity.model == chipModel_GC320 &&
	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400 &&
	    (gpu->identity.revision == 0x5007 ||
	     gpu->identity.revision == 0x5220)) {
		u32 mc_memory_debug;

		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;

		if (gpu->identity.revision == 0x5007)
			mc_memory_debug |= 0x0c;
		else
			mc_memory_debug |= 0x08;

		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
	}

	/*
	 * Update GPU AXI cache attribute to "cacheable, no allocate".
	 * This is necessary to prevent the iMX6 SoC locking up.
	 */
	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
		  VIVS_HI_AXI_CONFIG_ARCACHE(2));

	/* GC2000 rev 5108 needs a special bus config */
	if (gpu->identity.model == chipModel_GC2000 &&
	    gpu->identity.revision == 0x5108) {
		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);

		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
	}

	/* set base addresses */
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);

	/* setup the MMU page table pointers */
	etnaviv_iommu_domain_restore(gpu, gpu->mmu->domain);
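
	/*
	 * etnaviv_buffer_init() is assumed to prime the ring buffer with an
	 * initial WAIT/LINK sequence and return the prefetch count, in
	 * 64-bit command words, that the FE should start fetching.
	 */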
	/* Start command processor */
	prefetch = etnaviv_buffer_init(gpu);

	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS,
		  gpu->buffer->paddr - gpu->memory_base);
	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
		  VIVS_FE_COMMAND_CONTROL_ENABLE |
		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
}
int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
{
	int ret, i;
	struct iommu_domain *iommu;
	enum etnaviv_iommu_version version;
	bool mmuv2;

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		return ret;

	etnaviv_hw_identify(gpu);

	if (gpu->identity.model == 0) {
		dev_err(gpu->dev, "Unknown GPU model\n");
		ret = -ENXIO;
		goto fail;
	}

	/* Exclude VG cores with FE2.0 */
	if (gpu->identity.features & chipFeatures_PIPE_VG &&
	    gpu->identity.features & chipFeatures_FE20) {
		dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
		ret = -ENXIO;
		goto fail;
	}

	ret = etnaviv_hw_reset(gpu);
	if (ret)
		goto fail;

	/*
	 * Set up the IOMMU.  Eventually we will (I think) do this once per
	 * context and have separate page tables per context.  For now, to
	 * keep things simple and to get something working, just use a
	 * single address space:
	 */
	mmuv2 = gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION;
	dev_dbg(gpu->dev, "mmuv2: %d\n", mmuv2);

	if (!mmuv2) {
		iommu = etnaviv_iommu_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V1;
	} else {
		iommu = etnaviv_iommu_v2_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V2;
	}

	if (!iommu) {
		ret = -ENOMEM;
		goto fail;
	}

	/* TODO: we will leak memory here - fix it! */
	gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
	if (!gpu->mmu) {
		ret = -ENOMEM;
		goto fail;
	}

	/* Create buffer: */
	gpu->buffer = etnaviv_gpu_cmdbuf_new(gpu, PAGE_SIZE, 0);
	if (!gpu->buffer) {
		ret = -ENOMEM;
		dev_err(gpu->dev, "could not create command buffer\n");
		goto fail;
	}
	if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
		ret = -EINVAL;
		dev_err(gpu->dev,
			"command buffer outside valid memory window\n");
		goto free_buffer;
	}

	/* Setup event management */
	spin_lock_init(&gpu->event_spinlock);
	init_completion(&gpu->event_free);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		gpu->event[i].used = false;
		complete(&gpu->event_free);
	}

	/* Now program the hardware */
	mutex_lock(&gpu->lock);
	etnaviv_gpu_hw_init(gpu);
	mutex_unlock(&gpu->lock);

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;

free_buffer:
	etnaviv_gpu_cmdbuf_free(gpu->buffer);
	gpu->buffer = NULL;
fail:
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}
#ifdef CONFIG_DEBUG_FS
struct dma_debug {
	u32 address[2];
	u32 state[2];
};

static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
{
	u32 i;

	debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

	for (i = 0; i < 500; i++) {
		debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

		if (debug->address[0] != debug->address[1])
			break;

		if (debug->state[0] != debug->state[1])
			break;
	}
}
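
/*
 * verify_dma() above samples the FE DMA address/state twice, with a
 * bounded spin in between, so the dump below can tell a wedged front
 * end (both samples identical) from one that is merely busy.
 */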
int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct dma_debug debug;
	u32 dma_lo, dma_hi, axi, idle;
	int ret;

	seq_printf(m, "%s Status:\n", dev_name(gpu->dev));

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		return ret;

	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
	axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

	verify_dma(gpu, &debug);

	seq_puts(m, "\tfeatures\n");
	seq_printf(m, "\t minor_features0: 0x%08x\n",
		   gpu->identity.minor_features0);
	seq_printf(m, "\t minor_features1: 0x%08x\n",
		   gpu->identity.minor_features1);
	seq_printf(m, "\t minor_features2: 0x%08x\n",
		   gpu->identity.minor_features2);
	seq_printf(m, "\t minor_features3: 0x%08x\n",
		   gpu->identity.minor_features3);

	seq_puts(m, "\tspecs\n");
	seq_printf(m, "\t stream_count: %d\n",
		   gpu->identity.stream_count);
	seq_printf(m, "\t register_max: %d\n",
		   gpu->identity.register_max);
	seq_printf(m, "\t thread_count: %d\n",
		   gpu->identity.thread_count);
	seq_printf(m, "\t vertex_cache_size: %d\n",
		   gpu->identity.vertex_cache_size);
	seq_printf(m, "\t shader_core_count: %d\n",
		   gpu->identity.shader_core_count);
	seq_printf(m, "\t pixel_pipes: %d\n",
		   gpu->identity.pixel_pipes);
	seq_printf(m, "\t vertex_output_buffer_size: %d\n",
		   gpu->identity.vertex_output_buffer_size);
	seq_printf(m, "\t buffer_size: %d\n",
		   gpu->identity.buffer_size);
	seq_printf(m, "\t instruction_count: %d\n",
		   gpu->identity.instruction_count);
	seq_printf(m, "\t num_constants: %d\n",
		   gpu->identity.num_constants);

	seq_printf(m, "\taxi: 0x%08x\n", axi);
	seq_printf(m, "\tidle: 0x%08x\n", idle);
	idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
	if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
		seq_puts(m, "\t FE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
		seq_puts(m, "\t DE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
		seq_puts(m, "\t PE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
		seq_puts(m, "\t SH is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
		seq_puts(m, "\t PA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
		seq_puts(m, "\t SE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
		seq_puts(m, "\t RA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
		seq_puts(m, "\t TX is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
		seq_puts(m, "\t VG is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
		seq_puts(m, "\t IM is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
		seq_puts(m, "\t FP is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
		seq_puts(m, "\t TS is not idle\n");
	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
		seq_puts(m, "\t AXI low power mode\n");

	if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
		u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
		u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
		u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);

		seq_puts(m, "\tMC\n");
		seq_printf(m, "\t read0: 0x%08x\n", read0);
		seq_printf(m, "\t read1: 0x%08x\n", read1);
		seq_printf(m, "\t write: 0x%08x\n", write);
	}

	seq_puts(m, "\tDMA ");

	if (debug.address[0] == debug.address[1] &&
	    debug.state[0] == debug.state[1]) {
		seq_puts(m, "seems to be stuck\n");
	} else if (debug.address[0] == debug.address[1]) {
		seq_puts(m, "address is constant\n");
	} else {
		seq_puts(m, "is running\n");
	}

	seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
	seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
	seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
	seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
	seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
		   dma_lo, dma_hi);

	ret = 0;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}
#endif
static int enable_clk(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_core)
		clk_prepare_enable(gpu->clk_core);
	if (gpu->clk_shader)
		clk_prepare_enable(gpu->clk_shader);

	return 0;
}

static int disable_clk(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_core)
		clk_disable_unprepare(gpu->clk_core);
	if (gpu->clk_shader)
		clk_disable_unprepare(gpu->clk_shader);

	return 0;
}

static int enable_axi(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_bus)
		clk_prepare_enable(gpu->clk_bus);

	return 0;
}

static int disable_axi(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_bus)
		clk_disable_unprepare(gpu->clk_bus);

	return 0;
}
/*
 * Hangcheck detection for locked gpu:
 */
static void recover_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       recover_work);
	unsigned long flags;
	unsigned int i;

	dev_err(gpu->dev, "hangcheck recover!\n");

	if (pm_runtime_get_sync(gpu->dev) < 0)
		return;

	mutex_lock(&gpu->lock);

	/* Only catch the first event, or when manually re-armed */
	if (etnaviv_dump_core) {
		etnaviv_core_dump(gpu);
		etnaviv_dump_core = false;
	}

	etnaviv_hw_reset(gpu);

	/* complete all events, the GPU won't do it after the reset */
	spin_lock_irqsave(&gpu->event_spinlock, flags);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		if (!gpu->event[i].used)
			continue;
		fence_signal(gpu->event[i].fence);
		gpu->event[i].fence = NULL;
		gpu->event[i].used = false;
		complete(&gpu->event_free);
		/*
		 * Decrement the PM count for each stuck event.  This is safe
		 * even in atomic context as we use ASYNC RPM here.
		 */
		pm_runtime_put_autosuspend(gpu->dev);
	}
	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	gpu->completed_fence = gpu->active_fence;

	etnaviv_gpu_hw_init(gpu);
	gpu->switch_context = true;

	mutex_unlock(&gpu->lock);
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	/* Retire the buffer objects in a work */
	etnaviv_queue_work(gpu->drm, &gpu->retire_work);
}
static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
{
	DBG("%s", dev_name(gpu->dev));
	mod_timer(&gpu->hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
}
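
/*
 * Periodic lockup check: a hang is only declared when neither the
 * completed fence seqno nor the FE DMA address has advanced while work
 * is still outstanding; recovery is then punted to recover_worker().
 */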
static void hangcheck_handler(unsigned long data)
{
	struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data;
	u32 fence = gpu->completed_fence;
	bool progress = false;

	if (fence != gpu->hangcheck_fence) {
		gpu->hangcheck_fence = fence;
		progress = true;
	}

	if (!progress) {
		u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		int change = dma_addr - gpu->hangcheck_dma_addr;

		if (change < 0 || change > 16) {
			gpu->hangcheck_dma_addr = dma_addr;
			progress = true;
		}
	}

	if (!progress && fence_after(gpu->active_fence, fence)) {
		dev_err(gpu->dev, "hangcheck detected gpu lockup!\n");
		dev_err(gpu->dev, "     completed fence: %u\n", fence);
		dev_err(gpu->dev, "     active fence: %u\n",
			gpu->active_fence);
		etnaviv_queue_work(gpu->drm, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (fence_after(gpu->active_fence, gpu->hangcheck_fence))
		hangcheck_timer_reset(gpu);
}
static void hangcheck_disable(struct etnaviv_gpu *gpu)
{
	del_timer_sync(&gpu->hangcheck_timer);
	cancel_work_sync(&gpu->recover_work);
}
/* fence object management */
struct etnaviv_fence {
	struct etnaviv_gpu *gpu;
	struct fence base;
};

static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence)
{
	return container_of(fence, struct etnaviv_fence, base);
}

static const char *etnaviv_fence_get_driver_name(struct fence *fence)
{
	return "etnaviv";
}

static const char *etnaviv_fence_get_timeline_name(struct fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return dev_name(f->gpu->dev);
}

static bool etnaviv_fence_enable_signaling(struct fence *fence)
{
	return true;
}

static bool etnaviv_fence_signaled(struct fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return fence_completed(f->gpu, f->base.seqno);
}

static void etnaviv_fence_release(struct fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	kfree_rcu(f, base.rcu);
}

static const struct fence_ops etnaviv_fence_ops = {
	.get_driver_name = etnaviv_fence_get_driver_name,
	.get_timeline_name = etnaviv_fence_get_timeline_name,
	.enable_signaling = etnaviv_fence_enable_signaling,
	.signaled = etnaviv_fence_signaled,
	.wait = fence_default_wait,
	.release = etnaviv_fence_release,
};

static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_fence *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->gpu = gpu;

	fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
		   gpu->fence_context, ++gpu->next_fence);

	return &f->base;
}
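
/*
 * Each GPU carries a single fence timeline: fence_context is allocated
 * once at bind time and seqnos come from the monotonically increasing
 * next_fence, which lets fence_completed()/fence_after() compare raw
 * seqnos against the completed_fence watermark.
 */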
int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
	unsigned int context, bool exclusive)
{
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		ret = reservation_object_reserve_shared(robj);
		if (ret)
			return ret;
	}

	/*
	 * If we have any shared fences, then the exclusive fence
	 * should be ignored as it will already have been signalled.
	 */
	fobj = reservation_object_get_list(robj);
	if (!fobj || fobj->shared_count == 0) {
		/* Wait on any existing exclusive fence which isn't our own */
		fence = reservation_object_get_excl(robj);
		if (fence && fence->context != context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(robj));
		if (fence->context != context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}
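
/*
 * event management:
 *
 * gpu->event_free acts as a counting semaphore: etnaviv_gpu_init()
 * posts one count per event slot, event_alloc() consumes a count
 * (blocking while every slot is in flight) and event_free() returns it.
 */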
static unsigned int event_alloc(struct etnaviv_gpu *gpu)
{
	unsigned long ret, flags;
	unsigned int i, event = ~0U;

	ret = wait_for_completion_timeout(&gpu->event_free,
					  msecs_to_jiffies(10 * 10000));
	if (!ret)
		dev_err(gpu->dev, "wait_for_completion_timeout failed");

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	/* find first free event */
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		if (gpu->event[i].used == false) {
			gpu->event[i].used = true;
			event = i;
			break;
		}
	}

	spin_unlock_irqrestore(&gpu->event_spinlock, flags);

	return event;
}

static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
{
	unsigned long flags;

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	if (gpu->event[event].used == false) {
		dev_warn(gpu->dev, "event %u is already marked as free",
			 event);
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	} else {
		gpu->event[event].used = false;
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);

		complete(&gpu->event_free);
	}
}
/*
 * Cmdstream submission/retirement:
 */

struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
	size_t nr_bos)
{
	struct etnaviv_cmdbuf *cmdbuf;
	size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo[0]),
				 sizeof(*cmdbuf));

	cmdbuf = kzalloc(sz, GFP_KERNEL);
	if (!cmdbuf)
		return NULL;

	cmdbuf->vaddr = dma_alloc_writecombine(gpu->dev, size, &cmdbuf->paddr,
					       GFP_KERNEL);
	if (!cmdbuf->vaddr) {
		kfree(cmdbuf);
		return NULL;
	}

	cmdbuf->gpu = gpu;
	cmdbuf->size = size;

	return cmdbuf;
}

void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
{
	dma_free_writecombine(cmdbuf->gpu->dev, cmdbuf->size, cmdbuf->vaddr,
			      cmdbuf->paddr);
	kfree(cmdbuf);
}
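
/*
 * Command buffers are written once by the CPU and only read by the GPU,
 * so a write-combined DMA allocation (no kernel caching) avoids explicit
 * cache maintenance around every submission.
 */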
static void retire_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       retire_work);
	u32 fence = gpu->completed_fence;
	struct etnaviv_cmdbuf *cmdbuf, *tmp;
	unsigned int i;

	mutex_lock(&gpu->lock);
	list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
		if (!fence_is_signaled(cmdbuf->fence))
			break;

		list_del(&cmdbuf->node);
		fence_put(cmdbuf->fence);

		for (i = 0; i < cmdbuf->nr_bos; i++) {
			struct etnaviv_gem_object *etnaviv_obj = cmdbuf->bo[i];

			atomic_dec(&etnaviv_obj->gpu_active);
			/* drop the refcount taken in etnaviv_gpu_submit */
			etnaviv_gem_put_iova(gpu, &etnaviv_obj->base);
		}

		etnaviv_gpu_cmdbuf_free(cmdbuf);
	}

	gpu->retired_fence = fence;

	mutex_unlock(&gpu->lock);

	wake_up_all(&gpu->fence_event);
}
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 fence, struct timespec *timeout)
{
	int ret;

	if (fence_after(fence, gpu->next_fence)) {
		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
			  fence, gpu->next_fence);
		return -EINVAL;
	}

	if (!timeout) {
		/* No timeout was requested: just test for completion */
		ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
	} else {
		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);

		ret = wait_event_interruptible_timeout(gpu->fence_event,
						fence_completed(gpu, fence),
						remaining);
		if (ret == 0) {
			DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
			    fence, gpu->retired_fence,
			    gpu->completed_fence);
			ret = -ETIMEDOUT;
		} else if (ret != -ERESTARTSYS) {
			ret = 0;
		}
	}

	return ret;
}
/*
 * Wait for an object to become inactive.  This, on its own, is not race
 * free: the object is moved by the retire worker off the active list, and
 * then the iova is put.  Moreover, the object could be re-submitted just
 * after we notice that it's become inactive.
 *
 * Although the retirement happens under the gpu lock, we don't want to hold
 * that lock in this function while waiting.
 */
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
{
	unsigned long remaining;
	long ret;

	if (!timeout)
		return !is_active(etnaviv_obj) ? 0 : -EBUSY;

	remaining = etnaviv_timeout_to_jiffies(timeout);

	ret = wait_event_interruptible_timeout(gpu->fence_event,
					       !is_active(etnaviv_obj),
					       remaining);
	if (ret > 0) {
		struct etnaviv_drm_private *priv = gpu->drm->dev_private;

		/* Synchronise with the retire worker */
		flush_workqueue(priv->wq);
		return 0;
	} else if (ret == -ERESTARTSYS) {
		return -ERESTARTSYS;
	} else {
		return -ETIMEDOUT;
	}
}
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu)
{
	return pm_runtime_get_sync(gpu->dev);
}

void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
{
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);
}
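
/*
 * Runtime-PM bracketing for submission: etnaviv_gpu_pm_get_sync() and
 * etnaviv_gpu_pm_put() wrap etnaviv_gpu_submit() itself, while an extra
 * pm_runtime_get_noresume() reference is taken per queued command buffer
 * and released from the interrupt handler once its event has completed.
 */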
/* add bo's to gpu's ring, and kick gpu: */
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
{
	struct fence *fence;
	unsigned int event, i;
	int ret;

	ret = etnaviv_gpu_pm_get_sync(gpu);
	if (ret < 0)
		return ret;

	mutex_lock(&gpu->lock);

	event = event_alloc(gpu);
	if (unlikely(event == ~0U)) {
		DRM_ERROR("no free event\n");
		ret = -EBUSY;
		goto out_unlock;
	}

	fence = etnaviv_gpu_fence_alloc(gpu);
	if (!fence) {
		event_free(gpu, event);
		ret = -ENOMEM;
		goto out_unlock;
	}

	gpu->event[event].fence = fence;
	submit->fence = fence->seqno;
	gpu->active_fence = submit->fence;

	if (gpu->lastctx != cmdbuf->ctx) {
		gpu->mmu->need_flush = true;
		gpu->switch_context = true;
		gpu->lastctx = cmdbuf->ctx;
	}

	etnaviv_buffer_queue(gpu, event, cmdbuf);

	cmdbuf->fence = fence;
	list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);

	/* We're committed to adding this command buffer, hold a PM reference */
	pm_runtime_get_noresume(gpu->dev);

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
		u32 iova;

		/* Each cmdbuf takes a refcount on the iova */
		etnaviv_gem_get_iova(gpu, &etnaviv_obj->base, &iova);
		cmdbuf->bo[i] = etnaviv_obj;
		atomic_inc(&etnaviv_obj->gpu_active);

		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
			reservation_object_add_excl_fence(etnaviv_obj->resv,
							  fence);
		else
			reservation_object_add_shared_fence(etnaviv_obj->resv,
							    fence);
	}
	cmdbuf->nr_bos = submit->nr_bos;
	hangcheck_timer_reset(gpu);
	ret = 0;

out_unlock:
	mutex_unlock(&gpu->lock);

	etnaviv_gpu_pm_put(gpu);

	return ret;
}
static irqreturn_t irq_handler(int irq, void *data)
{
	struct etnaviv_gpu *gpu = data;
	irqreturn_t ret = IRQ_NONE;

	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);

	if (intr != 0) {
		int event;

		pm_runtime_mark_last_busy(gpu->dev);

		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
			dev_err(gpu->dev, "AXI bus error\n");
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
		}

		while ((event = ffs(intr)) != 0) {
			struct fence *fence;

			event -= 1;

			intr &= ~(1 << event);

			dev_dbg(gpu->dev, "event %u\n", event);

			fence = gpu->event[event].fence;
			gpu->event[event].fence = NULL;
			fence_signal(fence);

			/*
			 * Events can be processed out of order.  Eg,
			 * - allocate and queue event 0
			 * - allocate event 1
			 * - event 0 completes, we process it
			 * - allocate and queue event 0
			 * - event 1 and event 0 complete
			 * we can end up processing event 0 first, then 1.
			 */
			if (fence_after(fence->seqno, gpu->completed_fence))
				gpu->completed_fence = fence->seqno;

			event_free(gpu, event);

			/*
			 * We need to balance the runtime PM count caused by
			 * each submission.  Upon submission, we increment
			 * the runtime PM counter, and allocate one event.
			 * So here, we put the runtime PM count for each
			 * completed event.
			 */
			pm_runtime_put_autosuspend(gpu->dev);
		}

		/* Retire the buffer objects in a work */
		etnaviv_queue_work(gpu->drm, &gpu->retire_work);

		ret = IRQ_HANDLED;
	}

	return ret;
}
static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret) {
		disable_clk(gpu);
		return ret;
	}

	return 0;
}

static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	return 0;
}
static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
	unsigned long timeout;

	/* Replace the last WAIT with END */
	etnaviv_buffer_end(gpu);

	/*
	 * We know that only the FE is busy here, this should
	 * happen quickly (as the WAIT is only 200 cycles).  If
	 * we fail, just warn and continue.
	 */
	timeout = jiffies + msecs_to_jiffies(100);

	do {
		u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		if ((idle & gpu->idle_mask) == gpu->idle_mask)
			break;

		if (time_is_before_jiffies(timeout)) {
			dev_warn(gpu->dev,
				 "timed out waiting for idle: idle=0x%x\n",
				 idle);
			break;
		}

		udelay(5);
	} while (1);

	return etnaviv_gpu_clk_disable(gpu);
}
static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
{
	u32 clock;
	int ret;

	ret = mutex_lock_killable(&gpu->lock);
	if (ret)
		return ret;

	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

	etnaviv_gpu_load_clock(gpu, clock);
	etnaviv_gpu_hw_init(gpu);

	gpu->switch_context = true;

	mutex_unlock(&gpu->lock);

	return 0;
}
static int etnaviv_gpu_bind(struct device *dev, struct device *master,
	void *data)
{
	struct drm_device *drm = data;
	struct etnaviv_drm_private *priv = drm->dev_private;
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

#ifdef CONFIG_PM
	ret = pm_runtime_get_sync(gpu->dev);
#else
	ret = etnaviv_gpu_clk_enable(gpu);
#endif
	if (ret < 0)
		return ret;

	gpu->drm = drm;
	gpu->fence_context = fence_context_alloc(1);
	spin_lock_init(&gpu->fence_spinlock);

	INIT_LIST_HEAD(&gpu->active_cmd_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);
	init_waitqueue_head(&gpu->fence_event);

	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
		    (unsigned long)gpu);

	priv->gpu[priv->num_gpus++] = gpu;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;
}
static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
	void *data)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);

	DBG("%s", dev_name(gpu->dev));

	hangcheck_disable(gpu);

#ifdef CONFIG_PM
	pm_runtime_get_sync(gpu->dev);
	pm_runtime_put_sync_suspend(gpu->dev);
#else
	etnaviv_gpu_hw_suspend(gpu);
#endif

	if (gpu->buffer) {
		etnaviv_gpu_cmdbuf_free(gpu->buffer);
		gpu->buffer = NULL;
	}

	if (gpu->mmu) {
		etnaviv_iommu_destroy(gpu->mmu);
		gpu->mmu = NULL;
	}

	gpu->drm = NULL;
}
static const struct component_ops gpu_ops = {
	.bind = etnaviv_gpu_bind,
	.unbind = etnaviv_gpu_unbind,
};

static const struct of_device_id etnaviv_gpu_match[] = {
	{
		.compatible = "vivante,gc"
	},
	{ /* sentinel */ }
};
static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct etnaviv_gpu *gpu;
	int err = 0;

	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
	if (!gpu)
		return -ENOMEM;

	gpu->dev = &pdev->dev;
	mutex_init(&gpu->lock);

	/*
	 * Set the GPU base address to the start of physical memory.  This
	 * ensures that if we have up to 2GB, the v1 MMU can address the
	 * highest memory.  This is important as command buffers may be
	 * allocated outside of this limit.
	 */
	gpu->memory_base = PHYS_OFFSET;

	/* Map registers: */
	gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
	if (IS_ERR(gpu->mmio))
		return PTR_ERR(gpu->mmio);

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		err = gpu->irq;
		dev_err(dev, "failed to get irq: %d\n", err);
		goto fail;
	}

	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
			       dev_name(gpu->dev), gpu);
	if (err) {
		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
		goto fail;
	}

	/* Get Clocks: */
	gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
	DBG("clk_bus: %p", gpu->clk_bus);
	if (IS_ERR(gpu->clk_bus))
		gpu->clk_bus = NULL;

	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
	DBG("clk_core: %p", gpu->clk_core);
	if (IS_ERR(gpu->clk_core))
		gpu->clk_core = NULL;

	gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
	DBG("clk_shader: %p", gpu->clk_shader);
	if (IS_ERR(gpu->clk_shader))
		gpu->clk_shader = NULL;

	/* TODO: figure out max mapped size */
	dev_set_drvdata(dev, gpu);

	/*
	 * We treat the device as initially suspended.  The runtime PM
	 * autosuspend delay is rather arbitrary: no measurements have
	 * yet been performed to determine an appropriate value.
	 */
	pm_runtime_use_autosuspend(gpu->dev);
	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
	pm_runtime_enable(gpu->dev);

	err = component_add(&pdev->dev, &gpu_ops);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register component: %d\n", err);
		goto fail;
	}

	return 0;

fail:
	return err;
}
static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &gpu_ops);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

#ifdef CONFIG_PM
static int etnaviv_gpu_rpm_suspend(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	u32 idle, mask;

	/* If we have outstanding fences, we're not idle */
	if (gpu->completed_fence != gpu->active_fence)
		return -EBUSY;

	/* Check whether the hardware (except FE) is idle */
	mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
	if (idle != mask)
		return -EBUSY;

	return etnaviv_gpu_hw_suspend(gpu);
}
static int etnaviv_gpu_rpm_resume(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	ret = etnaviv_gpu_clk_enable(gpu);
	if (ret)
		return ret;

	/* Re-initialise the basic hardware state */
	if (gpu->drm && gpu->buffer) {
		ret = etnaviv_gpu_hw_resume(gpu);
		if (ret) {
			etnaviv_gpu_clk_disable(gpu);
			return ret;
		}
	}

	return 0;
}
#endif
static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
	SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
			   NULL)
};

struct platform_driver etnaviv_gpu_driver = {
	.driver = {
		.name = "etnaviv-gpu",
		.owner = THIS_MODULE,
		.pm = &etnaviv_gpu_pm_ops,
		.of_match_table = etnaviv_gpu_match,
	},
	.probe = etnaviv_gpu_platform_probe,
	.remove = etnaviv_gpu_platform_remove,
	.id_table = gpu_ids,
};