/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/component.h>
#include <linux/fence.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>

#include "etnaviv_dump.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_iommu_v2.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "cmdstream.xml.h"
static const struct platform_device_id gpu_ids[] = {
	{ .name = "etnaviv-gpu,2d" },
	{ },
};
static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
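
/*
 * Note: dump_core is consumed by recover_worker() below - the first GPU
 * hang produces a core dump and clears the flag. Given the 0600 permissions
 * above, it can presumably be re-armed at runtime by writing 1 to
 * /sys/module/etnaviv/parameters/dump_core.
 */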

int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
{
	switch (param) {
	case ETNAVIV_PARAM_GPU_MODEL:
		*value = gpu->identity.model;
		break;
	case ETNAVIV_PARAM_GPU_REVISION:
		*value = gpu->identity.revision;
		break;
	case ETNAVIV_PARAM_GPU_FEATURES_0:
		*value = gpu->identity.features;
		break;
	case ETNAVIV_PARAM_GPU_FEATURES_1:
		*value = gpu->identity.minor_features0;
		break;
	case ETNAVIV_PARAM_GPU_FEATURES_2:
		*value = gpu->identity.minor_features1;
		break;
	case ETNAVIV_PARAM_GPU_FEATURES_3:
		*value = gpu->identity.minor_features2;
		break;
	case ETNAVIV_PARAM_GPU_FEATURES_4:
		*value = gpu->identity.minor_features3;
		break;
	case ETNAVIV_PARAM_GPU_STREAM_COUNT:
		*value = gpu->identity.stream_count;
		break;
	case ETNAVIV_PARAM_GPU_REGISTER_MAX:
		*value = gpu->identity.register_max;
		break;
	case ETNAVIV_PARAM_GPU_THREAD_COUNT:
		*value = gpu->identity.thread_count;
		break;
	case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
		*value = gpu->identity.vertex_cache_size;
		break;
	case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
		*value = gpu->identity.shader_core_count;
		break;
	case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
		*value = gpu->identity.pixel_pipes;
		break;
	case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
		*value = gpu->identity.vertex_output_buffer_size;
		break;
	case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
		*value = gpu->identity.buffer_size;
		break;
	case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
		*value = gpu->identity.instruction_count;
		break;
	case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
		*value = gpu->identity.num_constants;
		break;
	default:
		DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
		return -EINVAL;
	}

	return 0;
}

static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		u32 specs[2];

		specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
		specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);

		gpu->identity.stream_count =
			(specs[0] & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK)
				>> VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT;
		gpu->identity.register_max =
			(specs[0] & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK)
				>> VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT;
		gpu->identity.thread_count =
			(specs[0] & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK)
				>> VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT;
		gpu->identity.vertex_cache_size =
			(specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK)
				>> VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT;
		gpu->identity.shader_core_count =
			(specs[0] & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK)
				>> VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT;
		gpu->identity.pixel_pipes =
			(specs[0] & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK)
				>> VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT;
		gpu->identity.vertex_output_buffer_size =
			(specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK)
				>> VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT;

		gpu->identity.buffer_size =
			(specs[1] & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK)
				>> VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT;
		gpu->identity.instruction_count =
			(specs[1] & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK)
				>> VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT;
		gpu->identity.num_constants =
			(specs[1] & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK)
				>> VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT;
	}
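
	/*
	 * All of the reads above follow the same decode pattern: each
	 * *__MASK/*__SHIFT pair extracts one bit-field from a SPECS
	 * register, i.e. field = (reg & MASK) >> SHIFT. The raw values
	 * are then fixed up below for cores that report zeros.
	 */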

	/* Fill in the stream count if not specified */
	if (gpu->identity.stream_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.stream_count = 4;
		else
			gpu->identity.stream_count = 1;
	}

	/* Convert the register max value */
	if (gpu->identity.register_max)
		gpu->identity.register_max = 1 << gpu->identity.register_max;
	else if (gpu->identity.model == 0x0400)
		gpu->identity.register_max = 32;
	else
		gpu->identity.register_max = 64;
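
	/*
	 * The SPECS fields above are log2-encoded: a raw register_max of 5
	 * decodes to 1 << 5 = 32 registers. thread_count and
	 * vertex_output_buffer_size below use the same power-of-two encoding.
	 */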

	/* Convert thread count */
	if (gpu->identity.thread_count)
		gpu->identity.thread_count = 1 << gpu->identity.thread_count;
	else if (gpu->identity.model == 0x0400)
		gpu->identity.thread_count = 64;
	else if (gpu->identity.model == 0x0500 ||
		 gpu->identity.model == 0x0530)
		gpu->identity.thread_count = 128;
	else
		gpu->identity.thread_count = 256;

	if (gpu->identity.vertex_cache_size == 0)
		gpu->identity.vertex_cache_size = 8;

	if (gpu->identity.shader_core_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.shader_core_count = 2;
		else
			gpu->identity.shader_core_count = 1;
	}

	if (gpu->identity.pixel_pipes == 0)
		gpu->identity.pixel_pipes = 1;

	/* Convert vertex buffer size */
	if (gpu->identity.vertex_output_buffer_size) {
		gpu->identity.vertex_output_buffer_size =
			1 << gpu->identity.vertex_output_buffer_size;
	} else if (gpu->identity.model == 0x0400) {
		if (gpu->identity.revision < 0x4000)
			gpu->identity.vertex_output_buffer_size = 512;
		else if (gpu->identity.revision < 0x4200)
			gpu->identity.vertex_output_buffer_size = 256;
		else
			gpu->identity.vertex_output_buffer_size = 128;
	} else {
		gpu->identity.vertex_output_buffer_size = 512;
	}

	switch (gpu->identity.instruction_count) {
	case 0:
		if ((gpu->identity.model == 0x2000 &&
		     gpu->identity.revision == 0x5108) ||
		    gpu->identity.model == 0x880)
			gpu->identity.instruction_count = 512;
		else
			gpu->identity.instruction_count = 256;
		break;

	case 1:
		gpu->identity.instruction_count = 1024;
		break;

	case 2:
		gpu->identity.instruction_count = 2048;
		break;

	default:
		gpu->identity.instruction_count = 256;
		break;
	}

	if (gpu->identity.num_constants == 0)
		gpu->identity.num_constants = 168;
}

static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
{
	u32 chipIdentity;

	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);

	/* Special case for older graphics cores. */
	if (VIVS_HI_CHIP_IDENTITY_FAMILY(chipIdentity) == 0x01) {
		gpu->identity.model = 0x500; /* gc500 */
		gpu->identity.revision = VIVS_HI_CHIP_IDENTITY_REVISION(chipIdentity);
	} else {
		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);

		/*
		 * !!!! HACK ALERT !!!!
		 * Because people change device IDs without letting software
		 * know about it - here is the hack to make it all look the
		 * same.  Only for GC400 family.
		 */
		if ((gpu->identity.model & 0xff00) == 0x0400 &&
		    gpu->identity.model != 0x0420)
			gpu->identity.model = gpu->identity.model & 0x0400;

		/* Another special case */
		if (gpu->identity.model == 0x300 &&
		    gpu->identity.revision == 0x2201) {
			u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);

			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
				/*
				 * This IP has an ECO; put the correct
				 * revision in it.
				 */
				gpu->identity.revision = 0x1051;
			}
		}
	}

	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
		 gpu->identity.model, gpu->identity.revision);

	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);

	/* Disable fast clear on GC700. */
	if (gpu->identity.model == 0x700)
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;

	if ((gpu->identity.model == 0x500 && gpu->identity.revision < 2) ||
	    (gpu->identity.model == 0x300 && gpu->identity.revision < 0x2000)) {
		/*
		 * GC500 rev 1.x and GC300 rev < 2.0 don't have these
		 * registers.
		 */
		gpu->identity.minor_features0 = 0;
		gpu->identity.minor_features1 = 0;
		gpu->identity.minor_features2 = 0;
		gpu->identity.minor_features3 = 0;
	} else
		gpu->identity.minor_features0 =
			gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);

	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		gpu->identity.minor_features1 =
			gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
		gpu->identity.minor_features2 =
			gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
		gpu->identity.minor_features3 =
			gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
	}

	/* GC600 idle register reports zero bits where modules aren't present */
	if (gpu->identity.model == chipModel_GC600) {
		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
				 VIVS_HI_IDLE_STATE_RA |
				 VIVS_HI_IDLE_STATE_SE |
				 VIVS_HI_IDLE_STATE_PA |
				 VIVS_HI_IDLE_STATE_SH |
				 VIVS_HI_IDLE_STATE_PE |
				 VIVS_HI_IDLE_STATE_DE |
				 VIVS_HI_IDLE_STATE_FE;
	} else {
		gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
	}

	etnaviv_hw_specs(gpu);
}

static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
{
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
}
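
/*
 * The FSCALE value is loaded with a two-write handshake: the first write
 * presents the new value with the FSCALE_CMD_LOAD strobe set, the second
 * clears the strobe while keeping the value, which appears to latch it
 * into the clock divider.
 */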

static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
{
	u32 control, idle;
	unsigned long timeout;
	bool failed = true;

	/* We hope that the GPU resets in under one second */
	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_is_after_jiffies(timeout)) {
		control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
			  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

		/* enable clock */
		etnaviv_gpu_load_clock(gpu, control);

		/* Wait for stable clock.  Vivante's code waited for 1ms */
		usleep_range(1000, 10000);

		/* isolate the GPU. */
		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* set soft reset. */
		control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* wait for reset. */
		usleep_range(10, 20);

		/* reset soft reset bit. */
		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* reset GPU isolation. */
		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* read idle register. */
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		/* try resetting again if FE is not idle */
		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
			dev_dbg(gpu->dev, "FE is not idle\n");
			continue;
		}

		/* read reset register. */
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		/* is the GPU idle? */
		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
			dev_dbg(gpu->dev, "GPU is not idle\n");
			continue;
		}

		failed = false;
		break;
	}

	if (failed) {
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");

		return -EBUSY;
	}

	/* We rely on the GPU running, so program the clock */
	control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

	/* enable clock */
	etnaviv_gpu_load_clock(gpu, control);

	return 0;
}

static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
	u16 prefetch;

	if (gpu->identity.model == chipModel_GC320 &&
	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400 &&
	    (gpu->identity.revision == 0x5007 ||
	     gpu->identity.revision == 0x5220)) {
		u32 mc_memory_debug;

		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;

		if (gpu->identity.revision == 0x5007)
			mc_memory_debug |= 0x0c;
		else
			mc_memory_debug |= 0x08;

		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
	}

	/*
	 * Update GPU AXI cache attribute to "cacheable, no allocate".
	 * This is necessary to prevent the iMX6 SoC locking up.
	 */
	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
		  VIVS_HI_AXI_CONFIG_ARCACHE(2));

	/* GC2000 rev 5108 needs a special bus config */
	if (gpu->identity.model == 0x2000 && gpu->identity.revision == 0x5108) {
		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);

		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
	}

	/* set base addresses */
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);

	/* setup the MMU page table pointers */
	etnaviv_iommu_domain_restore(gpu, gpu->mmu->domain);

	/* Start command processor */
	prefetch = etnaviv_buffer_init(gpu);

	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS,
		  gpu->buffer->paddr - gpu->memory_base);
	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
		  VIVS_FE_COMMAND_CONTROL_ENABLE |
		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
}
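
/*
 * A sketch of the FE kick-off above: the front end starts fetching at
 * COMMAND_ADDRESS (programmed relative to memory_base) and executes
 * 'prefetch' units as returned by etnaviv_buffer_init() - presumably the
 * length of the initial command buffer prologue - before settling into
 * its WAIT/LINK loop.
 */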

int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
{
	int ret, i;
	struct iommu_domain *iommu;
	enum etnaviv_iommu_version version;
	bool mmuv2;

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		return ret;

	etnaviv_hw_identify(gpu);

	if (gpu->identity.model == 0) {
		dev_err(gpu->dev, "Unknown GPU model\n");
		pm_runtime_put_autosuspend(gpu->dev);
		return -ENXIO;
	}

	ret = etnaviv_hw_reset(gpu);
	if (ret)
		goto fail;

	/* Setup IOMMU.. eventually we will (I think) do this once per context
	 * and have separate page tables per context.  For now, to keep things
	 * simple and to get something working, just use a single address space:
	 */
	mmuv2 = gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION;
	dev_dbg(gpu->dev, "mmuv2: %d\n", mmuv2);

	if (!mmuv2) {
		iommu = etnaviv_iommu_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V1;
	} else {
		iommu = etnaviv_iommu_v2_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V2;
	}

	if (!iommu) {
		ret = -ENOMEM;
		goto fail;
	}

	/* TODO: we will leak memory here - fix it! */
	gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
	if (!gpu->mmu) {
		ret = -ENOMEM;
		goto fail;
	}

	/* Create buffer: */
	gpu->buffer = etnaviv_gpu_cmdbuf_new(gpu, PAGE_SIZE, 0);
	if (!gpu->buffer) {
		ret = -ENOMEM;
		dev_err(gpu->dev, "could not create command buffer\n");
		goto fail;
	}

	if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
		ret = -EINVAL;
		dev_err(gpu->dev,
			"command buffer outside valid memory window\n");
		goto free_buffer;
	}

	/* Setup event management */
	spin_lock_init(&gpu->event_spinlock);
	init_completion(&gpu->event_free);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		gpu->event[i].used = false;
		complete(&gpu->event_free);
	}

	/* Now program the hardware */
	mutex_lock(&gpu->lock);
	etnaviv_gpu_hw_init(gpu);
	mutex_unlock(&gpu->lock);

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;

free_buffer:
	etnaviv_gpu_cmdbuf_free(gpu->buffer);
	gpu->buffer = NULL;
fail:
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}

#ifdef CONFIG_DEBUG_FS
struct dma_debug {
	u32 address[2];
	u32 state[2];
};

static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
{
	u32 i;

	debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

	for (i = 0; i < 500; i++) {
		debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

		if (debug->address[0] != debug->address[1])
			break;

		if (debug->state[0] != debug->state[1])
			break;
	}
}
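
/*
 * verify_dma() above samples the FE DMA address/state twice, polling up to
 * 500 times in between: if neither register changes, the DMA engine is most
 * likely stuck. The debugfs code below prints both samples so they can be
 * compared by hand.
 */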

int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct dma_debug debug;
	u32 dma_lo, dma_hi, axi, idle;
	int ret;

	seq_printf(m, "%s Status:\n", dev_name(gpu->dev));

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		return ret;

	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
	axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

	verify_dma(gpu, &debug);

	seq_puts(m, "\tfeatures\n");
	seq_printf(m, "\t minor_features0: 0x%08x\n",
		   gpu->identity.minor_features0);
	seq_printf(m, "\t minor_features1: 0x%08x\n",
		   gpu->identity.minor_features1);
	seq_printf(m, "\t minor_features2: 0x%08x\n",
		   gpu->identity.minor_features2);
	seq_printf(m, "\t minor_features3: 0x%08x\n",
		   gpu->identity.minor_features3);

	seq_puts(m, "\tspecs\n");
	seq_printf(m, "\t stream_count: %d\n",
		   gpu->identity.stream_count);
	seq_printf(m, "\t register_max: %d\n",
		   gpu->identity.register_max);
	seq_printf(m, "\t thread_count: %d\n",
		   gpu->identity.thread_count);
	seq_printf(m, "\t vertex_cache_size: %d\n",
		   gpu->identity.vertex_cache_size);
	seq_printf(m, "\t shader_core_count: %d\n",
		   gpu->identity.shader_core_count);
	seq_printf(m, "\t pixel_pipes: %d\n",
		   gpu->identity.pixel_pipes);
	seq_printf(m, "\t vertex_output_buffer_size: %d\n",
		   gpu->identity.vertex_output_buffer_size);
	seq_printf(m, "\t buffer_size: %d\n",
		   gpu->identity.buffer_size);
	seq_printf(m, "\t instruction_count: %d\n",
		   gpu->identity.instruction_count);
	seq_printf(m, "\t num_constants: %d\n",
		   gpu->identity.num_constants);

	seq_printf(m, "\taxi: 0x%08x\n", axi);
	seq_printf(m, "\tidle: 0x%08x\n", idle);
	idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
	if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
		seq_puts(m, "\t FE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
		seq_puts(m, "\t DE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
		seq_puts(m, "\t PE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
		seq_puts(m, "\t SH is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
		seq_puts(m, "\t PA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
		seq_puts(m, "\t SE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
		seq_puts(m, "\t RA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
		seq_puts(m, "\t TX is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
		seq_puts(m, "\t VG is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
		seq_puts(m, "\t IM is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
		seq_puts(m, "\t FP is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
		seq_puts(m, "\t TS is not idle\n");
	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
		seq_puts(m, "\t AXI low power mode\n");

	if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
		u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
		u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
		u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);

		seq_puts(m, "\tMC\n");
		seq_printf(m, "\t read0: 0x%08x\n", read0);
		seq_printf(m, "\t read1: 0x%08x\n", read1);
		seq_printf(m, "\t write: 0x%08x\n", write);
	}

	seq_puts(m, "\tDMA ");

	if (debug.address[0] == debug.address[1] &&
	    debug.state[0] == debug.state[1]) {
		seq_puts(m, "seems to be stuck\n");
	} else if (debug.address[0] == debug.address[1]) {
		seq_puts(m, "address is constant\n");
	} else {
		seq_puts(m, "is running\n");
	}

	seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
	seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
	seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
	seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
	seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
		   dma_lo, dma_hi);

	ret = 0;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}
#endif

static int enable_clk(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_core)
		clk_prepare_enable(gpu->clk_core);
	if (gpu->clk_shader)
		clk_prepare_enable(gpu->clk_shader);

	return 0;
}

static int disable_clk(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_core)
		clk_disable_unprepare(gpu->clk_core);
	if (gpu->clk_shader)
		clk_disable_unprepare(gpu->clk_shader);

	return 0;
}

static int enable_axi(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_bus)
		clk_prepare_enable(gpu->clk_bus);

	return 0;
}

static int disable_axi(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_bus)
		clk_disable_unprepare(gpu->clk_bus);

	return 0;
}

/*
 * Hangcheck detection for locked gpu:
 */
static void recover_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       recover_work);
	unsigned long flags;
	unsigned int i;

	dev_err(gpu->dev, "hangcheck recover!\n");

	if (pm_runtime_get_sync(gpu->dev) < 0)
		return;

	mutex_lock(&gpu->lock);

	/* Only catch the first event, or when manually re-armed */
	if (etnaviv_dump_core) {
		etnaviv_core_dump(gpu);
		etnaviv_dump_core = false;
	}

	etnaviv_hw_reset(gpu);

	/* complete all events, the GPU won't do it after the reset */
	spin_lock_irqsave(&gpu->event_spinlock, flags);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		if (!gpu->event[i].used)
			continue;
		fence_signal(gpu->event[i].fence);
		gpu->event[i].fence = NULL;
		gpu->event[i].used = false;
		complete(&gpu->event_free);
		/*
		 * Decrement the PM count for each stuck event. This is safe
		 * even in atomic context as we use ASYNC RPM here.
		 */
		pm_runtime_put_autosuspend(gpu->dev);
	}
	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	gpu->completed_fence = gpu->active_fence;

	etnaviv_gpu_hw_init(gpu);
	gpu->switch_context = true;

	mutex_unlock(&gpu->lock);
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	/* Retire the buffer objects in a work */
	etnaviv_queue_work(gpu->drm, &gpu->retire_work);
}

static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
{
	DBG("%s", dev_name(gpu->dev));
	mod_timer(&gpu->hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
}
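
/*
 * Each submit re-arms the timer (see etnaviv_gpu_submit below), so the
 * handler fires one DRM_ETNAVIV_HANGCHECK_JIFFIES period after the last
 * activity and decides whether any progress has been made since.
 */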

static void hangcheck_handler(unsigned long data)
{
	struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data;
	u32 fence = gpu->completed_fence;
	bool progress = false;

	if (fence != gpu->hangcheck_fence) {
		gpu->hangcheck_fence = fence;
		progress = true;
	} else {
		u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		int change = dma_addr - gpu->hangcheck_dma_addr;

		if (change < 0 || change > 16) {
			gpu->hangcheck_dma_addr = dma_addr;
			progress = true;
		}
	}

	if (!progress && fence_after(gpu->active_fence, fence)) {
		dev_err(gpu->dev, "hangcheck detected gpu lockup!\n");
		dev_err(gpu->dev, "     completed fence: %u\n", fence);
		dev_err(gpu->dev, "     active fence: %u\n",
			gpu->active_fence);
		etnaviv_queue_work(gpu->drm, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (fence_after(gpu->active_fence, gpu->hangcheck_fence))
		hangcheck_timer_reset(gpu);
}
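
/*
 * The progress heuristic above: a hang is only declared when neither the
 * completed fence nor the FE DMA address has moved meaningfully (a change
 * of 0..16 bytes is likely just the FE spinning in its wait loop) while
 * work is still outstanding, i.e. active_fence is ahead of completed_fence.
 */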

static void hangcheck_disable(struct etnaviv_gpu *gpu)
{
	del_timer_sync(&gpu->hangcheck_timer);
	cancel_work_sync(&gpu->recover_work);
}

/* fence object management */
struct etnaviv_fence {
	struct etnaviv_gpu *gpu;
	struct fence base;
};

static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence)
{
	return container_of(fence, struct etnaviv_fence, base);
}

static const char *etnaviv_fence_get_driver_name(struct fence *fence)
{
	return "etnaviv";
}

static const char *etnaviv_fence_get_timeline_name(struct fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return dev_name(f->gpu->dev);
}

static bool etnaviv_fence_enable_signaling(struct fence *fence)
{
	return true;
}

static bool etnaviv_fence_signaled(struct fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return fence_completed(f->gpu, f->base.seqno);
}

static void etnaviv_fence_release(struct fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	kfree_rcu(f, base.rcu);
}

static const struct fence_ops etnaviv_fence_ops = {
	.get_driver_name = etnaviv_fence_get_driver_name,
	.get_timeline_name = etnaviv_fence_get_timeline_name,
	.enable_signaling = etnaviv_fence_enable_signaling,
	.signaled = etnaviv_fence_signaled,
	.wait = fence_default_wait,
	.release = etnaviv_fence_release,
};
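
/*
 * These callbacks plug etnaviv fences into the kernel's generic fence
 * framework; .wait is the framework's default sleeping wait, and
 * .enable_signaling can unconditionally return true because completion is
 * driven from the GPU interrupt handler via fence_signal().
 */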

static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_fence *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->gpu = gpu;

	fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
		   gpu->fence_context, ++gpu->next_fence);

	return &f->base;
}
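
/*
 * Note that ++gpu->next_fence above is not atomic - callers such as
 * etnaviv_gpu_submit() are expected to hold gpu->lock so that fence
 * seqnos are allocated in order.
 */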

int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
	unsigned int context, bool exclusive)
{
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		ret = reservation_object_reserve_shared(robj);
		if (ret)
			return ret;
	}

	/*
	 * If we have any shared fences, then the exclusive fence
	 * should be ignored as it will already have been signalled.
	 */
	fobj = reservation_object_get_list(robj);
	if (!fobj || fobj->shared_count == 0) {
		/* Wait on any existing exclusive fence which isn't our own */
		fence = reservation_object_get_excl(robj);
		if (fence && fence->context != context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						  reservation_object_held(robj));
		if (fence->context != context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static unsigned int event_alloc(struct etnaviv_gpu *gpu)
{
	unsigned long ret, flags;
	unsigned int i, event = ~0U;

	ret = wait_for_completion_timeout(&gpu->event_free,
					  msecs_to_jiffies(10 * 10000));
	if (!ret)
		dev_err(gpu->dev, "wait_for_completion_timeout failed");

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	/* find first free event */
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		if (gpu->event[i].used == false) {
			gpu->event[i].used = true;
			event = i;
			break;
		}
	}

	spin_unlock_irqrestore(&gpu->event_spinlock, flags);

	return event;
}

static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
{
	unsigned long flags;

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	if (gpu->event[event].used == false) {
		dev_warn(gpu->dev, "event %u is already marked as free",
			 event);
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	} else {
		gpu->event[event].used = false;
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);

		complete(&gpu->event_free);
	}
}
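
/*
 * event_alloc()/event_free() use gpu->event_free as a counting semaphore:
 * etnaviv_gpu_init() completes it once per event slot, event_alloc()
 * consumes a count (with a generous timeout) before claiming a slot, and
 * event_free() returns one.
 */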

/*
 * Cmdstream submission/retirement:
 */

struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
	size_t nr_bos)
{
	struct etnaviv_cmdbuf *cmdbuf;
	size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo[0]),
				 sizeof(*cmdbuf));

	cmdbuf = kzalloc(sz, GFP_KERNEL);
	if (!cmdbuf)
		return NULL;

	cmdbuf->vaddr = dma_alloc_writecombine(gpu->dev, size, &cmdbuf->paddr,
					       GFP_KERNEL);
	if (!cmdbuf->vaddr) {
		kfree(cmdbuf);
		return NULL;
	}

	cmdbuf->gpu = gpu;
	cmdbuf->size = size;

	return cmdbuf;
}

void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
{
	dma_free_writecombine(cmdbuf->gpu->dev, cmdbuf->size,
			      cmdbuf->vaddr, cmdbuf->paddr);
	kfree(cmdbuf);
}

static void retire_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       retire_work);
	u32 fence = gpu->completed_fence;
	struct etnaviv_cmdbuf *cmdbuf, *tmp;
	unsigned int i;

	mutex_lock(&gpu->lock);
	list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
		if (!fence_is_signaled(cmdbuf->fence))
			break;

		list_del(&cmdbuf->node);
		fence_put(cmdbuf->fence);

		for (i = 0; i < cmdbuf->nr_bos; i++) {
			struct etnaviv_gem_object *etnaviv_obj = cmdbuf->bo[i];

			atomic_dec(&etnaviv_obj->gpu_active);
			/* drop the refcount taken in etnaviv_gpu_submit */
			etnaviv_gem_put_iova(gpu, &etnaviv_obj->base);
		}

		etnaviv_gpu_cmdbuf_free(cmdbuf);
	}

	gpu->retired_fence = fence;

	mutex_unlock(&gpu->lock);

	wake_up_all(&gpu->fence_event);
}
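
/*
 * retired_fence deliberately trails completed_fence: the interrupt handler
 * advances completed_fence as events signal, while retired_fence is only
 * advanced here once the corresponding command buffers have been freed.
 */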

int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 fence, struct timespec *timeout)
{
	int ret;

	if (fence_after(fence, gpu->next_fence)) {
		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
			  fence, gpu->next_fence);
		return -EINVAL;
	}

	if (!timeout) {
		/* No timeout was requested: just test for completion */
		ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
	} else {
		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);

		ret = wait_event_interruptible_timeout(gpu->fence_event,
						       fence_completed(gpu, fence),
						       remaining);
		if (ret == 0) {
			DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
			    fence, gpu->retired_fence,
			    gpu->completed_fence);
			ret = -ETIMEDOUT;
		} else if (ret != -ERESTARTSYS) {
			ret = 0;
		}
	}

	return ret;
}

/*
 * Wait for an object to become inactive.  This, on its own, is not race
 * free: the object is moved by the retire worker off the active list, and
 * then the iova is put.  Moreover, the object could be re-submitted just
 * after we notice that it's become inactive.
 *
 * Although the retirement happens under the gpu lock, we don't want to hold
 * that lock in this function while waiting.
 */
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
{
	unsigned long remaining;
	long ret;

	if (!timeout)
		return !is_active(etnaviv_obj) ? 0 : -EBUSY;

	remaining = etnaviv_timeout_to_jiffies(timeout);

	ret = wait_event_interruptible_timeout(gpu->fence_event,
					       !is_active(etnaviv_obj),
					       remaining);
	if (ret > 0) {
		struct etnaviv_drm_private *priv = gpu->drm->dev_private;

		/* Synchronise with the retire worker */
		flush_workqueue(priv->wq);
		return 0;
	} else if (ret == -ERESTARTSYS) {
		return -ERESTARTSYS;
	} else {
		return -ETIMEDOUT;
	}
}

int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu)
{
	return pm_runtime_get_sync(gpu->dev);
}

void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
{
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);
}

/* add bo's to gpu's ring, and kick gpu: */
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
{
	struct fence *fence;
	unsigned int event, i;
	int ret;

	ret = etnaviv_gpu_pm_get_sync(gpu);
	if (ret < 0)
		return ret;

	mutex_lock(&gpu->lock);

	event = event_alloc(gpu);
	if (unlikely(event == ~0U)) {
		DRM_ERROR("no free event\n");
		ret = -EBUSY;
		goto out_unlock;
	}

	fence = etnaviv_gpu_fence_alloc(gpu);
	if (!fence) {
		event_free(gpu, event);
		ret = -ENOMEM;
		goto out_unlock;
	}

	gpu->event[event].fence = fence;
	submit->fence = fence->seqno;
	gpu->active_fence = submit->fence;

	if (gpu->lastctx != cmdbuf->ctx) {
		gpu->mmu->need_flush = true;
		gpu->switch_context = true;
		gpu->lastctx = cmdbuf->ctx;
	}

	etnaviv_buffer_queue(gpu, event, cmdbuf);

	cmdbuf->fence = fence;
	list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);

	/* We're committed to adding this command buffer, hold a PM reference */
	pm_runtime_get_noresume(gpu->dev);

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
		u32 iova;

		/* Each cmdbuf takes a refcount on the iova */
		etnaviv_gem_get_iova(gpu, &etnaviv_obj->base, &iova);
		cmdbuf->bo[i] = etnaviv_obj;
		atomic_inc(&etnaviv_obj->gpu_active);

		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
			reservation_object_add_excl_fence(etnaviv_obj->resv,
							  fence);
		else
			reservation_object_add_shared_fence(etnaviv_obj->resv,
							    fence);
	}
	cmdbuf->nr_bos = submit->nr_bos;
	hangcheck_timer_reset(gpu);
	ret = 0;

out_unlock:
	mutex_unlock(&gpu->lock);

	etnaviv_gpu_pm_put(gpu);

	return ret;
}

static irqreturn_t irq_handler(int irq, void *data)
{
	struct etnaviv_gpu *gpu = data;
	irqreturn_t ret = IRQ_NONE;

	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);

	if (intr != 0) {
		int event;

		pm_runtime_mark_last_busy(gpu->dev);

		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
			dev_err(gpu->dev, "AXI bus error\n");
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
		}

		while ((event = ffs(intr)) != 0) {
			struct fence *fence;

			event -= 1;

			intr &= ~(1 << event);

			dev_dbg(gpu->dev, "event %u\n", event);

			fence = gpu->event[event].fence;
			gpu->event[event].fence = NULL;
			fence_signal(fence);

			/*
			 * Events can be processed out of order.  Eg,
			 * - allocate and queue event 0
			 * - allocate event 1
			 * - event 0 completes, we process it
			 * - allocate and queue event 0
			 * - event 1 and event 0 complete
			 * we can end up processing event 0 first, then 1.
			 */
			if (fence_after(fence->seqno, gpu->completed_fence))
				gpu->completed_fence = fence->seqno;

			event_free(gpu, event);

			/*
			 * We need to balance the runtime PM count caused by
			 * each submission.  Upon submission, we increment
			 * the runtime PM counter, and allocate one event.
			 * So here, we put the runtime PM count for each
			 * completed event.
			 */
			pm_runtime_put_autosuspend(gpu->dev);
		}

		/* Retire the buffer objects in a work */
		etnaviv_queue_work(gpu->drm, &gpu->retire_work);

		ret = IRQ_HANDLED;
	}

	return ret;
}

static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret) {
		disable_clk(gpu);
		return ret;
	}

	return 0;
}

static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	return 0;
}

static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
	unsigned long timeout;

	/* Replace the last WAIT with END */
	etnaviv_buffer_end(gpu);

	/*
	 * We know that only the FE is busy here, this should
	 * happen quickly (as the WAIT is only 200 cycles).  If
	 * we fail, just warn and continue.
	 */
	timeout = jiffies + msecs_to_jiffies(100);
	do {
		u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		if ((idle & gpu->idle_mask) == gpu->idle_mask)
			break;

		if (time_is_before_jiffies(timeout)) {
			dev_warn(gpu->dev,
				 "timed out waiting for idle: idle=0x%x\n",
				 idle);
			break;
		}

		udelay(5);
	} while (1);

	return etnaviv_gpu_clk_disable(gpu);
}

static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
{
	u32 clock;
	int ret;

	ret = mutex_lock_killable(&gpu->lock);
	if (ret)
		return ret;

	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

	etnaviv_gpu_load_clock(gpu, clock);
	etnaviv_gpu_hw_init(gpu);

	gpu->switch_context = true;

	mutex_unlock(&gpu->lock);

	return 0;
}

static int etnaviv_gpu_bind(struct device *dev, struct device *master,
	void *data)
{
	struct drm_device *drm = data;
	struct etnaviv_drm_private *priv = drm->dev_private;
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

#ifdef CONFIG_PM
	ret = pm_runtime_get_sync(gpu->dev);
#else
	ret = etnaviv_gpu_clk_enable(gpu);
#endif
	if (ret < 0)
		return ret;

	gpu->drm = drm;
	gpu->fence_context = fence_context_alloc(1);
	spin_lock_init(&gpu->fence_spinlock);

	INIT_LIST_HEAD(&gpu->active_cmd_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);
	init_waitqueue_head(&gpu->fence_event);

	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
		    (unsigned long)gpu);

	priv->gpu[priv->num_gpus++] = gpu;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;
}

static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
	void *data)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);

	DBG("%s", dev_name(gpu->dev));

	hangcheck_disable(gpu);

#ifdef CONFIG_PM
	pm_runtime_get_sync(gpu->dev);
	pm_runtime_put_sync_suspend(gpu->dev);
#else
	etnaviv_gpu_hw_suspend(gpu);
#endif

	if (gpu->buffer) {
		etnaviv_gpu_cmdbuf_free(gpu->buffer);
		gpu->buffer = NULL;
	}

	if (gpu->mmu) {
		etnaviv_iommu_destroy(gpu->mmu);
		gpu->mmu = NULL;
	}

	gpu->drm = NULL;
}

static const struct component_ops gpu_ops = {
	.bind = etnaviv_gpu_bind,
	.unbind = etnaviv_gpu_unbind,
};

static const struct of_device_id etnaviv_gpu_match[] = {
	{
		.compatible = "vivante,gc"
	},
	{ /* sentinel */ }
};

static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct etnaviv_gpu *gpu;
	int err = 0;

	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
	if (!gpu)
		return -ENOMEM;

	gpu->dev = &pdev->dev;
	mutex_init(&gpu->lock);

	/*
	 * Set the GPU base address to the start of physical memory.  This
	 * ensures that if we have up to 2GB, the v1 MMU can address the
	 * highest memory.  This is important as command buffers may be
	 * allocated outside of this limit.
	 */
	gpu->memory_base = PHYS_OFFSET;

	/* Map registers: */
	gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
	if (IS_ERR(gpu->mmio))
		return PTR_ERR(gpu->mmio);

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		err = gpu->irq;
		dev_err(dev, "failed to get irq: %d\n", err);
		goto fail;
	}

	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
			       dev_name(gpu->dev), gpu);
	if (err) {
		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
		goto fail;
	}

	/* Get Clocks: */
	gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
	DBG("clk_bus: %p", gpu->clk_bus);
	if (IS_ERR(gpu->clk_bus))
		gpu->clk_bus = NULL;

	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
	DBG("clk_core: %p", gpu->clk_core);
	if (IS_ERR(gpu->clk_core))
		gpu->clk_core = NULL;

	gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
	DBG("clk_shader: %p", gpu->clk_shader);
	if (IS_ERR(gpu->clk_shader))
		gpu->clk_shader = NULL;

	/* TODO: figure out max mapped size */
	dev_set_drvdata(dev, gpu);

	/*
	 * We treat the device as initially suspended.  The runtime PM
	 * autosuspend delay is rather arbitrary: no measurements have
	 * yet been performed to determine an appropriate value.
	 */
	pm_runtime_use_autosuspend(gpu->dev);
	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
	pm_runtime_enable(gpu->dev);

	err = component_add(&pdev->dev, &gpu_ops);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register component: %d\n", err);
		goto fail;
	}

	return 0;

fail:
	return err;
}

static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &gpu_ops);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

#ifdef CONFIG_PM
static int etnaviv_gpu_rpm_suspend(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	u32 idle, mask;

	/* If we have outstanding fences, we're not idle */
	if (gpu->completed_fence != gpu->active_fence)
		return -EBUSY;

	/* Check whether the hardware (except FE) is idle */
	mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
	if (idle != mask)
		return -EBUSY;

	return etnaviv_gpu_hw_suspend(gpu);
}

static int etnaviv_gpu_rpm_resume(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	ret = etnaviv_gpu_clk_enable(gpu);
	if (ret)
		return ret;

	/* Re-initialise the basic hardware state */
	if (gpu->drm && gpu->buffer) {
		ret = etnaviv_gpu_hw_resume(gpu);
		if (ret) {
			etnaviv_gpu_clk_disable(gpu);
			return ret;
		}
	}

	return 0;
}
#endif

static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
	SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
			   NULL)
};

struct platform_driver etnaviv_gpu_driver = {
	.driver = {
		.name = "etnaviv-gpu",
		.owner = THIS_MODULE,
		.pm = &etnaviv_gpu_pm_ops,
		.of_match_table = etnaviv_gpu_match,
	},
	.probe = etnaviv_gpu_platform_probe,
	.remove = etnaviv_gpu_platform_remove,
	.id_table = gpu_ids,
};