/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/component.h>
#include <linux/dma-fence.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
#include <linux/thermal.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "cmdstream.xml.h"

static const struct platform_device_id gpu_ids[] = {
	{ .name = "etnaviv-gpu,2d" },
	{ },
};

static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);

int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
{
	switch (param) {
	case ETNAVIV_PARAM_GPU_MODEL:
		*value = gpu->identity.model;
		break;

	case ETNAVIV_PARAM_GPU_REVISION:
		*value = gpu->identity.revision;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_0:
		*value = gpu->identity.features;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_1:
		*value = gpu->identity.minor_features0;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_2:
		*value = gpu->identity.minor_features1;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_3:
		*value = gpu->identity.minor_features2;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_4:
		*value = gpu->identity.minor_features3;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_5:
		*value = gpu->identity.minor_features4;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_6:
		*value = gpu->identity.minor_features5;
		break;

	case ETNAVIV_PARAM_GPU_STREAM_COUNT:
		*value = gpu->identity.stream_count;
		break;

	case ETNAVIV_PARAM_GPU_REGISTER_MAX:
		*value = gpu->identity.register_max;
		break;

	case ETNAVIV_PARAM_GPU_THREAD_COUNT:
		*value = gpu->identity.thread_count;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
		*value = gpu->identity.vertex_cache_size;
		break;

	case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
		*value = gpu->identity.shader_core_count;
		break;

	case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
		*value = gpu->identity.pixel_pipes;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
		*value = gpu->identity.vertex_output_buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
		*value = gpu->identity.buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
		*value = gpu->identity.instruction_count;
		break;

	case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
		*value = gpu->identity.num_constants;
		break;

	case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
		*value = gpu->identity.varyings_count;
		break;

	default:
		DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
		return -EINVAL;
	}

	return 0;
}

#define etnaviv_is_model_rev(gpu, mod, rev) \
	((gpu)->identity.model == chipModel_##mod && \
	 (gpu)->identity.revision == rev)
#define etnaviv_field(val, field) \
	(((val) & field##__MASK) >> field##__SHIFT)
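
/*
 * Illustrative note: the rnndb-generated register headers define a __MASK
 * and __SHIFT pair per field, so e.g.
 *	etnaviv_field(specs, VIVS_HI_CHIP_SPECS_STREAM_COUNT)
 * masks VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK out of "specs" and shifts it
 * down by VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT, yielding the raw field
 * value.
 */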

static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		u32 specs[4];
		unsigned int streams;

		specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
		specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
		specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
		specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);

		gpu->identity.stream_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_STREAM_COUNT);
		gpu->identity.register_max = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_REGISTER_MAX);
		gpu->identity.thread_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_THREAD_COUNT);
		gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
		gpu->identity.shader_core_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
		gpu->identity.pixel_pipes = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
		gpu->identity.vertex_output_buffer_size =
			etnaviv_field(specs[0],
				VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);

		gpu->identity.buffer_size = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
		gpu->identity.instruction_count = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
		gpu->identity.num_constants = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);

		gpu->identity.varyings_count = etnaviv_field(specs[2],
					VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);

		/* This overrides the value from older register if non-zero */
		streams = etnaviv_field(specs[3],
					VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
		if (streams)
			gpu->identity.stream_count = streams;
	}

	/* Fill in the stream count if not specified */
	if (gpu->identity.stream_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.stream_count = 4;
		else
			gpu->identity.stream_count = 1;
	}
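
	/*
	 * Several of the raw SPECS fields are log2-encoded; the conversions
	 * below expand them.  For example, a raw register_max of 5 becomes
	 * 1 << 5 = 32 registers, and a raw thread_count of 7 becomes 128.
	 */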
	/* Convert the register max value */
	if (gpu->identity.register_max)
		gpu->identity.register_max = 1 << gpu->identity.register_max;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.register_max = 32;
	else
		gpu->identity.register_max = 64;

	/* Convert thread count */
	if (gpu->identity.thread_count)
		gpu->identity.thread_count = 1 << gpu->identity.thread_count;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.thread_count = 64;
	else if (gpu->identity.model == chipModel_GC500 ||
		 gpu->identity.model == chipModel_GC530)
		gpu->identity.thread_count = 128;
	else
		gpu->identity.thread_count = 256;

	if (gpu->identity.vertex_cache_size == 0)
		gpu->identity.vertex_cache_size = 8;

	if (gpu->identity.shader_core_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.shader_core_count = 2;
		else
			gpu->identity.shader_core_count = 1;
	}

	if (gpu->identity.pixel_pipes == 0)
		gpu->identity.pixel_pipes = 1;

	/* Convert vertex output buffer size */
	if (gpu->identity.vertex_output_buffer_size) {
		gpu->identity.vertex_output_buffer_size =
			1 << gpu->identity.vertex_output_buffer_size;
	} else if (gpu->identity.model == chipModel_GC400) {
		if (gpu->identity.revision < 0x4000)
			gpu->identity.vertex_output_buffer_size = 512;
		else if (gpu->identity.revision < 0x4200)
			gpu->identity.vertex_output_buffer_size = 256;
		else
			gpu->identity.vertex_output_buffer_size = 128;
	} else {
		gpu->identity.vertex_output_buffer_size = 512;
	}

	switch (gpu->identity.instruction_count) {
	case 0:
		if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
		    gpu->identity.model == chipModel_GC880)
			gpu->identity.instruction_count = 512;
		else
			gpu->identity.instruction_count = 256;
		break;

	case 1:
		gpu->identity.instruction_count = 1024;
		break;

	case 2:
		gpu->identity.instruction_count = 2048;
		break;

	default:
		gpu->identity.instruction_count = 256;
		break;
	}

	if (gpu->identity.num_constants == 0)
		gpu->identity.num_constants = 168;

	if (gpu->identity.varyings_count == 0) {
		if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
			gpu->identity.varyings_count = 12;
		else
			gpu->identity.varyings_count = 8;
	}

	/*
	 * For some cores, two varyings are consumed for position, so the
	 * maximum varying count needs to be reduced by one.
	 */
	if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
	    etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
	    etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5106))
		gpu->identity.varyings_count -= 1;
}

static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
{
	u32 chipIdentity;

	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);

	/* Special case for older graphics cores. */
	if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
		gpu->identity.model = chipModel_GC500;
		gpu->identity.revision = etnaviv_field(chipIdentity,
					 VIVS_HI_CHIP_IDENTITY_REVISION);
	} else {
		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);

		/*
		 * !!!! HACK ALERT !!!!
		 * Because people change device IDs without letting software
		 * know about it - here is the hack to make it all look the
		 * same.  Only for GC400 family.
		 */
		if ((gpu->identity.model & 0xff00) == 0x0400 &&
		    gpu->identity.model != chipModel_GC420) {
			gpu->identity.model = gpu->identity.model & 0x0400;
		}

		/* Another special case */
		if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
			u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);

			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
				/*
				 * This IP has an ECO; put the correct
				 * revision in it.
				 */
				gpu->identity.revision = 0x1051;
			}
		}

		/*
		 * NXP likes to call the GPU on the i.MX6QP GC2000+, but in
		 * reality it's just a re-branded GC3000. We can identify this
		 * core by the upper half of the revision register being all 1.
		 * Fix model/rev here, so all other places can refer to this
		 * core by its real identity.
		 */
		if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
			gpu->identity.model = chipModel_GC3000;
			gpu->identity.revision &= 0xffff;
		}
	}

	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
		 gpu->identity.model, gpu->identity.revision);

	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);

	/* Disable fast clear on GC700. */
	if (gpu->identity.model == chipModel_GC700)
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;

	if ((gpu->identity.model == chipModel_GC500 &&
	     gpu->identity.revision < 2) ||
	    (gpu->identity.model == chipModel_GC300 &&
	     gpu->identity.revision < 0x2000)) {
		/*
		 * GC500 rev 1.x and GC300 rev < 2.0 don't have these
		 * registers.
		 */
		gpu->identity.minor_features0 = 0;
		gpu->identity.minor_features1 = 0;
		gpu->identity.minor_features2 = 0;
		gpu->identity.minor_features3 = 0;
		gpu->identity.minor_features4 = 0;
		gpu->identity.minor_features5 = 0;
	} else
		gpu->identity.minor_features0 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);

	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		gpu->identity.minor_features1 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
		gpu->identity.minor_features2 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
		gpu->identity.minor_features3 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
		gpu->identity.minor_features4 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
		gpu->identity.minor_features5 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
	}

	/* GC600 idle register reports zero bits where modules aren't present */
	if (gpu->identity.model == chipModel_GC600) {
		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
				 VIVS_HI_IDLE_STATE_RA |
				 VIVS_HI_IDLE_STATE_SE |
				 VIVS_HI_IDLE_STATE_PA |
				 VIVS_HI_IDLE_STATE_SH |
				 VIVS_HI_IDLE_STATE_PE |
				 VIVS_HI_IDLE_STATE_DE |
				 VIVS_HI_IDLE_STATE_FE;
	} else {
		gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
	}

	etnaviv_hw_specs(gpu);
}

static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
{
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
}
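
/*
 * Core clock scaling, as derived from the code below: freq_scale 0 yields
 * fscale = 1 << 6 = 64 (full speed), and each increment halves the divider,
 * e.g. freq_scale == 1 gives fscale == 32 (half speed).  The FSCALE value
 * is latched by writing it once with FSCALE_CMD_LOAD set and then again
 * with the load bit cleared, which is what etnaviv_gpu_load_clock() does.
 */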

static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
{
	unsigned int fscale = 1 << (6 - gpu->freq_scale);
	u32 clock;

	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);

	etnaviv_gpu_load_clock(gpu, clock);
}

static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
{
	u32 control, idle;
	unsigned long timeout;
	bool failed = true;

	/* We hope that the GPU resets in under one second */
	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_is_after_jiffies(timeout)) {
		/* enable clock */
		etnaviv_gpu_update_clock(gpu);

		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		/* Wait for stable clock.  Vivante's code waited for 1ms */
		usleep_range(1000, 10000);

		/* isolate the GPU. */
		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* set soft reset. */
		control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* wait for reset. */
		msleep(1);

		/* reset soft reset bit. */
		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* reset GPU isolation. */
		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* read idle register. */
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		/* try resetting again if the FE is not idle */
		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
			dev_dbg(gpu->dev, "FE is not idle\n");
			continue;
		}

		/* read reset register. */
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		/* is the GPU idle? */
		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
			dev_dbg(gpu->dev, "GPU is not idle\n");
			continue;
		}

		failed = false;
		break;
	}

	if (failed) {
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");

		return -EBUSY;
	}

	/* We rely on the GPU running, so program the clock */
	etnaviv_gpu_update_clock(gpu);

	return 0;
}

static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
{
	u32 pmc, ppc;

	/* enable clock gating */
	ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
	ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;

	/* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
	if (gpu->identity.revision == 0x4301 ||
	    gpu->identity.revision == 0x4302)
		ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;

	gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);

	pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);

	/* Disable PA clock gating for GC400+ without bugfix except for GC420 */
	if (gpu->identity.model >= chipModel_GC400 &&
	    gpu->identity.model != chipModel_GC420 &&
	    !(gpu->identity.minor_features3 & chipMinorFeatures3_BUG_FIXES12))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;

	/*
	 * Disable PE clock gating on revs < 5.0.0.0 when HZ is
	 * present without a bug fix.
	 */
	if (gpu->identity.revision < 0x5000 &&
	    gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
	    !(gpu->identity.minor_features1 &
	      chipMinorFeatures1_DISABLE_PE_GATING))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;

	if (gpu->identity.revision < 0x5422)
		pmc |= BIT(15); /* Unknown bit */

	/* Disable TX clock gating on affected core revisions. */
	if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x5108))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;

	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;

	gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
}
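
/*
 * Point the FE at a command buffer and kick it off.  "address" is the GPU
 * view of the buffer; "prefetch" appears to be counted in 64-bit command
 * words (etnaviv_buffer_init() hands back the kernel ring fill level in
 * those units).
 */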
void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
{
	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address);
	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
		  VIVS_FE_COMMAND_CONTROL_ENABLE |
		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
}

static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
{
	/*
	 * Base value for VIVS_PM_PULSE_EATER register on models where it
	 * cannot be read, extracted from vivante kernel driver.
	 */
	u32 pulse_eater = 0x01590880;

	if (etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222)) {
		pulse_eater |= BIT(23);
	}

	if (etnaviv_is_model_rev(gpu, GC1000, 0x5039) ||
	    etnaviv_is_model_rev(gpu, GC1000, 0x5040)) {
		pulse_eater &= ~BIT(16);
		pulse_eater |= BIT(17);
	}

	if ((gpu->identity.revision > 0x5420) &&
	    (gpu->identity.features & chipFeatures_PIPE_3D)) {
		/* Performance fix: disable internal DFS */
		pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER);
		pulse_eater |= BIT(18);
	}

	gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
}

static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
	u16 prefetch;

	if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
	     etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
		u32 mc_memory_debug;

		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;

		if (gpu->identity.revision == 0x5007)
			mc_memory_debug |= 0x0c;
		else
			mc_memory_debug |= 0x08;

		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
	}

	/* enable module-level clock gating */
	etnaviv_gpu_enable_mlcg(gpu);

	/*
	 * Update GPU AXI cache attribute to "cacheable, no allocate".
	 * This is necessary to prevent the iMX6 SoC locking up.
	 */
	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
		  VIVS_HI_AXI_CONFIG_ARCACHE(2));

	/* GC2000 rev 5108 needs a special bus config */
	if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
	}

	/* setup the pulse eater */
	etnaviv_gpu_setup_pulse_eater(gpu);

	/* setup the MMU */
	etnaviv_iommu_restore(gpu);

	/* Start command processor */
	prefetch = etnaviv_buffer_init(gpu);

	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
	etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(gpu->buffer),
			     prefetch);
}

int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
{
	int ret, i;

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0) {
		dev_err(gpu->dev, "Failed to enable GPU power domain\n");
		return ret;
	}

	etnaviv_hw_identify(gpu);

	if (gpu->identity.model == 0) {
		dev_err(gpu->dev, "Unknown GPU model\n");
		ret = -ENXIO;
		goto fail;
	}

	/* Exclude VG cores with FE2.0 */
	if (gpu->identity.features & chipFeatures_PIPE_VG &&
	    gpu->identity.features & chipFeatures_FE20) {
		dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
		ret = -ENXIO;
		goto fail;
	}

	/*
	 * Set the GPU linear window to be at the end of the DMA window, where
	 * the CMA area is likely to reside. This ensures that we are able to
	 * map the command buffers while having the linear window overlap as
	 * much RAM as possible, so we can optimize mappings for other buffers.
	 *
	 * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
	 * to different views of the memory on the individual engines.
	 */
	if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
	    (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
		u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
		if (dma_mask < PHYS_OFFSET + SZ_2G)
			gpu->memory_base = PHYS_OFFSET;
		else
			gpu->memory_base = dma_mask - SZ_2G + 1;
	} else if (PHYS_OFFSET >= SZ_2G) {
		dev_info(gpu->dev, "Need to move linear window on MC1.0, disabling TS\n");
		gpu->memory_base = PHYS_OFFSET;
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
	}

	ret = etnaviv_hw_reset(gpu);
	if (ret) {
		dev_err(gpu->dev, "GPU reset failed\n");
		goto fail;
	}

	gpu->mmu = etnaviv_iommu_new(gpu);
	if (IS_ERR(gpu->mmu)) {
		dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
		ret = PTR_ERR(gpu->mmu);
		goto fail;
	}

	gpu->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(gpu);
	if (IS_ERR(gpu->cmdbuf_suballoc)) {
		dev_err(gpu->dev, "Failed to create cmdbuf suballocator\n");
		ret = PTR_ERR(gpu->cmdbuf_suballoc);
		goto destroy_iommu;
	}

	/* Create buffer: */
	gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0);
	if (!gpu->buffer) {
		ret = -ENOMEM;
		dev_err(gpu->dev, "could not create command buffer\n");
		goto destroy_iommu;
	}

	if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
	    etnaviv_cmdbuf_get_va(gpu->buffer) > 0x80000000) {
		ret = -EINVAL;
		dev_err(gpu->dev,
			"command buffer outside valid memory window\n");
		goto free_buffer;
	}
	/* Setup event management */
	spin_lock_init(&gpu->event_spinlock);
	init_completion(&gpu->event_free);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		gpu->event[i].used = false;
		complete(&gpu->event_free);
	}

	/* Now program the hardware */
	mutex_lock(&gpu->lock);
	etnaviv_gpu_hw_init(gpu);
	gpu->exec_state = -1;
	mutex_unlock(&gpu->lock);

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;

free_buffer:
	etnaviv_cmdbuf_free(gpu->buffer);
	gpu->buffer = NULL;
destroy_iommu:
	etnaviv_iommu_destroy(gpu->mmu);
	gpu->mmu = NULL;
fail:
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}

#ifdef CONFIG_DEBUG_FS
struct dma_debug {
	u32 address[2];
	u32 state[2];
};
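
/*
 * Sample the FE DMA address/state twice, with up to 500 re-reads in between;
 * if neither value ever changes, the front end is most likely stuck, which
 * etnaviv_gpu_debugfs() reports further down.
 */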
static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
{
	u32 i;

	debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

	for (i = 0; i < 500; i++) {
		debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

		if (debug->address[0] != debug->address[1])
			break;

		if (debug->state[0] != debug->state[1])
			break;
	}
}

int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct dma_debug debug;
	u32 dma_lo, dma_hi, axi, idle;
	int ret;

	seq_printf(m, "%s Status:\n", dev_name(gpu->dev));

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		return ret;

	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
	axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

	verify_dma(gpu, &debug);

	seq_puts(m, "\tfeatures\n");
	seq_printf(m, "\t minor_features0: 0x%08x\n",
		   gpu->identity.minor_features0);
	seq_printf(m, "\t minor_features1: 0x%08x\n",
		   gpu->identity.minor_features1);
	seq_printf(m, "\t minor_features2: 0x%08x\n",
		   gpu->identity.minor_features2);
	seq_printf(m, "\t minor_features3: 0x%08x\n",
		   gpu->identity.minor_features3);
	seq_printf(m, "\t minor_features4: 0x%08x\n",
		   gpu->identity.minor_features4);
	seq_printf(m, "\t minor_features5: 0x%08x\n",
		   gpu->identity.minor_features5);

	seq_puts(m, "\tspecs\n");
	seq_printf(m, "\t stream_count: %d\n",
		   gpu->identity.stream_count);
	seq_printf(m, "\t register_max: %d\n",
		   gpu->identity.register_max);
	seq_printf(m, "\t thread_count: %d\n",
		   gpu->identity.thread_count);
	seq_printf(m, "\t vertex_cache_size: %d\n",
		   gpu->identity.vertex_cache_size);
	seq_printf(m, "\t shader_core_count: %d\n",
		   gpu->identity.shader_core_count);
	seq_printf(m, "\t pixel_pipes: %d\n",
		   gpu->identity.pixel_pipes);
	seq_printf(m, "\t vertex_output_buffer_size: %d\n",
		   gpu->identity.vertex_output_buffer_size);
	seq_printf(m, "\t buffer_size: %d\n",
		   gpu->identity.buffer_size);
	seq_printf(m, "\t instruction_count: %d\n",
		   gpu->identity.instruction_count);
	seq_printf(m, "\t num_constants: %d\n",
		   gpu->identity.num_constants);
	seq_printf(m, "\t varyings_count: %d\n",
		   gpu->identity.varyings_count);

	seq_printf(m, "\taxi: 0x%08x\n", axi);
	seq_printf(m, "\tidle: 0x%08x\n", idle);
	/* force bits for modules not present on this core to "idle" */
	idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
	if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
		seq_puts(m, "\t FE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
		seq_puts(m, "\t DE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
		seq_puts(m, "\t PE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
		seq_puts(m, "\t SH is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
		seq_puts(m, "\t PA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
		seq_puts(m, "\t SE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
		seq_puts(m, "\t RA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
		seq_puts(m, "\t TX is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
		seq_puts(m, "\t VG is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
		seq_puts(m, "\t IM is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
		seq_puts(m, "\t FP is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
		seq_puts(m, "\t TS is not idle\n");
	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
		seq_puts(m, "\t AXI low power mode\n");

	if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
		u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
		u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
		u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);

		seq_puts(m, "\tMC\n");
		seq_printf(m, "\t read0: 0x%08x\n", read0);
		seq_printf(m, "\t read1: 0x%08x\n", read1);
		seq_printf(m, "\t write: 0x%08x\n", write);
	}

	seq_puts(m, "\tDMA ");

	if (debug.address[0] == debug.address[1] &&
	    debug.state[0] == debug.state[1]) {
		seq_puts(m, "seems to be stuck\n");
	} else if (debug.address[0] == debug.address[1]) {
		seq_puts(m, "address is constant\n");
	} else {
		seq_puts(m, "is running\n");
	}

	seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
	seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
	seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
	seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
	seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
		   dma_lo, dma_hi);

	ret = 0;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}
#endif

/*
 * Hangcheck detection for locked gpu:
 */
static void recover_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       recover_work);
	unsigned long flags;
	unsigned int i;

	dev_err(gpu->dev, "hangcheck recover!\n");

	if (pm_runtime_get_sync(gpu->dev) < 0)
		return;

	mutex_lock(&gpu->lock);

	/* Only catch the first event, or when manually re-armed */
	if (etnaviv_dump_core) {
		etnaviv_core_dump(gpu);
		etnaviv_dump_core = false;
	}

	etnaviv_hw_reset(gpu);

	/* complete all events, the GPU won't do it after the reset */
	spin_lock_irqsave(&gpu->event_spinlock, flags);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		if (!gpu->event[i].used)
			continue;
		dma_fence_signal(gpu->event[i].fence);
		gpu->event[i].fence = NULL;
		gpu->event[i].used = false;
		complete(&gpu->event_free);
	}
	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	gpu->completed_fence = gpu->active_fence;

	etnaviv_gpu_hw_init(gpu);
	gpu->lastctx = NULL;
	gpu->exec_state = -1;

	mutex_unlock(&gpu->lock);
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	/* Retire the buffer objects in a work */
	etnaviv_queue_work(gpu->drm, &gpu->retire_work);
}

static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
{
	DBG("%s", dev_name(gpu->dev));
	mod_timer(&gpu->hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
}
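
/*
 * Progress is detected in two ways: the completed fence seqno advancing, or
 * the FE DMA address moving outside a small 16-byte window.  The window
 * allows for the FE jumping around inside what is presumably its idle
 * WAIT/LINK loop without that counting as real progress.
 */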
static void hangcheck_handler(unsigned long data)
{
	struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data;
	u32 fence = gpu->completed_fence;
	bool progress = false;

	if (fence != gpu->hangcheck_fence) {
		gpu->hangcheck_fence = fence;
		progress = true;
	}

	if (!progress) {
		u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		int change = dma_addr - gpu->hangcheck_dma_addr;

		if (change < 0 || change > 16) {
			gpu->hangcheck_dma_addr = dma_addr;
			progress = true;
		}
	}

	if (!progress && fence_after(gpu->active_fence, fence)) {
		dev_err(gpu->dev, "hangcheck detected gpu lockup!\n");
		dev_err(gpu->dev, "     completed fence: %u\n", fence);
		dev_err(gpu->dev, "     active fence: %u\n",
			gpu->active_fence);
		etnaviv_queue_work(gpu->drm, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (fence_after(gpu->active_fence, gpu->hangcheck_fence))
		hangcheck_timer_reset(gpu);
}

static void hangcheck_disable(struct etnaviv_gpu *gpu)
{
	del_timer_sync(&gpu->hangcheck_timer);
	cancel_work_sync(&gpu->recover_work);
}

/* fence object management */
struct etnaviv_fence {
	struct etnaviv_gpu *gpu;
	struct dma_fence base;
};

static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
{
	return container_of(fence, struct etnaviv_fence, base);
}

static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
{
	return "etnaviv";
}

static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return dev_name(f->gpu->dev);
}

static bool etnaviv_fence_enable_signaling(struct dma_fence *fence)
{
	return true;
}

static bool etnaviv_fence_signaled(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return fence_completed(f->gpu, f->base.seqno);
}

static void etnaviv_fence_release(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	kfree_rcu(f, base.rcu);
}

static const struct dma_fence_ops etnaviv_fence_ops = {
	.get_driver_name = etnaviv_fence_get_driver_name,
	.get_timeline_name = etnaviv_fence_get_timeline_name,
	.enable_signaling = etnaviv_fence_enable_signaling,
	.signaled = etnaviv_fence_signaled,
	.wait = dma_fence_default_wait,
	.release = etnaviv_fence_release,
};

static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_fence *f;

	/*
	 * GPU lock must already be held, otherwise fence completion order might
	 * not match the seqno order assigned here.
	 */
	lockdep_assert_held(&gpu->lock);

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->gpu = gpu;

	dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
		       gpu->fence_context, ++gpu->next_fence);

	return &f->base;
}

int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
	unsigned int context, bool exclusive, bool explicit)
{
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	if (!exclusive) {
		ret = reservation_object_reserve_shared(robj);
		if (ret)
			return ret;
	}

	if (explicit)
		return 0;

	/*
	 * If we have any shared fences, then the exclusive fence
	 * should be ignored as it will already have been signalled.
	 */
	fobj = reservation_object_get_list(robj);
	if (!fobj || fobj->shared_count == 0) {
		/* Wait on any existing exclusive fence which isn't our own */
		fence = reservation_object_get_excl(robj);
		if (fence && fence->context != context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(robj));
		if (fence->context != context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static unsigned int event_alloc(struct etnaviv_gpu *gpu)
{
	unsigned long ret, flags;
	unsigned int i, event = ~0U;

	ret = wait_for_completion_timeout(&gpu->event_free,
					  msecs_to_jiffies(10 * 10000));
	if (!ret)
		dev_err(gpu->dev, "wait_for_completion_timeout failed");

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	/* find first free event */
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		if (gpu->event[i].used == false) {
			gpu->event[i].used = true;
			event = i;
			break;
		}
	}

	spin_unlock_irqrestore(&gpu->event_spinlock, flags);

	return event;
}

static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
{
	unsigned long flags;

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	if (gpu->event[event].used == false) {
		dev_warn(gpu->dev, "event %u is already marked as free",
			 event);
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	} else {
		gpu->event[event].used = false;
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);

		complete(&gpu->event_free);
	}
}

/*
 * Cmdstream submission/retirement:
 */

static void retire_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       retire_work);
	u32 fence = gpu->completed_fence;
	struct etnaviv_cmdbuf *cmdbuf, *tmp;
	unsigned int i;

	mutex_lock(&gpu->lock);
	list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
		if (!dma_fence_is_signaled(cmdbuf->fence))
			break;

		list_del(&cmdbuf->node);
		dma_fence_put(cmdbuf->fence);

		for (i = 0; i < cmdbuf->nr_bos; i++) {
			struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
			struct etnaviv_gem_object *etnaviv_obj = mapping->object;

			atomic_dec(&etnaviv_obj->gpu_active);
			/* drop the refcount taken in etnaviv_gpu_submit */
			etnaviv_gem_mapping_unreference(mapping);
		}

		etnaviv_cmdbuf_free(cmdbuf);
		/*
		 * We need to balance the runtime PM count caused by
		 * each submission.  Upon submission, we increment
		 * the runtime PM counter, and allocate one event.
		 * So here, we put the runtime PM count for each
		 * completed event.
		 */
		pm_runtime_put_autosuspend(gpu->dev);
	}

	gpu->retired_fence = fence;

	mutex_unlock(&gpu->lock);

	wake_up_all(&gpu->fence_event);
}

int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 fence, struct timespec *timeout)
{
	int ret;

	if (fence_after(fence, gpu->next_fence)) {
		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
				fence, gpu->next_fence);
		return -EINVAL;
	}

	if (!timeout) {
		/* No timeout was requested: just test for completion */
		ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
	} else {
		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);

		ret = wait_event_interruptible_timeout(gpu->fence_event,
						fence_completed(gpu, fence),
						remaining);
		if (ret == 0) {
			DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
				fence, gpu->retired_fence,
				gpu->completed_fence);
			ret = -ETIMEDOUT;
		} else if (ret != -ERESTARTSYS) {
			ret = 0;
		}
	}

	return ret;
}

/*
 * Wait for an object to become inactive.  This, on its own, is not race
 * free: the object is moved by the retire worker off the active list, and
 * then the iova is put.  Moreover, the object could be re-submitted just
 * after we notice that it's become inactive.
 *
 * Although the retirement happens under the gpu lock, we don't want to hold
 * that lock in this function while waiting.
 */
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
{
	unsigned long remaining;
	long ret;

	if (!timeout)
		return !is_active(etnaviv_obj) ? 0 : -EBUSY;

	remaining = etnaviv_timeout_to_jiffies(timeout);

	ret = wait_event_interruptible_timeout(gpu->fence_event,
					       !is_active(etnaviv_obj),
					       remaining);
	if (ret > 0) {
		struct etnaviv_drm_private *priv = gpu->drm->dev_private;

		/* Synchronise with the retire worker */
		flush_workqueue(priv->wq);
		return 0;
	} else if (ret == -ERESTARTSYS) {
		return -ERESTARTSYS;
	} else {
		return -ETIMEDOUT;
	}
}

int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu)
{
	return pm_runtime_get_sync(gpu->dev);
}

void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
{
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);
}
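
/*
 * Submission life cycle, in brief: a runtime-PM reference and an event slot
 * are taken per submission, a fence is allocated under gpu->lock, the
 * command buffer is queued behind the kernel ring, and retire_worker()
 * releases the references once the event interrupt has signalled the fence.
 */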
/* add bo's to gpu's ring, and kick gpu: */
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
{
	struct dma_fence *fence;
	unsigned int event, i;
	int ret;

	ret = etnaviv_gpu_pm_get_sync(gpu);
	if (ret < 0)
		return ret;

	event = event_alloc(gpu);
	if (unlikely(event == ~0U)) {
		DRM_ERROR("no free event\n");
		ret = -EBUSY;
		goto out_pm_put;
	}

	mutex_lock(&gpu->lock);

	fence = etnaviv_gpu_fence_alloc(gpu);
	if (!fence) {
		event_free(gpu, event);
		ret = -ENOMEM;
		goto out_unlock;
	}

	gpu->event[event].fence = fence;
	submit->fence = dma_fence_get(fence);
	gpu->active_fence = submit->fence->seqno;

	if (gpu->lastctx != cmdbuf->ctx) {
		gpu->mmu->need_flush = true;
		gpu->switch_context = true;
		gpu->lastctx = cmdbuf->ctx;
	}

	etnaviv_buffer_queue(gpu, event, cmdbuf);

	cmdbuf->fence = fence;
	list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);

	/* We're committed to adding this command buffer, hold a PM reference */
	pm_runtime_get_noresume(gpu->dev);

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		/* Each cmdbuf takes a refcount on the mapping */
		etnaviv_gem_mapping_reference(submit->bos[i].mapping);
		cmdbuf->bo_map[i] = submit->bos[i].mapping;
		atomic_inc(&etnaviv_obj->gpu_active);

		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
			reservation_object_add_excl_fence(etnaviv_obj->resv,
							  fence);
		else
			reservation_object_add_shared_fence(etnaviv_obj->resv,
							    fence);
	}
	cmdbuf->nr_bos = submit->nr_bos;
	hangcheck_timer_reset(gpu);
	ret = 0;

out_unlock:
	mutex_unlock(&gpu->lock);

out_pm_put:
	etnaviv_gpu_pm_put(gpu);

	return ret;
}

static irqreturn_t irq_handler(int irq, void *data)
{
	struct etnaviv_gpu *gpu = data;
	irqreturn_t ret = IRQ_NONE;

	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);

	if (intr != 0) {
		int event;

		pm_runtime_mark_last_busy(gpu->dev);

		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
			dev_err(gpu->dev, "AXI bus error\n");
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
		}

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
			int i;

			dev_err_ratelimited(gpu->dev,
				"MMU fault status 0x%08x\n",
				gpu_read(gpu, VIVS_MMUv2_STATUS));
			for (i = 0; i < 4; i++) {
				dev_err_ratelimited(gpu->dev,
					"MMU %d fault addr 0x%08x\n",
					i, gpu_read(gpu,
					VIVS_MMUv2_EXCEPTION_ADDR(i)));
			}
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
		}

		while ((event = ffs(intr)) != 0) {
			struct dma_fence *fence;

			event -= 1;

			intr &= ~(1 << event);

			dev_dbg(gpu->dev, "event %u\n", event);

			fence = gpu->event[event].fence;
			gpu->event[event].fence = NULL;
			dma_fence_signal(fence);

			/*
			 * Events can be processed out of order.  Eg,
			 * - allocate and queue event 0
			 * - allocate event 1
			 * - event 0 completes, we process it
			 * - allocate and queue event 0
			 * - event 1 and event 0 complete
			 * we can end up processing event 0 first, then 1.
			 */
			if (fence_after(fence->seqno, gpu->completed_fence))
				gpu->completed_fence = fence->seqno;

			event_free(gpu, event);
		}

		/* Retire the buffer objects in a work */
		etnaviv_queue_work(gpu->drm, &gpu->retire_work);

		ret = IRQ_HANDLED;
	}

	return ret;
}

static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
{
	int ret;

	if (gpu->clk_bus) {
		ret = clk_prepare_enable(gpu->clk_bus);
		if (ret)
			return ret;
	}

	if (gpu->clk_core) {
		ret = clk_prepare_enable(gpu->clk_core);
		if (ret)
			goto disable_clk_bus;
	}

	if (gpu->clk_shader) {
		ret = clk_prepare_enable(gpu->clk_shader);
		if (ret)
			goto disable_clk_core;
	}

	return 0;

disable_clk_core:
	if (gpu->clk_core)
		clk_disable_unprepare(gpu->clk_core);
disable_clk_bus:
	if (gpu->clk_bus)
		clk_disable_unprepare(gpu->clk_bus);

	return ret;
}

static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_shader)
		clk_disable_unprepare(gpu->clk_shader);
	if (gpu->clk_core)
		clk_disable_unprepare(gpu->clk_core);
	if (gpu->clk_bus)
		clk_disable_unprepare(gpu->clk_bus);

	return 0;
}

int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		if ((idle & gpu->idle_mask) == gpu->idle_mask)
			return 0;

		if (time_is_before_jiffies(timeout)) {
			dev_warn(gpu->dev,
				 "timed out waiting for idle: idle=0x%x\n",
				 idle);
			return -ETIMEDOUT;
		}

		udelay(5);
	} while (1);
}

static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
	if (gpu->buffer) {
		/* Replace the last WAIT with END */
		etnaviv_buffer_end(gpu);

		/*
		 * We know that only the FE is busy here, this should
		 * happen quickly (as the WAIT is only 200 cycles).  If
		 * we fail, just warn and continue.
		 */
		etnaviv_gpu_wait_idle(gpu, 100);
	}

	return etnaviv_gpu_clk_disable(gpu);
}

static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = mutex_lock_killable(&gpu->lock);
	if (ret)
		return ret;

	etnaviv_gpu_update_clock(gpu);
	etnaviv_gpu_hw_init(gpu);

	gpu->switch_context = true;
	gpu->exec_state = -1;

	mutex_unlock(&gpu->lock);

	return 0;
}

static int
etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	*state = 6;

	return 0;
}

static int
etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	struct etnaviv_gpu *gpu = cdev->devdata;

	*state = gpu->freq_scale;

	return 0;
}

static int
etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long state)
{
	struct etnaviv_gpu *gpu = cdev->devdata;

	mutex_lock(&gpu->lock);
	gpu->freq_scale = state;
	if (!pm_runtime_suspended(gpu->dev))
		etnaviv_gpu_update_clock(gpu);
	mutex_unlock(&gpu->lock);

	return 0;
}
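
/*
 * Cooling state N maps directly onto freq_scale N: state 0 runs the core at
 * full speed, and each higher state halves the clock via the FSCALE divider
 * computed in etnaviv_gpu_update_clock().
 */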
static struct thermal_cooling_device_ops cooling_ops = {
	.get_max_state = etnaviv_gpu_cooling_get_max_state,
	.get_cur_state = etnaviv_gpu_cooling_get_cur_state,
	.set_cur_state = etnaviv_gpu_cooling_set_cur_state,
};

static int etnaviv_gpu_bind(struct device *dev, struct device *master,
	void *data)
{
	struct drm_device *drm = data;
	struct etnaviv_drm_private *priv = drm->dev_private;
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
				(char *)dev_name(dev), gpu, &cooling_ops);
	if (IS_ERR(gpu->cooling))
		return PTR_ERR(gpu->cooling);

#ifdef CONFIG_PM
	ret = pm_runtime_get_sync(gpu->dev);
#else
	ret = etnaviv_gpu_clk_enable(gpu);
#endif
	if (ret < 0) {
		thermal_cooling_device_unregister(gpu->cooling);
		return ret;
	}

	gpu->drm = drm;
	gpu->fence_context = dma_fence_context_alloc(1);
	spin_lock_init(&gpu->fence_spinlock);

	INIT_LIST_HEAD(&gpu->active_cmd_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);
	init_waitqueue_head(&gpu->fence_event);

	setup_deferrable_timer(&gpu->hangcheck_timer, hangcheck_handler,
			       (unsigned long)gpu);

	priv->gpu[priv->num_gpus++] = gpu;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;
}

static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
	void *data)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);

	DBG("%s", dev_name(gpu->dev));

	hangcheck_disable(gpu);

#ifdef CONFIG_PM
	pm_runtime_get_sync(gpu->dev);
	pm_runtime_put_sync_suspend(gpu->dev);
#else
	etnaviv_gpu_hw_suspend(gpu);
#endif

	if (gpu->buffer) {
		etnaviv_cmdbuf_free(gpu->buffer);
		gpu->buffer = NULL;
	}

	if (gpu->cmdbuf_suballoc) {
		etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
		gpu->cmdbuf_suballoc = NULL;
	}

	if (gpu->mmu) {
		etnaviv_iommu_destroy(gpu->mmu);
		gpu->mmu = NULL;
	}

	gpu->drm = NULL;

	thermal_cooling_device_unregister(gpu->cooling);
	gpu->cooling = NULL;
}

static const struct component_ops gpu_ops = {
	.bind = etnaviv_gpu_bind,
	.unbind = etnaviv_gpu_unbind,
};

static const struct of_device_id etnaviv_gpu_match[] = {
	{
		.compatible = "vivante,gc"
	},
	{ /* sentinel */ }
};

static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct etnaviv_gpu *gpu;
	int err;

	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
	if (!gpu)
		return -ENOMEM;

	gpu->dev = &pdev->dev;
	mutex_init(&gpu->lock);

	/* Map registers: */
	gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
	if (IS_ERR(gpu->mmio))
		return PTR_ERR(gpu->mmio);

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		dev_err(dev, "failed to get irq: %d\n", gpu->irq);
		return gpu->irq;
	}

	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
			       dev_name(gpu->dev), gpu);
	if (err) {
		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
		return err;
	}

	/* Get Clocks: */
	gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
	DBG("clk_bus: %p", gpu->clk_bus);
	if (IS_ERR(gpu->clk_bus))
		gpu->clk_bus = NULL;

	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
	DBG("clk_core: %p", gpu->clk_core);
	if (IS_ERR(gpu->clk_core))
		gpu->clk_core = NULL;

	gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
	DBG("clk_shader: %p", gpu->clk_shader);
	if (IS_ERR(gpu->clk_shader))
		gpu->clk_shader = NULL;
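
	/*
	 * All three clocks are treated as optional here: a devm_clk_get()
	 * error simply leaves the pointer NULL, and the NULL checks in
	 * etnaviv_gpu_clk_enable()/_disable() skip the missing ones.
	 */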

	/* TODO: figure out max mapped size */
	dev_set_drvdata(dev, gpu);

	/*
	 * We treat the device as initially suspended.  The runtime PM
	 * autosuspend delay is rather arbitrary: no measurements have
	 * yet been performed to determine an appropriate value.
	 */
	pm_runtime_use_autosuspend(gpu->dev);
	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
	pm_runtime_enable(gpu->dev);

	err = component_add(&pdev->dev, &gpu_ops);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register component: %d\n", err);
		return err;
	}

	return 0;
}

static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &gpu_ops);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

#ifdef CONFIG_PM
static int etnaviv_gpu_rpm_suspend(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	u32 idle, mask;

	/* If we have outstanding fences, we're not idle */
	if (gpu->completed_fence != gpu->active_fence)
		return -EBUSY;

	/* Check whether the hardware (except FE) is idle */
	mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
	if (idle != mask)
		return -EBUSY;

	return etnaviv_gpu_hw_suspend(gpu);
}

static int etnaviv_gpu_rpm_resume(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	ret = etnaviv_gpu_clk_enable(gpu);
	if (ret)
		return ret;

	/* Re-initialise the basic hardware state */
	if (gpu->drm && gpu->buffer) {
		ret = etnaviv_gpu_hw_resume(gpu);
		if (ret) {
			etnaviv_gpu_clk_disable(gpu);
			return ret;
		}
	}

	return 0;
}
#endif

static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
	SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
			   NULL)
};

struct platform_driver etnaviv_gpu_driver = {
	.driver = {
		.name = "etnaviv-gpu",
		.owner = THIS_MODULE,
		.pm = &etnaviv_gpu_pm_ops,
		.of_match_table = etnaviv_gpu_match,
	},
	.probe = etnaviv_gpu_platform_probe,
	.remove = etnaviv_gpu_platform_remove,
	.id_table = gpu_ids,
};