drm/etnaviv: add helper to extract bitfields
[linux-2.6-block.git] / drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1 /*
2  * Copyright (C) 2015 Etnaviv Project
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License version 2 as published by
6  * the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program.  If not, see <http://www.gnu.org/licenses/>.
15  */
16
17 #include <linux/component.h>
18 #include <linux/fence.h>
19 #include <linux/moduleparam.h>
20 #include <linux/of_device.h>
21 #include "etnaviv_dump.h"
22 #include "etnaviv_gpu.h"
23 #include "etnaviv_gem.h"
24 #include "etnaviv_mmu.h"
25 #include "etnaviv_iommu.h"
26 #include "etnaviv_iommu_v2.h"
27 #include "common.xml.h"
28 #include "state.xml.h"
29 #include "state_hi.xml.h"
30 #include "cmdstream.xml.h"
31
32 static const struct platform_device_id gpu_ids[] = {
33         { .name = "etnaviv-gpu,2d" },
34         { },
35 };
36
37 static bool etnaviv_dump_core = true;
38 module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
39
40 /*
41  * Driver functions:
42  */
43
44 int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
45 {
46         switch (param) {
47         case ETNAVIV_PARAM_GPU_MODEL:
48                 *value = gpu->identity.model;
49                 break;
50
51         case ETNAVIV_PARAM_GPU_REVISION:
52                 *value = gpu->identity.revision;
53                 break;
54
55         case ETNAVIV_PARAM_GPU_FEATURES_0:
56                 *value = gpu->identity.features;
57                 break;
58
59         case ETNAVIV_PARAM_GPU_FEATURES_1:
60                 *value = gpu->identity.minor_features0;
61                 break;
62
63         case ETNAVIV_PARAM_GPU_FEATURES_2:
64                 *value = gpu->identity.minor_features1;
65                 break;
66
67         case ETNAVIV_PARAM_GPU_FEATURES_3:
68                 *value = gpu->identity.minor_features2;
69                 break;
70
71         case ETNAVIV_PARAM_GPU_FEATURES_4:
72                 *value = gpu->identity.minor_features3;
73                 break;
74
75         case ETNAVIV_PARAM_GPU_STREAM_COUNT:
76                 *value = gpu->identity.stream_count;
77                 break;
78
79         case ETNAVIV_PARAM_GPU_REGISTER_MAX:
80                 *value = gpu->identity.register_max;
81                 break;
82
83         case ETNAVIV_PARAM_GPU_THREAD_COUNT:
84                 *value = gpu->identity.thread_count;
85                 break;
86
87         case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
88                 *value = gpu->identity.vertex_cache_size;
89                 break;
90
91         case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
92                 *value = gpu->identity.shader_core_count;
93                 break;
94
95         case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
96                 *value = gpu->identity.pixel_pipes;
97                 break;
98
99         case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
100                 *value = gpu->identity.vertex_output_buffer_size;
101                 break;
102
103         case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
104                 *value = gpu->identity.buffer_size;
105                 break;
106
107         case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
108                 *value = gpu->identity.instruction_count;
109                 break;
110
111         case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
112                 *value = gpu->identity.num_constants;
113                 break;
114
115         default:
116                 DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
117                 return -EINVAL;
118         }
119
120         return 0;
121 }
122
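/*
 * Helper to extract a bitfield from a register value, using the FIELD__MASK
 * and FIELD__SHIFT pairs from the generated state headers, e.g.
 * etnaviv_field(specs[0], VIVS_HI_CHIP_SPECS_STREAM_COUNT).
 */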
123 #define etnaviv_field(val, field) \
124         (((val) & field##__MASK) >> field##__SHIFT)
125
126 static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
127 {
128         if (gpu->identity.minor_features0 &
129             chipMinorFeatures0_MORE_MINOR_FEATURES) {
130                 u32 specs[2];
131
132                 specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
133                 specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
134
135                 gpu->identity.stream_count = etnaviv_field(specs[0],
136                                         VIVS_HI_CHIP_SPECS_STREAM_COUNT);
137                 gpu->identity.register_max = etnaviv_field(specs[0],
138                                         VIVS_HI_CHIP_SPECS_REGISTER_MAX);
139                 gpu->identity.thread_count = etnaviv_field(specs[0],
140                                         VIVS_HI_CHIP_SPECS_THREAD_COUNT);
141                 gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
142                                         VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
143                 gpu->identity.shader_core_count = etnaviv_field(specs[0],
144                                         VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
145                 gpu->identity.pixel_pipes = etnaviv_field(specs[0],
146                                         VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
147                 gpu->identity.vertex_output_buffer_size =
148                         etnaviv_field(specs[0],
149                                 VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);
150
151                 gpu->identity.buffer_size = etnaviv_field(specs[1],
152                                         VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
153                 gpu->identity.instruction_count = etnaviv_field(specs[1],
154                                         VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
155                 gpu->identity.num_constants = etnaviv_field(specs[1],
156                                         VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);
157         }
158
159         /* Fill in the stream count if not specified */
160         if (gpu->identity.stream_count == 0) {
161                 if (gpu->identity.model >= 0x1000)
162                         gpu->identity.stream_count = 4;
163                 else
164                         gpu->identity.stream_count = 1;
165         }
166
167         /* Convert the register max value */
168         if (gpu->identity.register_max)
169                 gpu->identity.register_max = 1 << gpu->identity.register_max;
170         else if (gpu->identity.model == chipModel_GC400)
171                 gpu->identity.register_max = 32;
172         else
173                 gpu->identity.register_max = 64;
174
175         /* Convert thread count */
176         if (gpu->identity.thread_count)
177                 gpu->identity.thread_count = 1 << gpu->identity.thread_count;
178         else if (gpu->identity.model == chipModel_GC400)
179                 gpu->identity.thread_count = 64;
180         else if (gpu->identity.model == chipModel_GC500 ||
181                  gpu->identity.model == chipModel_GC530)
182                 gpu->identity.thread_count = 128;
183         else
184                 gpu->identity.thread_count = 256;
185
186         if (gpu->identity.vertex_cache_size == 0)
187                 gpu->identity.vertex_cache_size = 8;
188
189         if (gpu->identity.shader_core_count == 0) {
190                 if (gpu->identity.model >= 0x1000)
191                         gpu->identity.shader_core_count = 2;
192                 else
193                         gpu->identity.shader_core_count = 1;
194         }
195
196         if (gpu->identity.pixel_pipes == 0)
197                 gpu->identity.pixel_pipes = 1;
198
199         /* Convert vertex output buffer size */
200         if (gpu->identity.vertex_output_buffer_size) {
201                 gpu->identity.vertex_output_buffer_size =
202                         1 << gpu->identity.vertex_output_buffer_size;
203         } else if (gpu->identity.model == chipModel_GC400) {
204                 if (gpu->identity.revision < 0x4000)
205                         gpu->identity.vertex_output_buffer_size = 512;
206                 else if (gpu->identity.revision < 0x4200)
207                         gpu->identity.vertex_output_buffer_size = 256;
208                 else
209                         gpu->identity.vertex_output_buffer_size = 128;
210         } else {
211                 gpu->identity.vertex_output_buffer_size = 512;
212         }
213
214         switch (gpu->identity.instruction_count) {
215         case 0:
216                 if ((gpu->identity.model == chipModel_GC2000 &&
217                      gpu->identity.revision == 0x5108) ||
218                     gpu->identity.model == chipModel_GC880)
219                         gpu->identity.instruction_count = 512;
220                 else
221                         gpu->identity.instruction_count = 256;
222                 break;
223
224         case 1:
225                 gpu->identity.instruction_count = 1024;
226                 break;
227
228         case 2:
229                 gpu->identity.instruction_count = 2048;
230                 break;
231
232         default:
233                 gpu->identity.instruction_count = 256;
234                 break;
235         }
236
237         if (gpu->identity.num_constants == 0)
238                 gpu->identity.num_constants = 168;
239 }
240
241 static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
242 {
243         u32 chipIdentity;
244
245         chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);
246
247         /* Special case for older graphics cores. */
248         if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
249                 gpu->identity.model    = chipModel_GC500;
250                 gpu->identity.revision = etnaviv_field(chipIdentity,
251                                          VIVS_HI_CHIP_IDENTITY_REVISION);
252         } else {
253
254                 gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
255                 gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
256
257                 /*
258                  * !!!! HACK ALERT !!!!
259                  * Because people change device IDs without letting software
260                  * know about it - here is the hack to make it all look the
261                  * same.  Only for GC400 family.
262                  */
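                /* e.g. a core reporting model 0x0410 ends up as chipModel_GC400 (0x0400) */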
263                 if ((gpu->identity.model & 0xff00) == 0x0400 &&
264                     gpu->identity.model != chipModel_GC420) {
265                         gpu->identity.model = gpu->identity.model & 0x0400;
266                 }
267
268                 /* Another special case */
269                 if (gpu->identity.model == chipModel_GC300 &&
270                     gpu->identity.revision == 0x2201) {
271                         u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
272                         u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
273
274                         if (chipDate == 0x20080814 && chipTime == 0x12051100) {
275                                 /*
276                                  * This IP has an ECO; put the correct
277                                  * revision in it.
278                                  */
279                                 gpu->identity.revision = 0x1051;
280                         }
281                 }
282         }
283
284         dev_info(gpu->dev, "model: GC%x, revision: %x\n",
285                  gpu->identity.model, gpu->identity.revision);
286
287         gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);
288
289         /* Disable fast clear on GC700. */
290         if (gpu->identity.model == chipModel_GC700)
291                 gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
292
293         if ((gpu->identity.model == chipModel_GC500 &&
294              gpu->identity.revision < 2) ||
295             (gpu->identity.model == chipModel_GC300 &&
296              gpu->identity.revision < 0x2000)) {
297
298                 /*
299                  * GC500 rev 1.x and GC300 rev < 2.0 don't have these
300                  * registers.
301                  */
302                 gpu->identity.minor_features0 = 0;
303                 gpu->identity.minor_features1 = 0;
304                 gpu->identity.minor_features2 = 0;
305                 gpu->identity.minor_features3 = 0;
306         } else
307                 gpu->identity.minor_features0 =
308                                 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);
309
310         if (gpu->identity.minor_features0 &
311             chipMinorFeatures0_MORE_MINOR_FEATURES) {
312                 gpu->identity.minor_features1 =
313                                 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
314                 gpu->identity.minor_features2 =
315                                 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
316                 gpu->identity.minor_features3 =
317                                 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
318         }
319
320         /* GC600 idle register reports zero bits where modules aren't present */
321         if (gpu->identity.model == chipModel_GC600) {
322                 gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
323                                  VIVS_HI_IDLE_STATE_RA |
324                                  VIVS_HI_IDLE_STATE_SE |
325                                  VIVS_HI_IDLE_STATE_PA |
326                                  VIVS_HI_IDLE_STATE_SH |
327                                  VIVS_HI_IDLE_STATE_PE |
328                                  VIVS_HI_IDLE_STATE_DE |
329                                  VIVS_HI_IDLE_STATE_FE;
330         } else {
331                 gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
332         }
333
334         etnaviv_hw_specs(gpu);
335 }
336
337 static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
338 {
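        /*
         * Write the clock value with FSCALE_CMD_LOAD set, then clear the
         * load bit so the new frequency-scale value is latched.
         */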
339         gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
340                   VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
341         gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
342 }
343
344 static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
345 {
346         u32 control, idle;
347         unsigned long timeout;
348         bool failed = true;
349
350         /* TODO
351          *
352          * - clock gating
353          * - pulse eater
354          * - what about VG?
355          */
356
357         /* We hope that the GPU resets in under one second */
358         timeout = jiffies + msecs_to_jiffies(1000);
359
360         while (time_is_after_jiffies(timeout)) {
361                 control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
362                           VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
363
364                 /* enable clock */
365                 etnaviv_gpu_load_clock(gpu, control);
366
367                 /* Wait for stable clock.  Vivante's code waited for 1ms */
368                 usleep_range(1000, 10000);
369
370                 /* isolate the GPU. */
371                 control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
372                 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
373
374                 /* set soft reset. */
375                 control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
376                 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
377
378                 /* wait for reset. */
379                 msleep(1);
380
381                 /* reset soft reset bit. */
382                 control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
383                 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
384
385                 /* reset GPU isolation. */
386                 control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
387                 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
388
389                 /* read idle register. */
390                 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
391
392                 /* try resetting again if the FE is not idle */
393                 if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
394                         dev_dbg(gpu->dev, "FE is not idle\n");
395                         continue;
396                 }
397
398                 /* read reset register. */
399                 control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
400
401                 /* is the GPU idle? */
402                 if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
403                     ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
404                         dev_dbg(gpu->dev, "GPU is not idle\n");
405                         continue;
406                 }
407
408                 failed = false;
409                 break;
410         }
411
412         if (failed) {
413                 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
414                 control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
415
416                 dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
417                         idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
418                         control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
419                         control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");
420
421                 return -EBUSY;
422         }
423
424         /* We rely on the GPU running, so program the clock */
425         control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
426                   VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
427
428         /* enable clock */
429         etnaviv_gpu_load_clock(gpu, control);
430
431         return 0;
432 }
433
434 static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
435 {
436         u16 prefetch;
437
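        /*
         * Certain GC320 revisions (0x5007 and 0x5220) get an adjusted
         * VIVS_MC_DEBUG_MEMORY setting; the values written below are magic
         * numbers whose exact meaning is not documented here.
         */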
438         if (gpu->identity.model == chipModel_GC320 &&
439             gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400 &&
440             (gpu->identity.revision == 0x5007 ||
441              gpu->identity.revision == 0x5220)) {
442                 u32 mc_memory_debug;
443
444                 mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;
445
446                 if (gpu->identity.revision == 0x5007)
447                         mc_memory_debug |= 0x0c;
448                 else
449                         mc_memory_debug |= 0x08;
450
451                 gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
452         }
453
454         /*
455          * Update GPU AXI cache attribute to "cacheable, no allocate".
456          * This is necessary to prevent the iMX6 SoC locking up.
457          */
458         gpu_write(gpu, VIVS_HI_AXI_CONFIG,
459                   VIVS_HI_AXI_CONFIG_AWCACHE(2) |
460                   VIVS_HI_AXI_CONFIG_ARCACHE(2));
461
462         /* GC2000 rev 5108 needs a special bus config */
463         if (gpu->identity.model == chipModel_GC2000 &&
464             gpu->identity.revision == 0x5108) {
465                 u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
466                 bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
467                                 VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
468                 bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
469                               VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
470                 gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
471         }
472
473         /* set base addresses */
474         gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
475         gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
476         gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
477         gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
478         gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
479
480         /* setup the MMU page table pointers */
481         etnaviv_iommu_domain_restore(gpu, gpu->mmu->domain);
482
483         /* Start command processor */
484         prefetch = etnaviv_buffer_init(gpu);
485
486         gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
487         gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS,
488                   gpu->buffer->paddr - gpu->memory_base);
489         gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
490                   VIVS_FE_COMMAND_CONTROL_ENABLE |
491                   VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
492 }
493
494 int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
495 {
496         int ret, i;
497         struct iommu_domain *iommu;
498         enum etnaviv_iommu_version version;
499         bool mmuv2;
500
501         ret = pm_runtime_get_sync(gpu->dev);
502         if (ret < 0)
503                 return ret;
504
505         etnaviv_hw_identify(gpu);
506
507         if (gpu->identity.model == 0) {
508                 dev_err(gpu->dev, "Unknown GPU model\n");
509                 ret = -ENXIO;
510                 goto fail;
511         }
512
513         /* Exclude VG cores with FE2.0 */
514         if (gpu->identity.features & chipFeatures_PIPE_VG &&
515             gpu->identity.features & chipFeatures_FE20) {
516                 dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
517                 ret = -ENXIO;
518                 goto fail;
519         }
520
521         ret = etnaviv_hw_reset(gpu);
522         if (ret)
523                 goto fail;
524
525         /* Set up the IOMMU.  Eventually we will (I think) do this once per context
526          * and have separate page tables per context.  For now, to keep things
527          * simple and to get something working, just use a single address space:
528          */
529         mmuv2 = gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION;
530         dev_dbg(gpu->dev, "mmuv2: %d\n", mmuv2);
531
532         if (!mmuv2) {
533                 iommu = etnaviv_iommu_domain_alloc(gpu);
534                 version = ETNAVIV_IOMMU_V1;
535         } else {
536                 iommu = etnaviv_iommu_v2_domain_alloc(gpu);
537                 version = ETNAVIV_IOMMU_V2;
538         }
539
540         if (!iommu) {
541                 ret = -ENOMEM;
542                 goto fail;
543         }
544
545         /* TODO: we will leak memory here - fix it! */
546
547         gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
548         if (!gpu->mmu) {
549                 ret = -ENOMEM;
550                 goto fail;
551         }
552
553         /* Create buffer: */
554         gpu->buffer = etnaviv_gpu_cmdbuf_new(gpu, PAGE_SIZE, 0);
555         if (!gpu->buffer) {
556                 ret = -ENOMEM;
557                 dev_err(gpu->dev, "could not create command buffer\n");
558                 goto fail;
559         }
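        /* Command buffers must lie within 2 GiB of memory_base for the FE to address them. */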
560         if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
561                 ret = -EINVAL;
562                 dev_err(gpu->dev,
563                         "command buffer outside valid memory window\n");
564                 goto free_buffer;
565         }
566
567         /* Setup event management */
568         spin_lock_init(&gpu->event_spinlock);
569         init_completion(&gpu->event_free);
570         for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
571                 gpu->event[i].used = false;
572                 complete(&gpu->event_free);
573         }
574
575         /* Now program the hardware */
576         mutex_lock(&gpu->lock);
577         etnaviv_gpu_hw_init(gpu);
578         mutex_unlock(&gpu->lock);
579
580         pm_runtime_mark_last_busy(gpu->dev);
581         pm_runtime_put_autosuspend(gpu->dev);
582
583         return 0;
584
585 free_buffer:
586         etnaviv_gpu_cmdbuf_free(gpu->buffer);
587         gpu->buffer = NULL;
588 fail:
589         pm_runtime_mark_last_busy(gpu->dev);
590         pm_runtime_put_autosuspend(gpu->dev);
591
592         return ret;
593 }
594
595 #ifdef CONFIG_DEBUG_FS
596 struct dma_debug {
597         u32 address[2];
598         u32 state[2];
599 };
600
601 static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
602 {
603         u32 i;
604
605         debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
606         debug->state[0]   = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
607
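        /* Re-sample the FE DMA address and state; if neither changes over these reads, the front end has most likely stalled. */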
608         for (i = 0; i < 500; i++) {
609                 debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
610                 debug->state[1]   = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
611
612                 if (debug->address[0] != debug->address[1])
613                         break;
614
615                 if (debug->state[0] != debug->state[1])
616                         break;
617         }
618 }
619
620 int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
621 {
622         struct dma_debug debug;
623         u32 dma_lo, dma_hi, axi, idle;
624         int ret;
625
626         seq_printf(m, "%s Status:\n", dev_name(gpu->dev));
627
628         ret = pm_runtime_get_sync(gpu->dev);
629         if (ret < 0)
630                 return ret;
631
632         dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
633         dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
634         axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
635         idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
636
637         verify_dma(gpu, &debug);
638
639         seq_puts(m, "\tfeatures\n");
640         seq_printf(m, "\t minor_features0: 0x%08x\n",
641                    gpu->identity.minor_features0);
642         seq_printf(m, "\t minor_features1: 0x%08x\n",
643                    gpu->identity.minor_features1);
644         seq_printf(m, "\t minor_features2: 0x%08x\n",
645                    gpu->identity.minor_features2);
646         seq_printf(m, "\t minor_features3: 0x%08x\n",
647                    gpu->identity.minor_features3);
648
649         seq_puts(m, "\tspecs\n");
650         seq_printf(m, "\t stream_count:  %d\n",
651                         gpu->identity.stream_count);
652         seq_printf(m, "\t register_max: %d\n",
653                         gpu->identity.register_max);
654         seq_printf(m, "\t thread_count: %d\n",
655                         gpu->identity.thread_count);
656         seq_printf(m, "\t vertex_cache_size: %d\n",
657                         gpu->identity.vertex_cache_size);
658         seq_printf(m, "\t shader_core_count: %d\n",
659                         gpu->identity.shader_core_count);
660         seq_printf(m, "\t pixel_pipes: %d\n",
661                         gpu->identity.pixel_pipes);
662         seq_printf(m, "\t vertex_output_buffer_size: %d\n",
663                         gpu->identity.vertex_output_buffer_size);
664         seq_printf(m, "\t buffer_size: %d\n",
665                         gpu->identity.buffer_size);
666         seq_printf(m, "\t instruction_count: %d\n",
667                         gpu->identity.instruction_count);
668         seq_printf(m, "\t num_constants: %d\n",
669                         gpu->identity.num_constants);
670
671         seq_printf(m, "\taxi: 0x%08x\n", axi);
672         seq_printf(m, "\tidle: 0x%08x\n", idle);
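        /* Report modules this core doesn't implement (outside idle_mask) as idle, leaving the AXI low-power bit as read. */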
673         idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
674         if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
675                 seq_puts(m, "\t FE is not idle\n");
676         if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
677                 seq_puts(m, "\t DE is not idle\n");
678         if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
679                 seq_puts(m, "\t PE is not idle\n");
680         if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
681                 seq_puts(m, "\t SH is not idle\n");
682         if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
683                 seq_puts(m, "\t PA is not idle\n");
684         if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
685                 seq_puts(m, "\t SE is not idle\n");
686         if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
687                 seq_puts(m, "\t RA is not idle\n");
688         if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
689                 seq_puts(m, "\t TX is not idle\n");
690         if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
691                 seq_puts(m, "\t VG is not idle\n");
692         if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
693                 seq_puts(m, "\t IM is not idle\n");
694         if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
695                 seq_puts(m, "\t FP is not idle\n");
696         if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
697                 seq_puts(m, "\t TS is not idle\n");
698         if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
699                 seq_puts(m, "\t AXI low power mode\n");
700
701         if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
702                 u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
703                 u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
704                 u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);
705
706                 seq_puts(m, "\tMC\n");
707                 seq_printf(m, "\t read0: 0x%08x\n", read0);
708                 seq_printf(m, "\t read1: 0x%08x\n", read1);
709                 seq_printf(m, "\t write: 0x%08x\n", write);
710         }
711
712         seq_puts(m, "\tDMA ");
713
714         if (debug.address[0] == debug.address[1] &&
715             debug.state[0] == debug.state[1]) {
716                 seq_puts(m, "seems to be stuck\n");
717         } else if (debug.address[0] == debug.address[1]) {
718                 seq_puts(m, "address is constant\n");
719         } else {
720                 seq_puts(m, "is running\n");
721         }
722
723         seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
724         seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
725         seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
726         seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
727         seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
728                    dma_lo, dma_hi);
729
730         ret = 0;
731
732         pm_runtime_mark_last_busy(gpu->dev);
733         pm_runtime_put_autosuspend(gpu->dev);
734
735         return ret;
736 }
737 #endif
738
739 /*
740  * Power Management:
741  */
742 static int enable_clk(struct etnaviv_gpu *gpu)
743 {
744         if (gpu->clk_core)
745                 clk_prepare_enable(gpu->clk_core);
746         if (gpu->clk_shader)
747                 clk_prepare_enable(gpu->clk_shader);
748
749         return 0;
750 }
751
752 static int disable_clk(struct etnaviv_gpu *gpu)
753 {
754         if (gpu->clk_core)
755                 clk_disable_unprepare(gpu->clk_core);
756         if (gpu->clk_shader)
757                 clk_disable_unprepare(gpu->clk_shader);
758
759         return 0;
760 }
761
762 static int enable_axi(struct etnaviv_gpu *gpu)
763 {
764         if (gpu->clk_bus)
765                 clk_prepare_enable(gpu->clk_bus);
766
767         return 0;
768 }
769
770 static int disable_axi(struct etnaviv_gpu *gpu)
771 {
772         if (gpu->clk_bus)
773                 clk_disable_unprepare(gpu->clk_bus);
774
775         return 0;
776 }
777
778 /*
779  * Hangcheck detection for locked gpu:
780  */
781 static void recover_worker(struct work_struct *work)
782 {
783         struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
784                                                recover_work);
785         unsigned long flags;
786         unsigned int i;
787
788         dev_err(gpu->dev, "hangcheck recover!\n");
789
790         if (pm_runtime_get_sync(gpu->dev) < 0)
791                 return;
792
793         mutex_lock(&gpu->lock);
794
795         /* Only catch the first event, or when manually re-armed */
796         if (etnaviv_dump_core) {
797                 etnaviv_core_dump(gpu);
798                 etnaviv_dump_core = false;
799         }
800
801         etnaviv_hw_reset(gpu);
802
803         /* complete all events, the GPU won't do it after the reset */
804         spin_lock_irqsave(&gpu->event_spinlock, flags);
805         for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
806                 if (!gpu->event[i].used)
807                         continue;
808                 fence_signal(gpu->event[i].fence);
809                 gpu->event[i].fence = NULL;
810                 gpu->event[i].used = false;
811                 complete(&gpu->event_free);
812                 /*
813                  * Decrement the PM count for each stuck event. This is safe
814                  * even in atomic context as we use ASYNC RPM here.
815                  */
816                 pm_runtime_put_autosuspend(gpu->dev);
817         }
818         spin_unlock_irqrestore(&gpu->event_spinlock, flags);
819         gpu->completed_fence = gpu->active_fence;
820
821         etnaviv_gpu_hw_init(gpu);
822         gpu->switch_context = true;
823
824         mutex_unlock(&gpu->lock);
825         pm_runtime_mark_last_busy(gpu->dev);
826         pm_runtime_put_autosuspend(gpu->dev);
827
828         /* Retire the buffer objects in a work */
829         etnaviv_queue_work(gpu->drm, &gpu->retire_work);
830 }
831
832 static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
833 {
834         DBG("%s", dev_name(gpu->dev));
835         mod_timer(&gpu->hangcheck_timer,
836                   round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
837 }
838
839 static void hangcheck_handler(unsigned long data)
840 {
841         struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data;
842         u32 fence = gpu->completed_fence;
843         bool progress = false;
844
845         if (fence != gpu->hangcheck_fence) {
846                 gpu->hangcheck_fence = fence;
847                 progress = true;
848         }
849
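        /*
         * No fence progress; see whether the FE DMA address has moved instead.
         * Movement within 16 bytes is ignored, as the FE is assumed to sit in
         * a short WAIT/LINK loop while it has nothing to do.
         */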
850         if (!progress) {
851                 u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
852                 int change = dma_addr - gpu->hangcheck_dma_addr;
853
854                 if (change < 0 || change > 16) {
855                         gpu->hangcheck_dma_addr = dma_addr;
856                         progress = true;
857                 }
858         }
859
860         if (!progress && fence_after(gpu->active_fence, fence)) {
861                 dev_err(gpu->dev, "hangcheck detected gpu lockup!\n");
862                 dev_err(gpu->dev, "     completed fence: %u\n", fence);
863                 dev_err(gpu->dev, "     active fence: %u\n",
864                         gpu->active_fence);
865                 etnaviv_queue_work(gpu->drm, &gpu->recover_work);
866         }
867
868         /* if there is still pending work, re-arm the hangcheck timer: */
869         if (fence_after(gpu->active_fence, gpu->hangcheck_fence))
870                 hangcheck_timer_reset(gpu);
871 }
872
873 static void hangcheck_disable(struct etnaviv_gpu *gpu)
874 {
875         del_timer_sync(&gpu->hangcheck_timer);
876         cancel_work_sync(&gpu->recover_work);
877 }
878
879 /* fence object management */
880 struct etnaviv_fence {
881         struct etnaviv_gpu *gpu;
882         struct fence base;
883 };
884
885 static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence)
886 {
887         return container_of(fence, struct etnaviv_fence, base);
888 }
889
890 static const char *etnaviv_fence_get_driver_name(struct fence *fence)
891 {
892         return "etnaviv";
893 }
894
895 static const char *etnaviv_fence_get_timeline_name(struct fence *fence)
896 {
897         struct etnaviv_fence *f = to_etnaviv_fence(fence);
898
899         return dev_name(f->gpu->dev);
900 }
901
902 static bool etnaviv_fence_enable_signaling(struct fence *fence)
903 {
904         return true;
905 }
906
907 static bool etnaviv_fence_signaled(struct fence *fence)
908 {
909         struct etnaviv_fence *f = to_etnaviv_fence(fence);
910
911         return fence_completed(f->gpu, f->base.seqno);
912 }
913
914 static void etnaviv_fence_release(struct fence *fence)
915 {
916         struct etnaviv_fence *f = to_etnaviv_fence(fence);
917
918         kfree_rcu(f, base.rcu);
919 }
920
921 static const struct fence_ops etnaviv_fence_ops = {
922         .get_driver_name = etnaviv_fence_get_driver_name,
923         .get_timeline_name = etnaviv_fence_get_timeline_name,
924         .enable_signaling = etnaviv_fence_enable_signaling,
925         .signaled = etnaviv_fence_signaled,
926         .wait = fence_default_wait,
927         .release = etnaviv_fence_release,
928 };
929
930 static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
931 {
932         struct etnaviv_fence *f;
933
934         f = kzalloc(sizeof(*f), GFP_KERNEL);
935         if (!f)
936                 return NULL;
937
938         f->gpu = gpu;
939
940         fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
941                    gpu->fence_context, ++gpu->next_fence);
942
943         return &f->base;
944 }
945
946 int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
947         unsigned int context, bool exclusive)
948 {
949         struct reservation_object *robj = etnaviv_obj->resv;
950         struct reservation_object_list *fobj;
951         struct fence *fence;
952         int i, ret;
953
954         if (!exclusive) {
955                 ret = reservation_object_reserve_shared(robj);
956                 if (ret)
957                         return ret;
958         }
959
960         /*
961          * If we have any shared fences, then the exclusive fence
962          * should be ignored as it will already have been signalled.
963          */
964         fobj = reservation_object_get_list(robj);
965         if (!fobj || fobj->shared_count == 0) {
966                 /* Wait on any existing exclusive fence which isn't our own */
967                 fence = reservation_object_get_excl(robj);
968                 if (fence && fence->context != context) {
969                         ret = fence_wait(fence, true);
970                         if (ret)
971                                 return ret;
972                 }
973         }
974
975         if (!exclusive || !fobj)
976                 return 0;
977
978         for (i = 0; i < fobj->shared_count; i++) {
979                 fence = rcu_dereference_protected(fobj->shared[i],
980                                                 reservation_object_held(robj));
981                 if (fence->context != context) {
982                         ret = fence_wait(fence, true);
983                         if (ret)
984                                 return ret;
985                 }
986         }
987
988         return 0;
989 }
990
991 /*
992  * event management:
993  */
994
995 static unsigned int event_alloc(struct etnaviv_gpu *gpu)
996 {
997         unsigned long ret, flags;
998         unsigned int i, event = ~0U;
999
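        /*
         * Wait for a free event slot: event_free is completed once per
         * available slot, so this blocks while all slots are in use.
         */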
1000         ret = wait_for_completion_timeout(&gpu->event_free,
1001                                           msecs_to_jiffies(10 * 10000));
1002         if (!ret)
1003                 dev_err(gpu->dev, "wait_for_completion_timeout failed");
1004
1005         spin_lock_irqsave(&gpu->event_spinlock, flags);
1006
1007         /* find first free event */
1008         for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
1009                 if (gpu->event[i].used == false) {
1010                         gpu->event[i].used = true;
1011                         event = i;
1012                         break;
1013                 }
1014         }
1015
1016         spin_unlock_irqrestore(&gpu->event_spinlock, flags);
1017
1018         return event;
1019 }
1020
1021 static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
1022 {
1023         unsigned long flags;
1024
1025         spin_lock_irqsave(&gpu->event_spinlock, flags);
1026
1027         if (gpu->event[event].used == false) {
1028                 dev_warn(gpu->dev, "event %u is already marked as free",
1029                          event);
1030                 spin_unlock_irqrestore(&gpu->event_spinlock, flags);
1031         } else {
1032                 gpu->event[event].used = false;
1033                 spin_unlock_irqrestore(&gpu->event_spinlock, flags);
1034
1035                 complete(&gpu->event_free);
1036         }
1037 }
1038
1039 /*
1040  * Cmdstream submission/retirement:
1041  */
1042
1043 struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
1044         size_t nr_bos)
1045 {
1046         struct etnaviv_cmdbuf *cmdbuf;
1047         size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo[0]),
1048                                  sizeof(*cmdbuf));
1049
1050         cmdbuf = kzalloc(sz, GFP_KERNEL);
1051         if (!cmdbuf)
1052                 return NULL;
1053
1054         cmdbuf->vaddr = dma_alloc_writecombine(gpu->dev, size, &cmdbuf->paddr,
1055                                                GFP_KERNEL);
1056         if (!cmdbuf->vaddr) {
1057                 kfree(cmdbuf);
1058                 return NULL;
1059         }
1060
1061         cmdbuf->gpu = gpu;
1062         cmdbuf->size = size;
1063
1064         return cmdbuf;
1065 }
1066
1067 void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
1068 {
1069         dma_free_writecombine(cmdbuf->gpu->dev, cmdbuf->size,
1070                               cmdbuf->vaddr, cmdbuf->paddr);
1071         kfree(cmdbuf);
1072 }
1073
1074 static void retire_worker(struct work_struct *work)
1075 {
1076         struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
1077                                                retire_work);
1078         u32 fence = gpu->completed_fence;
1079         struct etnaviv_cmdbuf *cmdbuf, *tmp;
1080         unsigned int i;
1081
1082         mutex_lock(&gpu->lock);
1083         list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
1084                 if (!fence_is_signaled(cmdbuf->fence))
1085                         break;
1086
1087                 list_del(&cmdbuf->node);
1088                 fence_put(cmdbuf->fence);
1089
1090                 for (i = 0; i < cmdbuf->nr_bos; i++) {
1091                         struct etnaviv_gem_object *etnaviv_obj = cmdbuf->bo[i];
1092
1093                         atomic_dec(&etnaviv_obj->gpu_active);
1094                         /* drop the refcount taken in etnaviv_gpu_submit */
1095                         etnaviv_gem_put_iova(gpu, &etnaviv_obj->base);
1096                 }
1097
1098                 etnaviv_gpu_cmdbuf_free(cmdbuf);
1099         }
1100
1101         gpu->retired_fence = fence;
1102
1103         mutex_unlock(&gpu->lock);
1104
1105         wake_up_all(&gpu->fence_event);
1106 }
1107
1108 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
1109         u32 fence, struct timespec *timeout)
1110 {
1111         int ret;
1112
1113         if (fence_after(fence, gpu->next_fence)) {
1114                 DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
1115                                 fence, gpu->next_fence);
1116                 return -EINVAL;
1117         }
1118
1119         if (!timeout) {
1120                 /* No timeout was requested: just test for completion */
1121                 ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
1122         } else {
1123                 unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);
1124
1125                 ret = wait_event_interruptible_timeout(gpu->fence_event,
1126                                                 fence_completed(gpu, fence),
1127                                                 remaining);
1128                 if (ret == 0) {
1129                         DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
1130                                 fence, gpu->retired_fence,
1131                                 gpu->completed_fence);
1132                         ret = -ETIMEDOUT;
1133                 } else if (ret != -ERESTARTSYS) {
1134                         ret = 0;
1135                 }
1136         }
1137
1138         return ret;
1139 }
1140
1141 /*
1142  * Wait for an object to become inactive.  This, on its own, is not race
1143  * free: the object is moved by the retire worker off the active list, and
1144  * then the iova is put.  Moreover, the object could be re-submitted just
1145  * after we notice that it's become inactive.
1146  *
1147  * Although the retirement happens under the gpu lock, we don't want to hold
1148  * that lock in this function while waiting.
1149  */
1150 int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
1151         struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
1152 {
1153         unsigned long remaining;
1154         long ret;
1155
1156         if (!timeout)
1157                 return !is_active(etnaviv_obj) ? 0 : -EBUSY;
1158
1159         remaining = etnaviv_timeout_to_jiffies(timeout);
1160
1161         ret = wait_event_interruptible_timeout(gpu->fence_event,
1162                                                !is_active(etnaviv_obj),
1163                                                remaining);
1164         if (ret > 0) {
1165                 struct etnaviv_drm_private *priv = gpu->drm->dev_private;
1166
1167                 /* Synchronise with the retire worker */
1168                 flush_workqueue(priv->wq);
1169                 return 0;
1170         } else if (ret == -ERESTARTSYS) {
1171                 return -ERESTARTSYS;
1172         } else {
1173                 return -ETIMEDOUT;
1174         }
1175 }
1176
1177 int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu)
1178 {
1179         return pm_runtime_get_sync(gpu->dev);
1180 }
1181
1182 void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
1183 {
1184         pm_runtime_mark_last_busy(gpu->dev);
1185         pm_runtime_put_autosuspend(gpu->dev);
1186 }
1187
1188 /* add bo's to gpu's ring, and kick gpu: */
1189 int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
1190         struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
1191 {
1192         struct fence *fence;
1193         unsigned int event, i;
1194         int ret;
1195
1196         ret = etnaviv_gpu_pm_get_sync(gpu);
1197         if (ret < 0)
1198                 return ret;
1199
1200         mutex_lock(&gpu->lock);
1201
1202         /*
1203          * TODO
1204          *
1205          * - flush
1206          * - data endian
1207          * - prefetch
1208          *
1209          */
1210
1211         event = event_alloc(gpu);
1212         if (unlikely(event == ~0U)) {
1213                 DRM_ERROR("no free event\n");
1214                 ret = -EBUSY;
1215                 goto out_unlock;
1216         }
1217
1218         fence = etnaviv_gpu_fence_alloc(gpu);
1219         if (!fence) {
1220                 event_free(gpu, event);
1221                 ret = -ENOMEM;
1222                 goto out_unlock;
1223         }
1224
1225         gpu->event[event].fence = fence;
1226         submit->fence = fence->seqno;
1227         gpu->active_fence = submit->fence;
1228
1229         if (gpu->lastctx != cmdbuf->ctx) {
1230                 gpu->mmu->need_flush = true;
1231                 gpu->switch_context = true;
1232                 gpu->lastctx = cmdbuf->ctx;
1233         }
1234
1235         etnaviv_buffer_queue(gpu, event, cmdbuf);
1236
1237         cmdbuf->fence = fence;
1238         list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);
1239
1240         /* We're committed to adding this command buffer, so hold a PM reference */
1241         pm_runtime_get_noresume(gpu->dev);
1242
1243         for (i = 0; i < submit->nr_bos; i++) {
1244                 struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
1245                 u32 iova;
1246
1247                 /* Each cmdbuf takes a refcount on the iova */
1248                 etnaviv_gem_get_iova(gpu, &etnaviv_obj->base, &iova);
1249                 cmdbuf->bo[i] = etnaviv_obj;
1250                 atomic_inc(&etnaviv_obj->gpu_active);
1251
1252                 if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
1253                         reservation_object_add_excl_fence(etnaviv_obj->resv,
1254                                                           fence);
1255                 else
1256                         reservation_object_add_shared_fence(etnaviv_obj->resv,
1257                                                             fence);
1258         }
1259         cmdbuf->nr_bos = submit->nr_bos;
1260         hangcheck_timer_reset(gpu);
1261         ret = 0;
1262
1263 out_unlock:
1264         mutex_unlock(&gpu->lock);
1265
1266         etnaviv_gpu_pm_put(gpu);
1267
1268         return ret;
1269 }
1270
1271 /*
1272  * Init/Cleanup:
1273  */
1274 static irqreturn_t irq_handler(int irq, void *data)
1275 {
1276         struct etnaviv_gpu *gpu = data;
1277         irqreturn_t ret = IRQ_NONE;
1278
1279         u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);
1280
1281         if (intr != 0) {
1282                 int event;
1283
1284                 pm_runtime_mark_last_busy(gpu->dev);
1285
1286                 dev_dbg(gpu->dev, "intr 0x%08x\n", intr);
1287
1288                 if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
1289                         dev_err(gpu->dev, "AXI bus error\n");
1290                         intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
1291                 }
1292
1293                 while ((event = ffs(intr)) != 0) {
1294                         struct fence *fence;
1295
1296                         event -= 1;
1297
1298                         intr &= ~(1 << event);
1299
1300                         dev_dbg(gpu->dev, "event %u\n", event);
1301
1302                         fence = gpu->event[event].fence;
1303                         gpu->event[event].fence = NULL;
1304                         fence_signal(fence);
1305
1306                         /*
1307                          * Events can be processed out of order.  Eg,
1308                          * - allocate and queue event 0
1309                          * - allocate event 1
1310                          * - event 0 completes, we process it
1311                          * - allocate and queue event 0
1312                          * - event 1 and event 0 complete
1313                          * we can end up processing event 0 first, then 1.
1314                          */
1315                         if (fence_after(fence->seqno, gpu->completed_fence))
1316                                 gpu->completed_fence = fence->seqno;
1317
1318                         event_free(gpu, event);
1319
1320                         /*
1321                          * We need to balance the runtime PM count caused by
1322                          * each submission.  Upon submission, we increment
1323                          * the runtime PM counter, and allocate one event.
1324                          * So here, we put the runtime PM count for each
1325                          * completed event.
1326                          */
1327                         pm_runtime_put_autosuspend(gpu->dev);
1328                 }
1329
1330                 /* Retire the buffer objects in a work */
1331                 etnaviv_queue_work(gpu->drm, &gpu->retire_work);
1332
1333                 ret = IRQ_HANDLED;
1334         }
1335
1336         return ret;
1337 }
1338
1339 static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
1340 {
1341         int ret;
1342
1343         ret = enable_clk(gpu);
1344         if (ret)
1345                 return ret;
1346
1347         ret = enable_axi(gpu);
1348         if (ret) {
1349                 disable_clk(gpu);
1350                 return ret;
1351         }
1352
1353         return 0;
1354 }
1355
1356 static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
1357 {
1358         int ret;
1359
1360         ret = disable_axi(gpu);
1361         if (ret)
1362                 return ret;
1363
1364         ret = disable_clk(gpu);
1365         if (ret)
1366                 return ret;
1367
1368         return 0;
1369 }
1370
1371 static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
1372 {
1373         if (gpu->buffer) {
1374                 unsigned long timeout;
1375
1376                 /* Replace the last WAIT with END */
1377                 etnaviv_buffer_end(gpu);
1378
1379                 /*
1380                  * We know that only the FE is busy here; this should
1381                  * happen quickly (as the WAIT is only 200 cycles).  If
1382                  * we fail, just warn and continue.
1383                  */
1384                 timeout = jiffies + msecs_to_jiffies(100);
1385                 do {
1386                         u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
1387
1388                         if ((idle & gpu->idle_mask) == gpu->idle_mask)
1389                                 break;
1390
1391                         if (time_is_before_jiffies(timeout)) {
1392                                 dev_warn(gpu->dev,
1393                                          "timed out waiting for idle: idle=0x%x\n",
1394                                          idle);
1395                                 break;
1396                         }
1397
1398                         udelay(5);
1399                 } while (1);
1400         }
1401
1402         return etnaviv_gpu_clk_disable(gpu);
1403 }
1404
1405 #ifdef CONFIG_PM
1406 static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
1407 {
1408         u32 clock;
1409         int ret;
1410
1411         ret = mutex_lock_killable(&gpu->lock);
1412         if (ret)
1413                 return ret;
1414
1415         clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
1416                 VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
1417
1418         etnaviv_gpu_load_clock(gpu, clock);
1419         etnaviv_gpu_hw_init(gpu);
1420
1421         gpu->switch_context = true;
1422
1423         mutex_unlock(&gpu->lock);
1424
1425         return 0;
1426 }
1427 #endif
1428
1429 static int etnaviv_gpu_bind(struct device *dev, struct device *master,
1430         void *data)
1431 {
1432         struct drm_device *drm = data;
1433         struct etnaviv_drm_private *priv = drm->dev_private;
1434         struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1435         int ret;
1436
1437 #ifdef CONFIG_PM
1438         ret = pm_runtime_get_sync(gpu->dev);
1439 #else
1440         ret = etnaviv_gpu_clk_enable(gpu);
1441 #endif
1442         if (ret < 0)
1443                 return ret;
1444
1445         gpu->drm = drm;
1446         gpu->fence_context = fence_context_alloc(1);
1447         spin_lock_init(&gpu->fence_spinlock);
1448
1449         INIT_LIST_HEAD(&gpu->active_cmd_list);
1450         INIT_WORK(&gpu->retire_work, retire_worker);
1451         INIT_WORK(&gpu->recover_work, recover_worker);
1452         init_waitqueue_head(&gpu->fence_event);
1453
1454         setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
1455                         (unsigned long)gpu);
1456
1457         priv->gpu[priv->num_gpus++] = gpu;
1458
1459         pm_runtime_mark_last_busy(gpu->dev);
1460         pm_runtime_put_autosuspend(gpu->dev);
1461
1462         return 0;
1463 }
1464
1465 static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
1466         void *data)
1467 {
1468         struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1469
1470         DBG("%s", dev_name(gpu->dev));
1471
1472         hangcheck_disable(gpu);
1473
1474 #ifdef CONFIG_PM
1475         pm_runtime_get_sync(gpu->dev);
1476         pm_runtime_put_sync_suspend(gpu->dev);
1477 #else
1478         etnaviv_gpu_hw_suspend(gpu);
1479 #endif
1480
1481         if (gpu->buffer) {
1482                 etnaviv_gpu_cmdbuf_free(gpu->buffer);
1483                 gpu->buffer = NULL;
1484         }
1485
1486         if (gpu->mmu) {
1487                 etnaviv_iommu_destroy(gpu->mmu);
1488                 gpu->mmu = NULL;
1489         }
1490
1491         gpu->drm = NULL;
1492 }
1493
1494 static const struct component_ops gpu_ops = {
1495         .bind = etnaviv_gpu_bind,
1496         .unbind = etnaviv_gpu_unbind,
1497 };
1498
1499 static const struct of_device_id etnaviv_gpu_match[] = {
1500         {
1501                 .compatible = "vivante,gc"
1502         },
1503         { /* sentinel */ }
1504 };
1505
1506 static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
1507 {
1508         struct device *dev = &pdev->dev;
1509         struct etnaviv_gpu *gpu;
1510         int err = 0;
1511
1512         gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
1513         if (!gpu)
1514                 return -ENOMEM;
1515
1516         gpu->dev = &pdev->dev;
1517         mutex_init(&gpu->lock);
1518
1519         /*
1520          * Set the GPU base address to the start of physical memory.  This
1521          * ensures that if we have up to 2GB, the v1 MMU can address the
1522          * highest memory.  This is important as command buffers may be
1523          * allocated outside of this limit.
1524          */
1525         gpu->memory_base = PHYS_OFFSET;
1526
1527         /* Map registers: */
1528         gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
1529         if (IS_ERR(gpu->mmio))
1530                 return PTR_ERR(gpu->mmio);
1531
1532         /* Get Interrupt: */
1533         gpu->irq = platform_get_irq(pdev, 0);
1534         if (gpu->irq < 0) {
1535                 err = gpu->irq;
1536                 dev_err(dev, "failed to get irq: %d\n", err);
1537                 goto fail;
1538         }
1539
1540         err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
1541                                dev_name(gpu->dev), gpu);
1542         if (err) {
1543                 dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
1544                 goto fail;
1545         }
1546
1547         /* Get Clocks: */
1548         gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
1549         DBG("clk_bus: %p", gpu->clk_bus);
1550         if (IS_ERR(gpu->clk_bus))
1551                 gpu->clk_bus = NULL;
1552
1553         gpu->clk_core = devm_clk_get(&pdev->dev, "core");
1554         DBG("clk_core: %p", gpu->clk_core);
1555         if (IS_ERR(gpu->clk_core))
1556                 gpu->clk_core = NULL;
1557
1558         gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
1559         DBG("clk_shader: %p", gpu->clk_shader);
1560         if (IS_ERR(gpu->clk_shader))
1561                 gpu->clk_shader = NULL;
1562
1563         /* TODO: figure out max mapped size */
1564         dev_set_drvdata(dev, gpu);
1565
1566         /*
1567          * We treat the device as initially suspended.  The runtime PM
1568          * autosuspend delay is rather arbitrary: no measurements have
1569          * yet been performed to determine an appropriate value.
1570          */
1571         pm_runtime_use_autosuspend(gpu->dev);
1572         pm_runtime_set_autosuspend_delay(gpu->dev, 200);
1573         pm_runtime_enable(gpu->dev);
1574
1575         err = component_add(&pdev->dev, &gpu_ops);
1576         if (err < 0) {
1577                 dev_err(&pdev->dev, "failed to register component: %d\n", err);
1578                 goto fail;
1579         }
1580
1581         return 0;
1582
1583 fail:
1584         return err;
1585 }
1586
1587 static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
1588 {
1589         component_del(&pdev->dev, &gpu_ops);
1590         pm_runtime_disable(&pdev->dev);
1591         return 0;
1592 }
1593
1594 #ifdef CONFIG_PM
1595 static int etnaviv_gpu_rpm_suspend(struct device *dev)
1596 {
1597         struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1598         u32 idle, mask;
1599
1600         /* If we have outstanding fences, we're not idle */
1601         if (gpu->completed_fence != gpu->active_fence)
1602                 return -EBUSY;
1603
1604         /* Check whether the hardware (except FE) is idle */
1605         mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
1606         idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
1607         if (idle != mask)
1608                 return -EBUSY;
1609
1610         return etnaviv_gpu_hw_suspend(gpu);
1611 }
1612
1613 static int etnaviv_gpu_rpm_resume(struct device *dev)
1614 {
1615         struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1616         int ret;
1617
1618         ret = etnaviv_gpu_clk_enable(gpu);
1619         if (ret)
1620                 return ret;
1621
1622         /* Re-initialise the basic hardware state */
1623         if (gpu->drm && gpu->buffer) {
1624                 ret = etnaviv_gpu_hw_resume(gpu);
1625                 if (ret) {
1626                         etnaviv_gpu_clk_disable(gpu);
1627                         return ret;
1628                 }
1629         }
1630
1631         return 0;
1632 }
1633 #endif
1634
1635 static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
1636         SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
1637                            NULL)
1638 };
1639
1640 struct platform_driver etnaviv_gpu_driver = {
1641         .driver = {
1642                 .name = "etnaviv-gpu",
1643                 .owner = THIS_MODULE,
1644                 .pm = &etnaviv_gpu_pm_ops,
1645                 .of_match_table = etnaviv_gpu_match,
1646         },
1647         .probe = etnaviv_gpu_platform_probe,
1648         .remove = etnaviv_gpu_platform_remove,
1649         .id_table = gpu_ids,
1650 };