/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/component.h>
#include <linux/dma-fence.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
#include <linux/thermal.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"
#include "etnaviv_sched.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "cmdstream.xml.h"

#ifndef PHYS_OFFSET
#define PHYS_OFFSET 0
#endif

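/*
 * On platforms that define it (e.g. 32-bit ARM), PHYS_OFFSET is the
 * physical address at which system RAM starts; the fallback of 0 above
 * only exists so the linear window placement in etnaviv_gpu_init()
 * compiles on architectures without it.
 */
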
static const struct platform_device_id gpu_ids[] = {
	{ .name = "etnaviv-gpu,2d" },
	{ },
};

/*
 * Driver functions:
 */

int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
{
	switch (param) {
	case ETNAVIV_PARAM_GPU_MODEL:
		*value = gpu->identity.model;
		break;

	case ETNAVIV_PARAM_GPU_REVISION:
		*value = gpu->identity.revision;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_0:
		*value = gpu->identity.features;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_1:
		*value = gpu->identity.minor_features0;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_2:
		*value = gpu->identity.minor_features1;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_3:
		*value = gpu->identity.minor_features2;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_4:
		*value = gpu->identity.minor_features3;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_5:
		*value = gpu->identity.minor_features4;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_6:
		*value = gpu->identity.minor_features5;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_7:
		*value = gpu->identity.minor_features6;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_8:
		*value = gpu->identity.minor_features7;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_9:
		*value = gpu->identity.minor_features8;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_10:
		*value = gpu->identity.minor_features9;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_11:
		*value = gpu->identity.minor_features10;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_12:
		*value = gpu->identity.minor_features11;
		break;

	case ETNAVIV_PARAM_GPU_STREAM_COUNT:
		*value = gpu->identity.stream_count;
		break;

	case ETNAVIV_PARAM_GPU_REGISTER_MAX:
		*value = gpu->identity.register_max;
		break;

	case ETNAVIV_PARAM_GPU_THREAD_COUNT:
		*value = gpu->identity.thread_count;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
		*value = gpu->identity.vertex_cache_size;
		break;

	case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
		*value = gpu->identity.shader_core_count;
		break;

	case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
		*value = gpu->identity.pixel_pipes;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
		*value = gpu->identity.vertex_output_buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
		*value = gpu->identity.buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
		*value = gpu->identity.instruction_count;
		break;

	case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
		*value = gpu->identity.num_constants;
		break;

	case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
		*value = gpu->identity.varyings_count;
		break;

	default:
		DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
		return -EINVAL;
	}

	return 0;
}

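/*
 * Userspace reads these parameters through the GET_PARAM ioctl; a
 * minimal sketch of a caller (error handling elided, "fd" assumed to be
 * an open etnaviv render node):
 *
 *	struct drm_etnaviv_param req = {
 *		.pipe = 0,
 *		.param = ETNAVIV_PARAM_GPU_MODEL,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_ETNAVIV_GET_PARAM, &req) == 0)
 *		printf("model: GC%x\n", (unsigned int)req.value);
 */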

#define etnaviv_is_model_rev(gpu, mod, rev) \
	((gpu)->identity.model == chipModel_##mod && \
	 (gpu)->identity.revision == rev)
#define etnaviv_field(val, field) \
	(((val) & field##__MASK) >> field##__SHIFT)

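/*
 * etnaviv_field() relies on the rules-ng-ng register database convention
 * of paired FIELD__MASK/FIELD__SHIFT defines. Illustration with a made-up
 * field (not from the database): given EXAMPLE__MASK 0x000000f0 and
 * EXAMPLE__SHIFT 4, etnaviv_field(0xab, EXAMPLE) evaluates to
 * ((0xab & 0xf0) >> 4) == 0xa.
 */
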
static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		u32 specs[4];
		unsigned int streams;

		specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
		specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
		specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
		specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);

		gpu->identity.stream_count = etnaviv_field(specs[0],
				VIVS_HI_CHIP_SPECS_STREAM_COUNT);
		gpu->identity.register_max = etnaviv_field(specs[0],
				VIVS_HI_CHIP_SPECS_REGISTER_MAX);
		gpu->identity.thread_count = etnaviv_field(specs[0],
				VIVS_HI_CHIP_SPECS_THREAD_COUNT);
		gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
				VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
		gpu->identity.shader_core_count = etnaviv_field(specs[0],
				VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
		gpu->identity.pixel_pipes = etnaviv_field(specs[0],
				VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
		gpu->identity.vertex_output_buffer_size =
			etnaviv_field(specs[0],
				VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);

		gpu->identity.buffer_size = etnaviv_field(specs[1],
				VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
		gpu->identity.instruction_count = etnaviv_field(specs[1],
				VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
		gpu->identity.num_constants = etnaviv_field(specs[1],
				VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);

		gpu->identity.varyings_count = etnaviv_field(specs[2],
				VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);

		/* This overrides the value from the older register if non-zero */
		streams = etnaviv_field(specs[3],
				VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
		if (streams)
			gpu->identity.stream_count = streams;
	}

	/* Fill in the stream count if not specified */
	if (gpu->identity.stream_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.stream_count = 4;
		else
			gpu->identity.stream_count = 1;
	}

	/* Convert the register max value */
	if (gpu->identity.register_max)
		gpu->identity.register_max = 1 << gpu->identity.register_max;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.register_max = 32;
	else
		gpu->identity.register_max = 64;

	/* Convert thread count */
	if (gpu->identity.thread_count)
		gpu->identity.thread_count = 1 << gpu->identity.thread_count;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.thread_count = 64;
	else if (gpu->identity.model == chipModel_GC500 ||
		 gpu->identity.model == chipModel_GC530)
		gpu->identity.thread_count = 128;
	else
		gpu->identity.thread_count = 256;

	if (gpu->identity.vertex_cache_size == 0)
		gpu->identity.vertex_cache_size = 8;

	if (gpu->identity.shader_core_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.shader_core_count = 2;
		else
			gpu->identity.shader_core_count = 1;
	}

	if (gpu->identity.pixel_pipes == 0)
		gpu->identity.pixel_pipes = 1;

	/* Convert vertex output buffer size */
	if (gpu->identity.vertex_output_buffer_size) {
		gpu->identity.vertex_output_buffer_size =
			1 << gpu->identity.vertex_output_buffer_size;
	} else if (gpu->identity.model == chipModel_GC400) {
		if (gpu->identity.revision < 0x4000)
			gpu->identity.vertex_output_buffer_size = 512;
		else if (gpu->identity.revision < 0x4200)
			gpu->identity.vertex_output_buffer_size = 256;
		else
			gpu->identity.vertex_output_buffer_size = 128;
	} else {
		gpu->identity.vertex_output_buffer_size = 512;
	}

	switch (gpu->identity.instruction_count) {
	case 0:
		if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
		    gpu->identity.model == chipModel_GC880)
			gpu->identity.instruction_count = 512;
		else
			gpu->identity.instruction_count = 256;
		break;

	case 1:
		gpu->identity.instruction_count = 1024;
		break;

	case 2:
		gpu->identity.instruction_count = 2048;
		break;

	default:
		gpu->identity.instruction_count = 256;
		break;
	}

	if (gpu->identity.num_constants == 0)
		gpu->identity.num_constants = 168;

	if (gpu->identity.varyings_count == 0) {
		if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
			gpu->identity.varyings_count = 12;
		else
			gpu->identity.varyings_count = 8;
	}

	/*
	 * For some cores, two varyings are consumed for position, so the
	 * maximum varying count needs to be reduced by one.
	 */
	if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
	    etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
	    etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5106))
		gpu->identity.varyings_count -= 1;
}

static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
{
	u32 chipIdentity;

	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);

	/* Special case for older graphics cores. */
	if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
		gpu->identity.model = chipModel_GC500;
		gpu->identity.revision = etnaviv_field(chipIdentity,
					 VIVS_HI_CHIP_IDENTITY_REVISION);
	} else {
		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);

		/*
		 * !!!! HACK ALERT !!!!
		 * Because people change device IDs without letting software
		 * know about it - here is the hack to make it all look the
		 * same. Only for GC400 family.
		 */
		if ((gpu->identity.model & 0xff00) == 0x0400 &&
		    gpu->identity.model != chipModel_GC420) {
			gpu->identity.model = gpu->identity.model & 0x0400;
		}

		/* Another special case */
		if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
			u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);

			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
				/*
				 * This IP has an ECO; put the correct
				 * revision in it.
				 */
				gpu->identity.revision = 0x1051;
			}
		}

		/*
		 * NXP likes to call the GPU on the i.MX6QP GC2000+, but in
		 * reality it's just a re-branded GC3000. We can identify this
		 * core by the upper half of the revision register being all 1.
		 * Fix model/rev here, so all other places can refer to this
		 * core by its real identity.
		 */
		if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
			gpu->identity.model = chipModel_GC3000;
			gpu->identity.revision &= 0xffff;
		}
	}

	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
		 gpu->identity.model, gpu->identity.revision);

	/*
	 * If there is a match in the HWDB, we aren't interested in the
	 * remaining register values, as they might be wrong.
	 */
	if (etnaviv_fill_identity_from_hwdb(gpu))
		return;

	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);

	/* Disable fast clear on GC700. */
	if (gpu->identity.model == chipModel_GC700)
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;

	if ((gpu->identity.model == chipModel_GC500 &&
	     gpu->identity.revision < 2) ||
	    (gpu->identity.model == chipModel_GC300 &&
	     gpu->identity.revision < 0x2000)) {

		/*
		 * GC500 rev 1.x and GC300 rev < 2.0 don't have these
		 * registers.
		 */
		gpu->identity.minor_features0 = 0;
		gpu->identity.minor_features1 = 0;
		gpu->identity.minor_features2 = 0;
		gpu->identity.minor_features3 = 0;
		gpu->identity.minor_features4 = 0;
		gpu->identity.minor_features5 = 0;
	} else
		gpu->identity.minor_features0 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);

	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		gpu->identity.minor_features1 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
		gpu->identity.minor_features2 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
		gpu->identity.minor_features3 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
		gpu->identity.minor_features4 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
		gpu->identity.minor_features5 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
	}

	/* GC600 idle register reports zero bits where modules aren't present */
	if (gpu->identity.model == chipModel_GC600) {
		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
				 VIVS_HI_IDLE_STATE_RA |
				 VIVS_HI_IDLE_STATE_SE |
				 VIVS_HI_IDLE_STATE_PA |
				 VIVS_HI_IDLE_STATE_SH |
				 VIVS_HI_IDLE_STATE_PE |
				 VIVS_HI_IDLE_STATE_DE |
				 VIVS_HI_IDLE_STATE_FE;
	} else {
		gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
	}

	etnaviv_hw_specs(gpu);
}

static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
{
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
}

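/*
 * The pair of writes above implements the FSCALE load strobe: the first
 * write presents the new divider value with FSCALE_CMD_LOAD set to latch
 * it, the second clears the strobe again while keeping the divider bits.
 */
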
static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features2 &
	    chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING) {
		clk_set_rate(gpu->clk_core,
			     gpu->base_rate_core >> gpu->freq_scale);
		clk_set_rate(gpu->clk_shader,
			     gpu->base_rate_shader >> gpu->freq_scale);
	} else {
		unsigned int fscale = 1 << (6 - gpu->freq_scale);
		u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		clock &= ~VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK;
		clock |= VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
		etnaviv_gpu_load_clock(gpu, clock);
	}
}

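/*
 * Worked example of the scaling math: freq_scale runs from 0 (full
 * speed) to 6 (maximum throttling). On a DFS-capable core with a 600 MHz
 * base rate, freq_scale == 2 yields 600 MHz >> 2 = 150 MHz. On the
 * FSCALE path, freq_scale == 0 gives fscale = 1 << 6 = 64 (the
 * full-speed divider value) and freq_scale == 6 gives fscale = 1.
 */
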
static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
{
	u32 control, idle;
	unsigned long timeout;
	bool failed = true;

	/* We hope that the GPU resets in under one second */
	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_is_after_jiffies(timeout)) {
		/* enable clock */
		unsigned int fscale = 1 << (6 - gpu->freq_scale);
		control = VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
		etnaviv_gpu_load_clock(gpu, control);

		/* isolate the GPU. */
		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* set soft reset. */
		control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* wait for reset. */
		usleep_range(10, 20);

		/* reset soft reset bit. */
		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* reset GPU isolation. */
		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* read idle register. */
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		/* try resetting again if FE is not idle */
		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
			dev_dbg(gpu->dev, "FE is not idle\n");
			continue;
		}

		/* read reset register. */
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		/* is the GPU idle? */
		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
			dev_dbg(gpu->dev, "GPU is not idle\n");
			continue;
		}

		/* disable debug registers, as they are not normally needed */
		control |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		failed = false;
		break;
	}

	if (failed) {
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");

		return -EBUSY;
	}

	/* We rely on the GPU running, so program the clock */
	etnaviv_gpu_update_clock(gpu);

	return 0;
}

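/*
 * Summary of the sequence above: each attempt isolates the GPU from the
 * bus, pulses the soft reset bit, and only counts as success once both
 * the FE and the 2D/3D pipes report idle. Individual attempts cost tens
 * of microseconds, so the loop simply retries until the one second
 * budget is exhausted.
 */
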
static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
{
	u32 pmc, ppc;

	/* enable clock gating */
	ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
	ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;

	/* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
	if (gpu->identity.revision == 0x4301 ||
	    gpu->identity.revision == 0x4302)
		ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;

	gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);

	pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);

	/* Disable PA clock gating for GC400+ without bugfix except for GC420 */
	if (gpu->identity.model >= chipModel_GC400 &&
	    gpu->identity.model != chipModel_GC420 &&
	    !(gpu->identity.minor_features3 & chipMinorFeatures3_BUG_FIXES12))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;

	/*
	 * Disable PE clock gating on revs < 5.0.0.0 when HZ is
	 * present without a bug fix.
	 */
	if (gpu->identity.revision < 0x5000 &&
	    gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
	    !(gpu->identity.minor_features1 &
	      chipMinorFeatures1_DISABLE_PE_GATING))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;

	if (gpu->identity.revision < 0x5422)
		pmc |= BIT(15); /* Unknown bit */

	/* Disable TX clock gating on affected core revisions. */
	if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x5108))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;

	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;

	gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
}

void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
{
	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address);
	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
		  VIVS_FE_COMMAND_CONTROL_ENABLE |
		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
}

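/*
 * "prefetch" is the number of 64-bit command words the FE may fetch
 * starting at "address" before it needs another kick; callers derive it
 * from the size of the commands they just wrote (etnaviv_buffer_init()
 * returns user_size / 8 for the initial ring, for example).
 */
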
static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
{
	/*
	 * Base value for VIVS_PM_PULSE_EATER register on models where it
	 * cannot be read, extracted from vivante kernel driver.
	 */
	u32 pulse_eater = 0x01590880;

	if (etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222)) {
		pulse_eater |= BIT(23);
	}

	if (etnaviv_is_model_rev(gpu, GC1000, 0x5039) ||
	    etnaviv_is_model_rev(gpu, GC1000, 0x5040)) {
		pulse_eater &= ~BIT(16);
		pulse_eater |= BIT(17);
	}

	if ((gpu->identity.revision > 0x5420) &&
	    (gpu->identity.features & chipFeatures_PIPE_3D)) {
		/* Performance fix: disable internal DFS */
		pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER);
		pulse_eater |= BIT(18);
	}

	gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
}

static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
	u16 prefetch;

	if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
	     etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
		u32 mc_memory_debug;

		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;

		if (gpu->identity.revision == 0x5007)
			mc_memory_debug |= 0x0c;
		else
			mc_memory_debug |= 0x08;

		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
	}

	/* enable module-level clock gating */
	etnaviv_gpu_enable_mlcg(gpu);

	/*
	 * Update GPU AXI cache attribute to "cacheable, no allocate".
	 * This is necessary to prevent the iMX6 SoC locking up.
	 */
	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
		  VIVS_HI_AXI_CONFIG_ARCACHE(2));

	/* GC2000 rev 5108 needs a special bus config */
	if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);

		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
	}

	/* setup the pulse eater */
	etnaviv_gpu_setup_pulse_eater(gpu);

	/* setup the MMU */
	etnaviv_iommu_restore(gpu);

	/* Start command processor */
	prefetch = etnaviv_buffer_init(gpu);

	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
	etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(&gpu->buffer),
			     prefetch);
}

int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
{
	int ret, i;

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0) {
		dev_err(gpu->dev, "Failed to enable GPU power domain\n");
		return ret;
	}

	etnaviv_hw_identify(gpu);

	if (gpu->identity.model == 0) {
		dev_err(gpu->dev, "Unknown GPU model\n");
		ret = -ENXIO;
		goto fail;
	}

	/* Exclude VG cores with FE2.0 */
	if (gpu->identity.features & chipFeatures_PIPE_VG &&
	    gpu->identity.features & chipFeatures_FE20) {
		dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
		ret = -ENXIO;
		goto fail;
	}

	/*
	 * Set the GPU linear window to be at the end of the DMA window, where
	 * the CMA area is likely to reside. This ensures that we are able to
	 * map the command buffers while having the linear window overlap as
	 * much RAM as possible, so we can optimize mappings for other buffers.
	 *
	 * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
	 * to different views of the memory on the individual engines.
	 */
	if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
	    (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
		u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);

		if (dma_mask < PHYS_OFFSET + SZ_2G)
			gpu->memory_base = PHYS_OFFSET;
		else
			gpu->memory_base = dma_mask - SZ_2G + 1;
	} else if (PHYS_OFFSET >= SZ_2G) {
		dev_info(gpu->dev, "Need to move linear window on MC1.0, disabling TS\n");
		gpu->memory_base = PHYS_OFFSET;
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
	}

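	/*
	 * Worked example of the window placement above: with PHYS_OFFSET at
	 * 0x10000000 and a 32-bit DMA mask (0xffffffff), the mask is not
	 * below PHYS_OFFSET + 2 GiB, so memory_base becomes
	 * 0xffffffff - 0x80000000 + 1 = 0x80000000 and the linear window
	 * covers the upper half of the 4 GiB DMA range.
	 */
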
	ret = etnaviv_hw_reset(gpu);
	if (ret) {
		dev_err(gpu->dev, "GPU reset failed\n");
		goto fail;
	}

	gpu->mmu = etnaviv_iommu_new(gpu);
	if (IS_ERR(gpu->mmu)) {
		dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
		ret = PTR_ERR(gpu->mmu);
		goto fail;
	}

	gpu->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(gpu);
	if (IS_ERR(gpu->cmdbuf_suballoc)) {
		dev_err(gpu->dev, "Failed to create cmdbuf suballocator\n");
		ret = PTR_ERR(gpu->cmdbuf_suballoc);
		goto fail;
	}

	/* Create buffer: */
	ret = etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &gpu->buffer,
				  PAGE_SIZE);
	if (ret) {
		dev_err(gpu->dev, "could not create command buffer\n");
		goto destroy_iommu;
	}

	if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
	    etnaviv_cmdbuf_get_va(&gpu->buffer) > 0x80000000) {
		ret = -EINVAL;
		dev_err(gpu->dev,
			"command buffer outside valid memory window\n");
		goto free_buffer;
	}

	/* Setup event management */
	spin_lock_init(&gpu->event_spinlock);
	init_completion(&gpu->event_free);
	bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++)
		complete(&gpu->event_free);

	/* Now program the hardware */
	mutex_lock(&gpu->lock);
	etnaviv_gpu_hw_init(gpu);
	gpu->exec_state = -1;
	mutex_unlock(&gpu->lock);

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;

free_buffer:
	etnaviv_cmdbuf_free(&gpu->buffer);
destroy_iommu:
	etnaviv_iommu_destroy(gpu->mmu);
	gpu->mmu = NULL;
fail:
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}

#ifdef CONFIG_DEBUG_FS
struct dma_debug {
	u32 address[2];
	u32 state[2];
};

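/*
 * Sample the FE DMA address and state registers repeatedly (up to 500
 * pairs of reads) so the caller can tell whether the front end is still
 * advancing: if neither value changes between any two samples, the FE is
 * most likely stuck.
 */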
static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
{
	u32 i;

	debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

	for (i = 0; i < 500; i++) {
		debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

		if (debug->address[0] != debug->address[1])
			break;

		if (debug->state[0] != debug->state[1])
			break;
	}
}

int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct dma_debug debug;
	u32 dma_lo, dma_hi, axi, idle;
	int ret;

	seq_printf(m, "%s Status:\n", dev_name(gpu->dev));

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		return ret;

	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
	axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

	verify_dma(gpu, &debug);

	seq_puts(m, "\tfeatures\n");
	seq_printf(m, "\t major_features: 0x%08x\n",
		   gpu->identity.features);
	seq_printf(m, "\t minor_features0: 0x%08x\n",
		   gpu->identity.minor_features0);
	seq_printf(m, "\t minor_features1: 0x%08x\n",
		   gpu->identity.minor_features1);
	seq_printf(m, "\t minor_features2: 0x%08x\n",
		   gpu->identity.minor_features2);
	seq_printf(m, "\t minor_features3: 0x%08x\n",
		   gpu->identity.minor_features3);
	seq_printf(m, "\t minor_features4: 0x%08x\n",
		   gpu->identity.minor_features4);
	seq_printf(m, "\t minor_features5: 0x%08x\n",
		   gpu->identity.minor_features5);
	seq_printf(m, "\t minor_features6: 0x%08x\n",
		   gpu->identity.minor_features6);
	seq_printf(m, "\t minor_features7: 0x%08x\n",
		   gpu->identity.minor_features7);
	seq_printf(m, "\t minor_features8: 0x%08x\n",
		   gpu->identity.minor_features8);
	seq_printf(m, "\t minor_features9: 0x%08x\n",
		   gpu->identity.minor_features9);
	seq_printf(m, "\t minor_features10: 0x%08x\n",
		   gpu->identity.minor_features10);
	seq_printf(m, "\t minor_features11: 0x%08x\n",
		   gpu->identity.minor_features11);

	seq_puts(m, "\tspecs\n");
	seq_printf(m, "\t stream_count: %d\n",
		   gpu->identity.stream_count);
	seq_printf(m, "\t register_max: %d\n",
		   gpu->identity.register_max);
	seq_printf(m, "\t thread_count: %d\n",
		   gpu->identity.thread_count);
	seq_printf(m, "\t vertex_cache_size: %d\n",
		   gpu->identity.vertex_cache_size);
	seq_printf(m, "\t shader_core_count: %d\n",
		   gpu->identity.shader_core_count);
	seq_printf(m, "\t pixel_pipes: %d\n",
		   gpu->identity.pixel_pipes);
	seq_printf(m, "\t vertex_output_buffer_size: %d\n",
		   gpu->identity.vertex_output_buffer_size);
	seq_printf(m, "\t buffer_size: %d\n",
		   gpu->identity.buffer_size);
	seq_printf(m, "\t instruction_count: %d\n",
		   gpu->identity.instruction_count);
	seq_printf(m, "\t num_constants: %d\n",
		   gpu->identity.num_constants);
	seq_printf(m, "\t varyings_count: %d\n",
		   gpu->identity.varyings_count);

	seq_printf(m, "\taxi: 0x%08x\n", axi);
	seq_printf(m, "\tidle: 0x%08x\n", idle);
	idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
	if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
		seq_puts(m, "\t FE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
		seq_puts(m, "\t DE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
		seq_puts(m, "\t PE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
		seq_puts(m, "\t SH is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
		seq_puts(m, "\t PA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
		seq_puts(m, "\t SE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
		seq_puts(m, "\t RA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
		seq_puts(m, "\t TX is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
		seq_puts(m, "\t VG is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
		seq_puts(m, "\t IM is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
		seq_puts(m, "\t FP is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
		seq_puts(m, "\t TS is not idle\n");
	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
		seq_puts(m, "\t AXI low power mode\n");

	if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
		u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
		u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
		u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);

		seq_puts(m, "\tMC\n");
		seq_printf(m, "\t read0: 0x%08x\n", read0);
		seq_printf(m, "\t read1: 0x%08x\n", read1);
		seq_printf(m, "\t write: 0x%08x\n", write);
	}

	seq_puts(m, "\tDMA ");

	if (debug.address[0] == debug.address[1] &&
	    debug.state[0] == debug.state[1]) {
		seq_puts(m, "seems to be stuck\n");
	} else if (debug.address[0] == debug.address[1]) {
		seq_puts(m, "address is constant\n");
	} else {
		seq_puts(m, "is running\n");
	}

	seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
	seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
	seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
	seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
	seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
		   dma_lo, dma_hi);

	ret = 0;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}
#endif

void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
{
	unsigned long flags;
	unsigned int i = 0;

	dev_err(gpu->dev, "recover hung GPU!\n");

	if (pm_runtime_get_sync(gpu->dev) < 0)
		return;

	mutex_lock(&gpu->lock);

	etnaviv_hw_reset(gpu);

	/* complete all events, the GPU won't do it after the reset */
	spin_lock_irqsave(&gpu->event_spinlock, flags);
	for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS)
		complete(&gpu->event_free);
	bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	gpu->completed_fence = gpu->active_fence;

	etnaviv_gpu_hw_init(gpu);
	gpu->lastctx = NULL;
	gpu->exec_state = -1;

	mutex_unlock(&gpu->lock);
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);
}

/* fence object management */
struct etnaviv_fence {
	struct etnaviv_gpu *gpu;
	struct dma_fence base;
};

static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
{
	return container_of(fence, struct etnaviv_fence, base);
}

static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
{
	return "etnaviv";
}

static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return dev_name(f->gpu->dev);
}

static bool etnaviv_fence_enable_signaling(struct dma_fence *fence)
{
	return true;
}

static bool etnaviv_fence_signaled(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return fence_completed(f->gpu, f->base.seqno);
}

static void etnaviv_fence_release(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	kfree_rcu(f, base.rcu);
}

static const struct dma_fence_ops etnaviv_fence_ops = {
	.get_driver_name = etnaviv_fence_get_driver_name,
	.get_timeline_name = etnaviv_fence_get_timeline_name,
	.enable_signaling = etnaviv_fence_enable_signaling,
	.signaled = etnaviv_fence_signaled,
	.wait = dma_fence_default_wait,
	.release = etnaviv_fence_release,
};

static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_fence *f;

	/*
	 * GPU lock must already be held, otherwise fence completion order might
	 * not match the seqno order assigned here.
	 */
	lockdep_assert_held(&gpu->lock);

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->gpu = gpu;

	dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
		       gpu->fence_context, ++gpu->next_fence);

	return &f->base;
}

/*
 * event management:
 */

static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
	unsigned int *events)
{
	unsigned long flags, timeout = msecs_to_jiffies(10 * 10000);
	unsigned i, acquired = 0;

	for (i = 0; i < nr_events; i++) {
		unsigned long ret;

		ret = wait_for_completion_timeout(&gpu->event_free, timeout);

		if (!ret) {
			dev_err(gpu->dev, "wait_for_completion_timeout failed");
			goto out;
		}

		acquired++;
		timeout = ret;
	}

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	for (i = 0; i < nr_events; i++) {
		int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);

		events[i] = event;
		memset(&gpu->event[event], 0, sizeof(struct etnaviv_event));
		set_bit(event, gpu->event_bitmap);
	}

	spin_unlock_irqrestore(&gpu->event_spinlock, flags);

	return 0;

out:
	for (i = 0; i < acquired; i++)
		complete(&gpu->event_free);

	return -EBUSY;
}

static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
{
	unsigned long flags;

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	if (!test_bit(event, gpu->event_bitmap)) {
		dev_warn(gpu->dev, "event %u is already marked as free",
			 event);
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	} else {
		clear_bit(event, gpu->event_bitmap);
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);

		complete(&gpu->event_free);
	}
}

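/*
 * Design note on the event bookkeeping above: gpu->event_free is a
 * completion used as a counting semaphore (one count per free event
 * slot), while gpu->event_bitmap records which slot numbers are taken.
 * event_alloc() first reserves counts, possibly sleeping, and only then
 * picks concrete slots under the spinlock, so it never sleeps with the
 * lock held.
 */
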
/*
 * Cmdstream submission/retirement:
 */
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 id, struct timespec *timeout)
{
	struct dma_fence *fence;
	int ret;

	/*
	 * Look up the fence and take a reference. We might still find a fence
	 * whose refcount has already dropped to zero. dma_fence_get_rcu
	 * pretends we didn't find a fence in that case.
	 */
	rcu_read_lock();
	fence = idr_find(&gpu->fence_idr, id);
	if (fence)
		fence = dma_fence_get_rcu(fence);
	rcu_read_unlock();

	if (!fence)
		return 0;

	if (!timeout) {
		/* No timeout was requested: just test for completion */
		ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
	} else {
		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_fence_wait_timeout(fence, true, remaining);
		if (ret == 0)
			ret = -ETIMEDOUT;
		else if (ret != -ERESTARTSYS)
			ret = 0;
	}

	dma_fence_put(fence);
	return ret;
}

/*
 * Wait for an object to become inactive. This, on its own, is not race
 * free: the object is moved by the scheduler off the active list, and
 * then the iova is put. Moreover, the object could be re-submitted just
 * after we notice that it's become inactive.
 *
 * Although the retirement happens under the gpu lock, we don't want to hold
 * that lock in this function while waiting.
 */
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
{
	unsigned long remaining;
	long ret;

	if (!timeout)
		return !is_active(etnaviv_obj) ? 0 : -EBUSY;

	remaining = etnaviv_timeout_to_jiffies(timeout);

	ret = wait_event_interruptible_timeout(gpu->fence_event,
					       !is_active(etnaviv_obj),
					       remaining);
	if (ret > 0)
		return 0;
	else if (ret == -ERESTARTSYS)
		return -ERESTARTSYS;
	else
		return -ETIMEDOUT;
}

static void sync_point_perfmon_sample(struct etnaviv_gpu *gpu,
	struct etnaviv_event *event, unsigned int flags)
{
	const struct etnaviv_gem_submit *submit = event->submit;
	unsigned int i;

	for (i = 0; i < submit->nr_pmrs; i++) {
		const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;

		if (pmr->flags == flags)
			etnaviv_perfmon_process(gpu, pmr, submit->exec_state);
	}
}

static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
	struct etnaviv_event *event)
{
	u32 val;

	/* disable clock gating */
	val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
	val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
	gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);

	/* enable debug register */
	val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
	val &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);

	sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
}

static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
	struct etnaviv_event *event)
{
	const struct etnaviv_gem_submit *submit = event->submit;
	unsigned int i;
	u32 val;

	sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);

	for (i = 0; i < submit->nr_pmrs; i++) {
		const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;

		*pmr->bo_vma = pmr->sequence;
	}

	/* disable debug register */
	val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
	val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);

	/* enable clock gating */
	val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
	val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
	gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
}

/* add bos to the gpu's ring and kick the gpu: */
struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;
	struct dma_fence *gpu_fence;
	unsigned int i, nr_events = 1, event[3];
	int ret;

	if (!submit->runtime_resumed) {
		ret = pm_runtime_get_sync(gpu->dev);
		if (ret < 0)
			return NULL;
		submit->runtime_resumed = true;
	}

	/*
	 * if there are performance monitor requests we need to have
	 * - a sync point to re-configure gpu and process ETNA_PM_PROCESS_PRE
	 *   requests.
	 * - a sync point to re-configure gpu, process ETNA_PM_PROCESS_POST
	 *   requests and update the sequence number for userspace.
	 */
	if (submit->nr_pmrs)
		nr_events = 3;

	ret = event_alloc(gpu, nr_events, event);
	if (ret) {
		DRM_ERROR("no free events\n");
		return NULL;
	}

	mutex_lock(&gpu->lock);

	gpu_fence = etnaviv_gpu_fence_alloc(gpu);
	if (!gpu_fence) {
		for (i = 0; i < nr_events; i++)
			event_free(gpu, event[i]);

		goto out_unlock;
	}

	gpu->active_fence = gpu_fence->seqno;

	if (submit->nr_pmrs) {
		gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
		kref_get(&submit->refcount);
		gpu->event[event[1]].submit = submit;
		etnaviv_sync_point_queue(gpu, event[1]);
	}

	gpu->event[event[0]].fence = gpu_fence;
	submit->cmdbuf.user_size = submit->cmdbuf.size - 8;
	etnaviv_buffer_queue(gpu, submit->exec_state, event[0],
			     &submit->cmdbuf);

	if (submit->nr_pmrs) {
		gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
		kref_get(&submit->refcount);
		gpu->event[event[2]].submit = submit;
		etnaviv_sync_point_queue(gpu, event[2]);
	}

out_unlock:
	mutex_unlock(&gpu->lock);

	return gpu_fence;
}

static void sync_point_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       sync_point_work);
	struct etnaviv_event *event = &gpu->event[gpu->sync_point_event];
	u32 addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);

	event->sync_point(gpu, event);
	etnaviv_submit_put(event->submit);
	event_free(gpu, gpu->sync_point_event);

	/* restart FE last to avoid GPU and IRQ racing against this worker */
	etnaviv_gpu_start_fe(gpu, addr + 2, 2);
}

static void dump_mmu_fault(struct etnaviv_gpu *gpu)
{
	u32 status = gpu_read(gpu, VIVS_MMUv2_STATUS);
	int i;

	dev_err_ratelimited(gpu->dev, "MMU fault status 0x%08x\n", status);

	for (i = 0; i < 4; i++) {
		if (!(status & (VIVS_MMUv2_STATUS_EXCEPTION0__MASK << (i * 4))))
			continue;

		dev_err_ratelimited(gpu->dev, "MMU %d fault addr 0x%08x\n", i,
				    gpu_read(gpu, VIVS_MMUv2_EXCEPTION_ADDR(i)));
	}
}

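/*
 * The MMUv2 status register packs one 4-bit exception code per MMU (four
 * in total); the loop above shifts the exception-0 mask across those
 * nibbles and dumps the faulting address of every MMU reporting a
 * non-zero code.
 */
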
static irqreturn_t irq_handler(int irq, void *data)
{
	struct etnaviv_gpu *gpu = data;
	irqreturn_t ret = IRQ_NONE;

	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);

	if (intr != 0) {
		int event;

		pm_runtime_mark_last_busy(gpu->dev);

		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
			dev_err(gpu->dev, "AXI bus error\n");
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
		}

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
			dump_mmu_fault(gpu);
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
		}

		while ((event = ffs(intr)) != 0) {
			struct dma_fence *fence;

			event -= 1;

			intr &= ~(1 << event);

			dev_dbg(gpu->dev, "event %u\n", event);

			if (gpu->event[event].sync_point) {
				gpu->sync_point_event = event;
				queue_work(gpu->wq, &gpu->sync_point_work);
			}

			fence = gpu->event[event].fence;
			if (!fence)
				continue;

			gpu->event[event].fence = NULL;

			/*
			 * Events can be processed out of order. Eg,
			 * - allocate and queue event 0
			 * - allocate event 1
			 * - event 0 completes, we process it
			 * - allocate and queue event 0
			 * - event 1 and event 0 complete
			 * we can end up processing event 0 first, then 1.
			 */
			if (fence_after(fence->seqno, gpu->completed_fence))
				gpu->completed_fence = fence->seqno;
			dma_fence_signal(fence);

			event_free(gpu, event);
		}

		ret = IRQ_HANDLED;
	}

	return ret;
}

static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
{
	int ret;

	if (gpu->clk_reg) {
		ret = clk_prepare_enable(gpu->clk_reg);
		if (ret)
			return ret;
	}

	if (gpu->clk_bus) {
		ret = clk_prepare_enable(gpu->clk_bus);
		if (ret)
			goto disable_clk_reg;
	}

	if (gpu->clk_core) {
		ret = clk_prepare_enable(gpu->clk_core);
		if (ret)
			goto disable_clk_bus;
	}

	if (gpu->clk_shader) {
		ret = clk_prepare_enable(gpu->clk_shader);
		if (ret)
			goto disable_clk_core;
	}

	return 0;

	/* unwind every clock enabled before the one that failed */
disable_clk_core:
	if (gpu->clk_core)
		clk_disable_unprepare(gpu->clk_core);
disable_clk_bus:
	if (gpu->clk_bus)
		clk_disable_unprepare(gpu->clk_bus);
disable_clk_reg:
	if (gpu->clk_reg)
		clk_disable_unprepare(gpu->clk_reg);

	return ret;
}

static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_shader)
		clk_disable_unprepare(gpu->clk_shader);
	if (gpu->clk_core)
		clk_disable_unprepare(gpu->clk_core);
	if (gpu->clk_bus)
		clk_disable_unprepare(gpu->clk_bus);
	if (gpu->clk_reg)
		clk_disable_unprepare(gpu->clk_reg);

	return 0;
}

int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		if ((idle & gpu->idle_mask) == gpu->idle_mask)
			return 0;

		if (time_is_before_jiffies(timeout)) {
			dev_warn(gpu->dev,
				 "timed out waiting for idle: idle=0x%x\n",
				 idle);
			return -ETIMEDOUT;
		}

		udelay(5);
	} while (1);
}

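/*
 * Bring the hardware to a state where the clocks can safely be gated:
 * the FE normally spins in a WAIT/LINK loop, so if a kernel command
 * buffer was ever set up, terminate that loop with an END command
 * before turning the clocks off.
 */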
static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
	if (gpu->buffer.suballoc) {
		/* Replace the last WAIT with END */
		mutex_lock(&gpu->lock);
		etnaviv_buffer_end(gpu);
		mutex_unlock(&gpu->lock);

		/*
		 * We know that only the FE is busy here, this should
		 * happen quickly (as the WAIT is only 200 cycles).  If
		 * we fail, just warn and continue.
		 */
		etnaviv_gpu_wait_idle(gpu, 100);
	}

	return etnaviv_gpu_clk_disable(gpu);
}

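/*
 * Counterpart of etnaviv_gpu_hw_suspend(): with the clocks running
 * again, re-apply the requested frequency scaling and redo the basic
 * hardware setup.  Clearing lastctx and exec_state forces the next
 * submit to re-emit the full context rather than relying on GPU state
 * that was lost while the clocks were off.
 */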
#ifdef CONFIG_PM
static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = mutex_lock_killable(&gpu->lock);
	if (ret)
		return ret;

	etnaviv_gpu_update_clock(gpu);
	etnaviv_gpu_hw_init(gpu);

	gpu->lastctx = NULL;
	gpu->exec_state = -1;

	mutex_unlock(&gpu->lock);

	return 0;
}
#endif

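/*
 * Thermal cooling device: the thermal core throttles the GPU by
 * stepping freq_scale between 0 (full speed) and the maximum state 6
 * reported below; etnaviv_gpu_update_clock() turns that value into a
 * core clock scaling factor.  A board would hook this up from its
 * thermal zone roughly like the following hypothetical device tree
 * fragment (names are illustrative, not from a real binding):
 *
 *	cooling-maps {
 *		map0 {
 *			trip = <&gpu_alert>;
 *			cooling-device = <&gpu 0 6>;
 *		};
 *	};
 */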
static int
etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	*state = 6;

	return 0;
}

static int
etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	struct etnaviv_gpu *gpu = cdev->devdata;

	*state = gpu->freq_scale;

	return 0;
}

static int
etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long state)
{
	struct etnaviv_gpu *gpu = cdev->devdata;

	mutex_lock(&gpu->lock);
	gpu->freq_scale = state;
	if (!pm_runtime_suspended(gpu->dev))
		etnaviv_gpu_update_clock(gpu);
	mutex_unlock(&gpu->lock);

	return 0;
}

static struct thermal_cooling_device_ops cooling_ops = {
	.get_max_state = etnaviv_gpu_cooling_get_max_state,
	.get_cur_state = etnaviv_gpu_cooling_get_cur_state,
	.set_cur_state = etnaviv_gpu_cooling_set_cur_state,
};

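/*
 * Component bind callback, run when the master etnaviv DRM device is
 * assembled.  Registration order matters: the cooling device, the
 * ordered workqueue (which serialises per-GPU deferred work such as
 * sync point handling) and the scheduler are set up first, and the
 * error labels unwind them in reverse order if bringing up the
 * hardware fails.
 */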
static int etnaviv_gpu_bind(struct device *dev, struct device *master,
	void *data)
{
	struct drm_device *drm = data;
	struct etnaviv_drm_private *priv = drm->dev_private;
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL)) {
		gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
				(char *)dev_name(dev), gpu, &cooling_ops);
		if (IS_ERR(gpu->cooling))
			return PTR_ERR(gpu->cooling);
	}

	gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
	if (!gpu->wq) {
		ret = -ENOMEM;
		goto out_thermal;
	}

	ret = etnaviv_sched_init(gpu);
	if (ret)
		goto out_workqueue;

#ifdef CONFIG_PM
	ret = pm_runtime_get_sync(gpu->dev);
#else
	ret = etnaviv_gpu_clk_enable(gpu);
#endif
	if (ret < 0)
		goto out_sched;

	gpu->drm = drm;
	gpu->fence_context = dma_fence_context_alloc(1);
	idr_init(&gpu->fence_idr);
	spin_lock_init(&gpu->fence_spinlock);

	INIT_WORK(&gpu->sync_point_work, sync_point_worker);
	init_waitqueue_head(&gpu->fence_event);

	priv->gpu[priv->num_gpus++] = gpu;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;

out_sched:
	etnaviv_sched_fini(gpu);

out_workqueue:
	destroy_workqueue(gpu->wq);

out_thermal:
	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
		thermal_cooling_device_unregister(gpu->cooling);

	return ret;
}

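/*
 * Component unbind callback: tear down in roughly the reverse order of
 * etnaviv_gpu_bind(), flushing outstanding work before the hardware is
 * suspended and the command buffer, suballocator and MMU are released.
 */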
static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
	void *data)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);

	DBG("%s", dev_name(gpu->dev));

	flush_workqueue(gpu->wq);
	destroy_workqueue(gpu->wq);

	etnaviv_sched_fini(gpu);

#ifdef CONFIG_PM
	pm_runtime_get_sync(gpu->dev);
	pm_runtime_put_sync_suspend(gpu->dev);
#else
	etnaviv_gpu_hw_suspend(gpu);
#endif

	if (gpu->buffer.suballoc)
		etnaviv_cmdbuf_free(&gpu->buffer);

	if (gpu->cmdbuf_suballoc) {
		etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
		gpu->cmdbuf_suballoc = NULL;
	}

	if (gpu->mmu) {
		etnaviv_iommu_destroy(gpu->mmu);
		gpu->mmu = NULL;
	}

	gpu->drm = NULL;
	idr_destroy(&gpu->fence_idr);

	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
		thermal_cooling_device_unregister(gpu->cooling);
	gpu->cooling = NULL;
}

static const struct component_ops gpu_ops = {
	.bind = etnaviv_gpu_bind,
	.unbind = etnaviv_gpu_unbind,
};

static const struct of_device_id etnaviv_gpu_match[] = {
	{
		.compatible = "vivante,gc"
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, etnaviv_gpu_match);

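/*
 * Platform probe only gathers the per-core resources (registers, IRQ,
 * clocks) and registers the component; everything that needs the DRM
 * device happens later in etnaviv_gpu_bind().  Missing clocks are not
 * treated as fatal here, presumably because not every SoC provides all
 * four of them.
 */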
static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct etnaviv_gpu *gpu;
	int err;

	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
	if (!gpu)
		return -ENOMEM;

	gpu->dev = &pdev->dev;
	mutex_init(&gpu->lock);
	mutex_init(&gpu->fence_idr_lock);

	/* Map registers: */
	gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
	if (IS_ERR(gpu->mmio))
		return PTR_ERR(gpu->mmio);

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		dev_err(dev, "failed to get irq: %d\n", gpu->irq);
		return gpu->irq;
	}

	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
			       dev_name(gpu->dev), gpu);
	if (err) {
		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
		return err;
	}

	/* Get Clocks: */
	gpu->clk_reg = devm_clk_get(&pdev->dev, "reg");
	DBG("clk_reg: %p", gpu->clk_reg);
	if (IS_ERR(gpu->clk_reg))
		gpu->clk_reg = NULL;

	gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
	DBG("clk_bus: %p", gpu->clk_bus);
	if (IS_ERR(gpu->clk_bus))
		gpu->clk_bus = NULL;

	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
	DBG("clk_core: %p", gpu->clk_core);
	if (IS_ERR(gpu->clk_core))
		gpu->clk_core = NULL;
	gpu->base_rate_core = clk_get_rate(gpu->clk_core);

	gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
	DBG("clk_shader: %p", gpu->clk_shader);
	if (IS_ERR(gpu->clk_shader))
		gpu->clk_shader = NULL;
	gpu->base_rate_shader = clk_get_rate(gpu->clk_shader);

	/* TODO: figure out max mapped size */
	dev_set_drvdata(dev, gpu);

	/*
	 * We treat the device as initially suspended.  The runtime PM
	 * autosuspend delay is rather arbitrary: no measurements have
	 * yet been performed to determine an appropriate value.
	 */
	pm_runtime_use_autosuspend(gpu->dev);
	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
	pm_runtime_enable(gpu->dev);

	err = component_add(&pdev->dev, &gpu_ops);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register component: %d\n", err);
		return err;
	}

	return 0;
}

static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &gpu_ops);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

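/*
 * Runtime suspend must not take the GPU down mid-job: it bails out
 * with -EBUSY while fences are outstanding or while any unit other
 * than the FE reports busy.  The FE is exempt because it busy-loops
 * in WAIT/LINK between jobs and so never looks idle on a live GPU.
 */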
#ifdef CONFIG_PM
static int etnaviv_gpu_rpm_suspend(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	u32 idle, mask;

	/* If we have outstanding fences, we're not idle */
	if (gpu->completed_fence != gpu->active_fence)
		return -EBUSY;

	/* Check whether the hardware (except FE) is idle */
	mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
	if (idle != mask)
		return -EBUSY;

	return etnaviv_gpu_hw_suspend(gpu);
}

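/*
 * Runtime resume: re-enable the clocks and, if the GPU was already in
 * use before suspending (a DRM device is bound and a kernel command
 * buffer exists), restore the hardware state that was lost while the
 * clocks were off.
 */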
static int etnaviv_gpu_rpm_resume(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	ret = etnaviv_gpu_clk_enable(gpu);
	if (ret)
		return ret;

	/* Re-initialise the basic hardware state */
	if (gpu->drm && gpu->buffer.suballoc) {
		ret = etnaviv_gpu_hw_resume(gpu);
		if (ret) {
			etnaviv_gpu_clk_disable(gpu);
			return ret;
		}
	}

	return 0;
}
#endif

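/*
 * Only runtime PM callbacks are wired up, with no ->runtime_idle hook
 * and no dedicated system sleep handlers.
 */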
static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
	SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
			   NULL)
};

struct platform_driver etnaviv_gpu_driver = {
	.driver = {
		.name = "etnaviv-gpu",
		.owner = THIS_MODULE,
		.pm = &etnaviv_gpu_pm_ops,
		.of_match_table = etnaviv_gpu_match,
	},
	.probe = etnaviv_gpu_platform_probe,
	.remove = etnaviv_gpu_platform_remove,
	.id_table = gpu_ids,
};