Commit | Line | Data |
---|---|---|
a8c21a54 T |
1 | /* |
2 | * Copyright (C) 2015 Etnaviv Project | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms of the GNU General Public License version 2 as published by | |
6 | * the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
11 | * more details. | |
12 | * | |
13 | * You should have received a copy of the GNU General Public License along with | |
14 | * this program. If not, see <http://www.gnu.org/licenses/>. | |
15 | */ | |
16 | ||
17 | #include <linux/component.h> | |
f54d1867 | 18 | #include <linux/dma-fence.h> |
a8c21a54 T |
19 | #include <linux/moduleparam.h> |
20 | #include <linux/of_device.h> | |
bcdfb5e5 | 21 | #include <linux/thermal.h> |
ea1f5729 LS |
22 | |
23 | #include "etnaviv_cmdbuf.h" | |
a8c21a54 T |
24 | #include "etnaviv_dump.h" |
25 | #include "etnaviv_gpu.h" | |
26 | #include "etnaviv_gem.h" | |
27 | #include "etnaviv_mmu.h" | |
a8c21a54 T |
28 | #include "common.xml.h" |
29 | #include "state.xml.h" | |
30 | #include "state_hi.xml.h" | |
31 | #include "cmdstream.xml.h" | |
32 | ||
33 | static const struct platform_device_id gpu_ids[] = { | |
34 | { .name = "etnaviv-gpu,2d" }, | |
35 | { }, | |
36 | }; | |
37 | ||
38 | static bool etnaviv_dump_core = true; | |
39 | module_param_named(dump_core, etnaviv_dump_core, bool, 0600); | |
40 | ||
41 | /* | |
42 | * Driver functions: | |
43 | */ | |
44 | ||
45 | int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value) | |
46 | { | |
47 | switch (param) { | |
48 | case ETNAVIV_PARAM_GPU_MODEL: | |
49 | *value = gpu->identity.model; | |
50 | break; | |
51 | ||
52 | case ETNAVIV_PARAM_GPU_REVISION: | |
53 | *value = gpu->identity.revision; | |
54 | break; | |
55 | ||
56 | case ETNAVIV_PARAM_GPU_FEATURES_0: | |
57 | *value = gpu->identity.features; | |
58 | break; | |
59 | ||
60 | case ETNAVIV_PARAM_GPU_FEATURES_1: | |
61 | *value = gpu->identity.minor_features0; | |
62 | break; | |
63 | ||
64 | case ETNAVIV_PARAM_GPU_FEATURES_2: | |
65 | *value = gpu->identity.minor_features1; | |
66 | break; | |
67 | ||
68 | case ETNAVIV_PARAM_GPU_FEATURES_3: | |
69 | *value = gpu->identity.minor_features2; | |
70 | break; | |
71 | ||
72 | case ETNAVIV_PARAM_GPU_FEATURES_4: | |
73 | *value = gpu->identity.minor_features3; | |
74 | break; | |
75 | ||
602eb489 RK |
76 | case ETNAVIV_PARAM_GPU_FEATURES_5: |
77 | *value = gpu->identity.minor_features4; | |
78 | break; | |
79 | ||
80 | case ETNAVIV_PARAM_GPU_FEATURES_6: | |
81 | *value = gpu->identity.minor_features5; | |
82 | break; | |
83 | ||
a8c21a54 T |
84 | case ETNAVIV_PARAM_GPU_STREAM_COUNT: |
85 | *value = gpu->identity.stream_count; | |
86 | break; | |
87 | ||
88 | case ETNAVIV_PARAM_GPU_REGISTER_MAX: | |
89 | *value = gpu->identity.register_max; | |
90 | break; | |
91 | ||
92 | case ETNAVIV_PARAM_GPU_THREAD_COUNT: | |
93 | *value = gpu->identity.thread_count; | |
94 | break; | |
95 | ||
96 | case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE: | |
97 | *value = gpu->identity.vertex_cache_size; | |
98 | break; | |
99 | ||
100 | case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT: | |
101 | *value = gpu->identity.shader_core_count; | |
102 | break; | |
103 | ||
104 | case ETNAVIV_PARAM_GPU_PIXEL_PIPES: | |
105 | *value = gpu->identity.pixel_pipes; | |
106 | break; | |
107 | ||
108 | case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE: | |
109 | *value = gpu->identity.vertex_output_buffer_size; | |
110 | break; | |
111 | ||
112 | case ETNAVIV_PARAM_GPU_BUFFER_SIZE: | |
113 | *value = gpu->identity.buffer_size; | |
114 | break; | |
115 | ||
116 | case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT: | |
117 | *value = gpu->identity.instruction_count; | |
118 | break; | |
119 | ||
120 | case ETNAVIV_PARAM_GPU_NUM_CONSTANTS: | |
121 | *value = gpu->identity.num_constants; | |
122 | break; | |
123 | ||
602eb489 RK |
124 | case ETNAVIV_PARAM_GPU_NUM_VARYINGS: |
125 | *value = gpu->identity.varyings_count; | |
126 | break; | |
127 | ||
a8c21a54 T |
128 | default: |
129 | DBG("%s: invalid param: %u", dev_name(gpu->dev), param); | |
130 | return -EINVAL; | |
131 | } | |
132 | ||
133 | return 0; | |
134 | } | |
135 | ||
472f79dc RK |
136 | |
137 | #define etnaviv_is_model_rev(gpu, mod, rev) \ | |
138 | ((gpu)->identity.model == chipModel_##mod && \ | |
139 | (gpu)->identity.revision == rev) | |
52f36ba1 RK |
140 | #define etnaviv_field(val, field) \ |
141 | (((val) & field##__MASK) >> field##__SHIFT) | |
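/*
 * Editorial illustration (not part of the original source): etnaviv_field()
 * masks out a named bitfield and shifts it down to bit 0. With a
 * hypothetical field FOO defined as FOO__MASK = 0x000000f0 and
 * FOO__SHIFT = 4, etnaviv_field(val, FOO) expands to
 * ((val & 0x000000f0) >> 4), i.e. the raw value held in bits 7:4.
 */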
142 | ||
a8c21a54 T |
143 | static void etnaviv_hw_specs(struct etnaviv_gpu *gpu) |
144 | { | |
145 | if (gpu->identity.minor_features0 & | |
146 | chipMinorFeatures0_MORE_MINOR_FEATURES) { | |
602eb489 RK |
147 | u32 specs[4]; |
148 | unsigned int streams; | |
a8c21a54 T |
149 | |
150 | specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS); | |
151 | specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2); | |
602eb489 RK |
152 | specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3); |
153 | specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4); | |
a8c21a54 | 154 | |
52f36ba1 RK |
155 | gpu->identity.stream_count = etnaviv_field(specs[0], |
156 | VIVS_HI_CHIP_SPECS_STREAM_COUNT); | |
157 | gpu->identity.register_max = etnaviv_field(specs[0], | |
158 | VIVS_HI_CHIP_SPECS_REGISTER_MAX); | |
159 | gpu->identity.thread_count = etnaviv_field(specs[0], | |
160 | VIVS_HI_CHIP_SPECS_THREAD_COUNT); | |
161 | gpu->identity.vertex_cache_size = etnaviv_field(specs[0], | |
162 | VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE); | |
163 | gpu->identity.shader_core_count = etnaviv_field(specs[0], | |
164 | VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT); | |
165 | gpu->identity.pixel_pipes = etnaviv_field(specs[0], | |
166 | VIVS_HI_CHIP_SPECS_PIXEL_PIPES); | |
a8c21a54 | 167 | gpu->identity.vertex_output_buffer_size = |
52f36ba1 RK |
168 | etnaviv_field(specs[0], |
169 | VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE); | |
170 | ||
171 | gpu->identity.buffer_size = etnaviv_field(specs[1], | |
172 | VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE); | |
173 | gpu->identity.instruction_count = etnaviv_field(specs[1], | |
174 | VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT); | |
175 | gpu->identity.num_constants = etnaviv_field(specs[1], | |
176 | VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS); | |
602eb489 RK |
177 | |
178 | gpu->identity.varyings_count = etnaviv_field(specs[2], | |
179 | VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT); | |
180 | ||
181 | /* This overrides the value from the older register if non-zero */ | |
182 | streams = etnaviv_field(specs[3], | |
183 | VIVS_HI_CHIP_SPECS_4_STREAM_COUNT); | |
184 | if (streams) | |
185 | gpu->identity.stream_count = streams; | |
a8c21a54 T |
186 | } |
187 | ||
188 | /* Fill in the stream count if not specified */ | |
189 | if (gpu->identity.stream_count == 0) { | |
190 | if (gpu->identity.model >= 0x1000) | |
191 | gpu->identity.stream_count = 4; | |
192 | else | |
193 | gpu->identity.stream_count = 1; | |
194 | } | |
195 | ||
196 | /* Convert the register max value */ | |
197 | if (gpu->identity.register_max) | |
198 | gpu->identity.register_max = 1 << gpu->identity.register_max; | |
507f8991 | 199 | else if (gpu->identity.model == chipModel_GC400) |
a8c21a54 T |
200 | gpu->identity.register_max = 32; |
201 | else | |
202 | gpu->identity.register_max = 64; | |
203 | ||
204 | /* Convert thread count */ | |
205 | if (gpu->identity.thread_count) | |
206 | gpu->identity.thread_count = 1 << gpu->identity.thread_count; | |
507f8991 | 207 | else if (gpu->identity.model == chipModel_GC400) |
a8c21a54 | 208 | gpu->identity.thread_count = 64; |
507f8991 RK |
209 | else if (gpu->identity.model == chipModel_GC500 || |
210 | gpu->identity.model == chipModel_GC530) | |
a8c21a54 T |
211 | gpu->identity.thread_count = 128; |
212 | else | |
213 | gpu->identity.thread_count = 256; | |
214 | ||
215 | if (gpu->identity.vertex_cache_size == 0) | |
216 | gpu->identity.vertex_cache_size = 8; | |
217 | ||
218 | if (gpu->identity.shader_core_count == 0) { | |
219 | if (gpu->identity.model >= 0x1000) | |
220 | gpu->identity.shader_core_count = 2; | |
221 | else | |
222 | gpu->identity.shader_core_count = 1; | |
223 | } | |
224 | ||
225 | if (gpu->identity.pixel_pipes == 0) | |
226 | gpu->identity.pixel_pipes = 1; | |
227 | ||
228 | /* Convert vertex output buffer size */ | |
229 | if (gpu->identity.vertex_output_buffer_size) { | |
230 | gpu->identity.vertex_output_buffer_size = | |
231 | 1 << gpu->identity.vertex_output_buffer_size; | |
507f8991 | 232 | } else if (gpu->identity.model == chipModel_GC400) { |
a8c21a54 T |
233 | if (gpu->identity.revision < 0x4000) |
234 | gpu->identity.vertex_output_buffer_size = 512; | |
235 | else if (gpu->identity.revision < 0x4200) | |
236 | gpu->identity.vertex_output_buffer_size = 256; | |
237 | else | |
238 | gpu->identity.vertex_output_buffer_size = 128; | |
239 | } else { | |
240 | gpu->identity.vertex_output_buffer_size = 512; | |
241 | } | |
242 | ||
243 | switch (gpu->identity.instruction_count) { | |
244 | case 0: | |
472f79dc | 245 | if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) || |
507f8991 | 246 | gpu->identity.model == chipModel_GC880) |
a8c21a54 T |
247 | gpu->identity.instruction_count = 512; |
248 | else | |
249 | gpu->identity.instruction_count = 256; | |
250 | break; | |
251 | ||
252 | case 1: | |
253 | gpu->identity.instruction_count = 1024; | |
254 | break; | |
255 | ||
256 | case 2: | |
257 | gpu->identity.instruction_count = 2048; | |
258 | break; | |
259 | ||
260 | default: | |
261 | gpu->identity.instruction_count = 256; | |
262 | break; | |
263 | } | |
264 | ||
265 | if (gpu->identity.num_constants == 0) | |
266 | gpu->identity.num_constants = 168; | |
602eb489 RK |
267 | |
268 | if (gpu->identity.varyings_count == 0) { | |
269 | if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0) | |
270 | gpu->identity.varyings_count = 12; | |
271 | else | |
272 | gpu->identity.varyings_count = 8; | |
273 | } | |
274 | ||
275 | /* | |
276 | * For some cores, two varyings are consumed for position, so the | |
277 | * maximum varying count needs to be reduced by one. | |
278 | */ | |
279 | if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) || | |
280 | etnaviv_is_model_rev(gpu, GC4000, 0x5222) || | |
281 | etnaviv_is_model_rev(gpu, GC4000, 0x5245) || | |
282 | etnaviv_is_model_rev(gpu, GC4000, 0x5208) || | |
283 | etnaviv_is_model_rev(gpu, GC3000, 0x5435) || | |
284 | etnaviv_is_model_rev(gpu, GC2200, 0x5244) || | |
285 | etnaviv_is_model_rev(gpu, GC2100, 0x5108) || | |
286 | etnaviv_is_model_rev(gpu, GC2000, 0x5108) || | |
287 | etnaviv_is_model_rev(gpu, GC1500, 0x5246) || | |
288 | etnaviv_is_model_rev(gpu, GC880, 0x5107) || | |
289 | etnaviv_is_model_rev(gpu, GC880, 0x5106)) | |
290 | gpu->identity.varyings_count -= 1; | |
a8c21a54 T |
291 | } |
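/*
 * Editorial note on etnaviv_hw_specs() above (not part of the original
 * source): several CHIP_SPECS fields are encoded as a power-of-two exponent
 * rather than a plain count, which is why non-zero register_max,
 * thread_count and vertex_output_buffer_size values are expanded with
 * "1 << value"; for example, a raw register_max of 6 would yield
 * 1 << 6 = 64 registers.
 */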
292 | ||
293 | static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) | |
294 | { | |
295 | u32 chipIdentity; | |
296 | ||
297 | chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY); | |
298 | ||
299 | /* Special case for older graphics cores. */ | |
52f36ba1 | 300 | if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) { |
507f8991 | 301 | gpu->identity.model = chipModel_GC500; |
52f36ba1 RK |
302 | gpu->identity.revision = etnaviv_field(chipIdentity, |
303 | VIVS_HI_CHIP_IDENTITY_REVISION); | |
a8c21a54 T |
304 | } else { |
305 | ||
306 | gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL); | |
307 | gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV); | |
308 | ||
309 | /* | |
310 | * !!!! HACK ALERT !!!! | |
311 | * Because people change device IDs without letting software | |
312 | * know about it - here is the hack to make it all look the | |
313 | * same. Only for GC400 family. | |
314 | */ | |
315 | if ((gpu->identity.model & 0xff00) == 0x0400 && | |
507f8991 | 316 | gpu->identity.model != chipModel_GC420) { |
a8c21a54 T |
317 | gpu->identity.model = gpu->identity.model & 0x0400; |
318 | } | |
319 | ||
320 | /* Another special case */ | |
472f79dc | 321 | if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) { |
a8c21a54 T |
322 | u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE); |
323 | u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME); | |
324 | ||
325 | if (chipDate == 0x20080814 && chipTime == 0x12051100) { | |
326 | /* | |
327 | * This IP has an ECO; put the correct | |
328 | * revision in it. | |
329 | */ | |
330 | gpu->identity.revision = 0x1051; | |
331 | } | |
332 | } | |
12ff4bde LS |
333 | |
334 | /* | |
335 | * NXP likes to call the GPU on the i.MX6QP GC2000+, but in | |
336 | * reality it's just a re-branded GC3000. We can identify this | |
337 | * core by the upper half of the revision register being all 1. | |
338 | * Fix model/rev here, so all other places can refer to this | |
339 | * core by its real identity. | |
340 | */ | |
341 | if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) { | |
342 | gpu->identity.model = chipModel_GC3000; | |
343 | gpu->identity.revision &= 0xffff; | |
344 | } | |
a8c21a54 T |
345 | } |
346 | ||
347 | dev_info(gpu->dev, "model: GC%x, revision: %x\n", | |
348 | gpu->identity.model, gpu->identity.revision); | |
349 | ||
350 | gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE); | |
351 | ||
352 | /* Disable fast clear on GC700. */ | |
507f8991 | 353 | if (gpu->identity.model == chipModel_GC700) |
a8c21a54 T |
354 | gpu->identity.features &= ~chipFeatures_FAST_CLEAR; |
355 | ||
507f8991 RK |
356 | if ((gpu->identity.model == chipModel_GC500 && |
357 | gpu->identity.revision < 2) || | |
358 | (gpu->identity.model == chipModel_GC300 && | |
359 | gpu->identity.revision < 0x2000)) { | |
a8c21a54 T |
360 | |
361 | /* | |
362 | * GC500 rev 1.x and GC300 rev < 2.0 don't have these | |
363 | * registers. | |
364 | */ | |
365 | gpu->identity.minor_features0 = 0; | |
366 | gpu->identity.minor_features1 = 0; | |
367 | gpu->identity.minor_features2 = 0; | |
368 | gpu->identity.minor_features3 = 0; | |
602eb489 RK |
369 | gpu->identity.minor_features4 = 0; |
370 | gpu->identity.minor_features5 = 0; | |
a8c21a54 T |
371 | } else |
372 | gpu->identity.minor_features0 = | |
373 | gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0); | |
374 | ||
375 | if (gpu->identity.minor_features0 & | |
376 | chipMinorFeatures0_MORE_MINOR_FEATURES) { | |
377 | gpu->identity.minor_features1 = | |
378 | gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1); | |
379 | gpu->identity.minor_features2 = | |
380 | gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2); | |
381 | gpu->identity.minor_features3 = | |
382 | gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3); | |
602eb489 RK |
383 | gpu->identity.minor_features4 = |
384 | gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4); | |
385 | gpu->identity.minor_features5 = | |
386 | gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5); | |
a8c21a54 T |
387 | } |
388 | ||
389 | /* GC600 idle register reports zero bits where modules aren't present */ | |
390 | if (gpu->identity.model == chipModel_GC600) { | |
391 | gpu->idle_mask = VIVS_HI_IDLE_STATE_TX | | |
392 | VIVS_HI_IDLE_STATE_RA | | |
393 | VIVS_HI_IDLE_STATE_SE | | |
394 | VIVS_HI_IDLE_STATE_PA | | |
395 | VIVS_HI_IDLE_STATE_SH | | |
396 | VIVS_HI_IDLE_STATE_PE | | |
397 | VIVS_HI_IDLE_STATE_DE | | |
398 | VIVS_HI_IDLE_STATE_FE; | |
399 | } else { | |
400 | gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP; | |
401 | } | |
402 | ||
403 | etnaviv_hw_specs(gpu); | |
404 | } | |
405 | ||
406 | static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock) | |
407 | { | |
408 | gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock | | |
409 | VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD); | |
410 | gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock); | |
411 | } | |
412 | ||
bcdfb5e5 RK |
413 | static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu) |
414 | { | |
415 | unsigned int fscale = 1 << (6 - gpu->freq_scale); | |
416 | u32 clock; | |
417 | ||
418 | clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS | | |
419 | VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale); | |
420 | ||
421 | etnaviv_gpu_load_clock(gpu, clock); | |
422 | } | |
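/*
 * Editorial worked example (not part of the original source): freq_scale
 * ranges from 0 (full speed) to 6 (the cooling device's max_state below),
 * so fscale = 1 << (6 - freq_scale) steps through 64, 32, 16, 8, 4, 2, 1.
 * The FSCALE_VAL field presumably divides the core clock in proportion,
 * i.e. roughly fscale/64 of full speed.
 */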
423 | ||
a8c21a54 T |
424 | static int etnaviv_hw_reset(struct etnaviv_gpu *gpu) |
425 | { | |
426 | u32 control, idle; | |
427 | unsigned long timeout; | |
428 | bool failed = true; | |
429 | ||
430 | /* TODO | |
431 | * | |
432 | * - clock gating | |
433 | * - pulse eater | |
434 | * - what about VG? | |
435 | */ | |
436 | ||
437 | /* We hope that the GPU resets in under one second */ | |
438 | timeout = jiffies + msecs_to_jiffies(1000); | |
439 | ||
440 | while (time_is_after_jiffies(timeout)) { | |
a8c21a54 | 441 | /* enable clock */ |
bcdfb5e5 RK |
442 | etnaviv_gpu_update_clock(gpu); |
443 | ||
444 | control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL); | |
a8c21a54 T |
445 | |
446 | /* Wait for stable clock. Vivante's code waited for 1ms */ | |
447 | usleep_range(1000, 10000); | |
448 | ||
449 | /* isolate the GPU. */ | |
450 | control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU; | |
451 | gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control); | |
452 | ||
453 | /* set soft reset. */ | |
454 | control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET; | |
455 | gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control); | |
456 | ||
457 | /* wait for reset. */ | |
458 | msleep(1); | |
459 | ||
460 | /* reset soft reset bit. */ | |
461 | control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET; | |
462 | gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control); | |
463 | ||
464 | /* reset GPU isolation. */ | |
465 | control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU; | |
466 | gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control); | |
467 | ||
468 | /* read idle register. */ | |
469 | idle = gpu_read(gpu, VIVS_HI_IDLE_STATE); | |
470 | ||
471 | /* try resetting again if FE is not idle */ | |
472 | if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) { | |
473 | dev_dbg(gpu->dev, "FE is not idle\n"); | |
474 | continue; | |
475 | } | |
476 | ||
477 | /* read reset register. */ | |
478 | control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL); | |
479 | ||
480 | /* is the GPU idle? */ | |
481 | if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) || | |
482 | ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) { | |
483 | dev_dbg(gpu->dev, "GPU is not idle\n"); | |
484 | continue; | |
485 | } | |
486 | ||
487 | failed = false; | |
488 | break; | |
489 | } | |
490 | ||
491 | if (failed) { | |
492 | idle = gpu_read(gpu, VIVS_HI_IDLE_STATE); | |
493 | control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL); | |
494 | ||
495 | dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n", | |
496 | idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ", | |
497 | control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ", | |
498 | control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not "); | |
499 | ||
500 | return -EBUSY; | |
501 | } | |
502 | ||
503 | /* We rely on the GPU running, so program the clock */ | |
bcdfb5e5 | 504 | etnaviv_gpu_update_clock(gpu); |
a8c21a54 T |
505 | |
506 | return 0; | |
507 | } | |
508 | ||
7d0c6e71 RK |
509 | static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu) |
510 | { | |
511 | u32 pmc, ppc; | |
512 | ||
513 | /* enable clock gating */ | |
514 | ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS); | |
515 | ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING; | |
516 | ||
517 | /* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */ | |
518 | if (gpu->identity.revision == 0x4301 || | |
519 | gpu->identity.revision == 0x4302) | |
520 | ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING; | |
521 | ||
522 | gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc); | |
523 | ||
524 | pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS); | |
525 | ||
526 | /* Disable PA clock gating for GC400+ except for GC420 */ | |
527 | if (gpu->identity.model >= chipModel_GC400 && | |
528 | gpu->identity.model != chipModel_GC420) | |
529 | pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA; | |
530 | ||
531 | /* | |
532 | * Disable PE clock gating on revs < 5.0.0.0 when HZ is | |
533 | * present without a bug fix. | |
534 | */ | |
535 | if (gpu->identity.revision < 0x5000 && | |
536 | gpu->identity.minor_features0 & chipMinorFeatures0_HZ && | |
537 | !(gpu->identity.minor_features1 & | |
538 | chipMinorFeatures1_DISABLE_PE_GATING)) | |
539 | pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE; | |
540 | ||
541 | if (gpu->identity.revision < 0x5422) | |
542 | pmc |= BIT(15); /* Unknown bit */ | |
543 | ||
544 | pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ; | |
545 | pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ; | |
546 | ||
547 | gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc); | |
548 | } | |
549 | ||
229855b6 LS |
550 | void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch) |
551 | { | |
552 | gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address); | |
553 | gpu_write(gpu, VIVS_FE_COMMAND_CONTROL, | |
554 | VIVS_FE_COMMAND_CONTROL_ENABLE | | |
555 | VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch)); | |
556 | } | |
557 | ||
e17a0ded WL |
558 | static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu) |
559 | { | |
560 | /* | |
561 | * Base value for VIVS_PM_PULSE_EATER register on models where it | |
562 | * cannot be read, extracted from the Vivante kernel driver. | |
563 | */ | |
564 | u32 pulse_eater = 0x01590880; | |
565 | ||
566 | if (etnaviv_is_model_rev(gpu, GC4000, 0x5208) || | |
567 | etnaviv_is_model_rev(gpu, GC4000, 0x5222)) { | |
568 | pulse_eater |= BIT(23); | |
569 | ||
570 | } | |
571 | ||
572 | if (etnaviv_is_model_rev(gpu, GC1000, 0x5039) || | |
573 | etnaviv_is_model_rev(gpu, GC1000, 0x5040)) { | |
574 | pulse_eater &= ~BIT(16); | |
575 | pulse_eater |= BIT(17); | |
576 | } | |
577 | ||
578 | if ((gpu->identity.revision > 0x5420) && | |
579 | (gpu->identity.features & chipFeatures_PIPE_3D)) | |
580 | { | |
581 | /* Performance fix: disable internal DFS */ | |
582 | pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER); | |
583 | pulse_eater |= BIT(18); | |
584 | } | |
585 | ||
586 | gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater); | |
587 | } | |
588 | ||
a8c21a54 T |
589 | static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu) |
590 | { | |
591 | u16 prefetch; | |
592 | ||
472f79dc RK |
593 | if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) || |
594 | etnaviv_is_model_rev(gpu, GC320, 0x5220)) && | |
595 | gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) { | |
a8c21a54 T |
596 | u32 mc_memory_debug; |
597 | ||
598 | mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff; | |
599 | ||
600 | if (gpu->identity.revision == 0x5007) | |
601 | mc_memory_debug |= 0x0c; | |
602 | else | |
603 | mc_memory_debug |= 0x08; | |
604 | ||
605 | gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug); | |
606 | } | |
607 | ||
7d0c6e71 RK |
608 | /* enable module-level clock gating */ |
609 | etnaviv_gpu_enable_mlcg(gpu); | |
610 | ||
a8c21a54 T |
611 | /* |
612 | * Update GPU AXI cache attribute to "cacheable, no allocate". | |
613 | * This is necessary to prevent the iMX6 SoC locking up. | |
614 | */ | |
615 | gpu_write(gpu, VIVS_HI_AXI_CONFIG, | |
616 | VIVS_HI_AXI_CONFIG_AWCACHE(2) | | |
617 | VIVS_HI_AXI_CONFIG_ARCACHE(2)); | |
618 | ||
619 | /* GC2000 rev 5108 needs a special bus config */ | |
472f79dc | 620 | if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) { |
a8c21a54 T |
621 | u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG); |
622 | bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK | | |
623 | VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK); | |
624 | bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) | | |
625 | VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0); | |
626 | gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config); | |
627 | } | |
628 | ||
e17a0ded WL |
629 | /* setup the pulse eater */ |
630 | etnaviv_gpu_setup_pulse_eater(gpu); | |
631 | ||
99f861bc | 632 | /* setup the MMU */ |
e095c8fe | 633 | etnaviv_iommu_restore(gpu); |
a8c21a54 T |
634 | |
635 | /* Start command processor */ | |
636 | prefetch = etnaviv_buffer_init(gpu); | |
637 | ||
638 | gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U); | |
c3ef4b8c | 639 | etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(gpu->buffer), |
229855b6 | 640 | prefetch); |
a8c21a54 T |
641 | } |
642 | ||
643 | int etnaviv_gpu_init(struct etnaviv_gpu *gpu) | |
644 | { | |
645 | int ret, i; | |
a8c21a54 T |
646 | |
647 | ret = pm_runtime_get_sync(gpu->dev); | |
1409df04 LS |
648 | if (ret < 0) { |
649 | dev_err(gpu->dev, "Failed to enable GPU power domain\n"); | |
a8c21a54 | 650 | return ret; |
1409df04 | 651 | } |
a8c21a54 T |
652 | |
653 | etnaviv_hw_identify(gpu); | |
654 | ||
655 | if (gpu->identity.model == 0) { | |
656 | dev_err(gpu->dev, "Unknown GPU model\n"); | |
f6427760 RK |
657 | ret = -ENXIO; |
658 | goto fail; | |
a8c21a54 T |
659 | } |
660 | ||
b98c6688 RK |
661 | /* Exclude VG cores with FE2.0 */ |
662 | if (gpu->identity.features & chipFeatures_PIPE_VG && | |
663 | gpu->identity.features & chipFeatures_FE20) { | |
664 | dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n"); | |
665 | ret = -ENXIO; | |
666 | goto fail; | |
667 | } | |
668 | ||
2144fff7 LS |
669 | /* |
670 | * Set the GPU linear window to be at the end of the DMA window, where | |
671 | * the CMA area is likely to reside. This ensures that we are able to | |
672 | * map the command buffers while having the linear window overlap as | |
673 | * much RAM as possible, so we can optimize mappings for other buffers. | |
674 | * | |
675 | * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads | |
676 | * to different views of the memory on the individual engines. | |
677 | */ | |
678 | if (!(gpu->identity.features & chipFeatures_PIPE_3D) || | |
679 | (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) { | |
680 | u32 dma_mask = (u32)dma_get_required_mask(gpu->dev); | |
681 | if (dma_mask < PHYS_OFFSET + SZ_2G) | |
682 | gpu->memory_base = PHYS_OFFSET; | |
683 | else | |
684 | gpu->memory_base = dma_mask - SZ_2G + 1; | |
1db01279 LS |
685 | } else if (PHYS_OFFSET >= SZ_2G) { |
686 | dev_info(gpu->dev, "Need to move linear window on MC1.0, disabling TS\n"); | |
687 | gpu->memory_base = PHYS_OFFSET; | |
688 | gpu->identity.features &= ~chipFeatures_FAST_CLEAR; | |
2144fff7 LS |
689 | } |
690 | ||
a8c21a54 | 691 | ret = etnaviv_hw_reset(gpu); |
1409df04 LS |
692 | if (ret) { |
693 | dev_err(gpu->dev, "GPU reset failed\n"); | |
a8c21a54 | 694 | goto fail; |
1409df04 | 695 | } |
a8c21a54 | 696 | |
dd34bb96 LS |
697 | gpu->mmu = etnaviv_iommu_new(gpu); |
698 | if (IS_ERR(gpu->mmu)) { | |
1409df04 | 699 | dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n"); |
dd34bb96 | 700 | ret = PTR_ERR(gpu->mmu); |
a8c21a54 T |
701 | goto fail; |
702 | } | |
703 | ||
e66774dd LS |
704 | gpu->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(gpu); |
705 | if (IS_ERR(gpu->cmdbuf_suballoc)) { | |
706 | dev_err(gpu->dev, "Failed to create cmdbuf suballocator\n"); | |
707 | ret = PTR_ERR(gpu->cmdbuf_suballoc); | |
708 | goto fail; | |
709 | } | |
710 | ||
a8c21a54 | 711 | /* Create buffer: */ |
e66774dd | 712 | gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0); |
a8c21a54 T |
713 | if (!gpu->buffer) { |
714 | ret = -ENOMEM; | |
715 | dev_err(gpu->dev, "could not create command buffer\n"); | |
45d16a6d | 716 | goto destroy_iommu; |
a8c21a54 | 717 | } |
acfee0ec LS |
718 | |
719 | if (gpu->mmu->version == ETNAVIV_IOMMU_V1 && | |
c3ef4b8c | 720 | etnaviv_cmdbuf_get_va(gpu->buffer) > 0x80000000) { |
a8c21a54 T |
721 | ret = -EINVAL; |
722 | dev_err(gpu->dev, | |
723 | "command buffer outside valid memory window\n"); | |
724 | goto free_buffer; | |
725 | } | |
726 | ||
727 | /* Setup event management */ | |
728 | spin_lock_init(&gpu->event_spinlock); | |
729 | init_completion(&gpu->event_free); | |
730 | for (i = 0; i < ARRAY_SIZE(gpu->event); i++) { | |
731 | gpu->event[i].used = false; | |
732 | complete(&gpu->event_free); | |
733 | } | |
734 | ||
735 | /* Now program the hardware */ | |
736 | mutex_lock(&gpu->lock); | |
737 | etnaviv_gpu_hw_init(gpu); | |
f6086311 | 738 | gpu->exec_state = -1; |
a8c21a54 T |
739 | mutex_unlock(&gpu->lock); |
740 | ||
741 | pm_runtime_mark_last_busy(gpu->dev); | |
742 | pm_runtime_put_autosuspend(gpu->dev); | |
743 | ||
744 | return 0; | |
745 | ||
746 | free_buffer: | |
ea1f5729 | 747 | etnaviv_cmdbuf_free(gpu->buffer); |
a8c21a54 | 748 | gpu->buffer = NULL; |
45d16a6d LS |
749 | destroy_iommu: |
750 | etnaviv_iommu_destroy(gpu->mmu); | |
751 | gpu->mmu = NULL; | |
a8c21a54 T |
752 | fail: |
753 | pm_runtime_mark_last_busy(gpu->dev); | |
754 | pm_runtime_put_autosuspend(gpu->dev); | |
755 | ||
756 | return ret; | |
757 | } | |
758 | ||
759 | #ifdef CONFIG_DEBUG_FS | |
760 | struct dma_debug { | |
761 | u32 address[2]; | |
762 | u32 state[2]; | |
763 | }; | |
764 | ||
765 | static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug) | |
766 | { | |
767 | u32 i; | |
768 | ||
769 | debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS); | |
770 | debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE); | |
771 | ||
772 | for (i = 0; i < 500; i++) { | |
773 | debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS); | |
774 | debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE); | |
775 | ||
776 | if (debug->address[0] != debug->address[1]) | |
777 | break; | |
778 | ||
779 | if (debug->state[0] != debug->state[1]) | |
780 | break; | |
781 | } | |
782 | } | |
783 | ||
784 | int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m) | |
785 | { | |
786 | struct dma_debug debug; | |
787 | u32 dma_lo, dma_hi, axi, idle; | |
788 | int ret; | |
789 | ||
790 | seq_printf(m, "%s Status:\n", dev_name(gpu->dev)); | |
791 | ||
792 | ret = pm_runtime_get_sync(gpu->dev); | |
793 | if (ret < 0) | |
794 | return ret; | |
795 | ||
796 | dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW); | |
797 | dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH); | |
798 | axi = gpu_read(gpu, VIVS_HI_AXI_STATUS); | |
799 | idle = gpu_read(gpu, VIVS_HI_IDLE_STATE); | |
800 | ||
801 | verify_dma(gpu, &debug); | |
802 | ||
803 | seq_puts(m, "\tfeatures\n"); | |
804 | seq_printf(m, "\t minor_features0: 0x%08x\n", | |
805 | gpu->identity.minor_features0); | |
806 | seq_printf(m, "\t minor_features1: 0x%08x\n", | |
807 | gpu->identity.minor_features1); | |
808 | seq_printf(m, "\t minor_features2: 0x%08x\n", | |
809 | gpu->identity.minor_features2); | |
810 | seq_printf(m, "\t minor_features3: 0x%08x\n", | |
811 | gpu->identity.minor_features3); | |
602eb489 RK |
812 | seq_printf(m, "\t minor_features4: 0x%08x\n", |
813 | gpu->identity.minor_features4); | |
814 | seq_printf(m, "\t minor_features5: 0x%08x\n", | |
815 | gpu->identity.minor_features5); | |
a8c21a54 T |
816 | |
817 | seq_puts(m, "\tspecs\n"); | |
818 | seq_printf(m, "\t stream_count: %d\n", | |
819 | gpu->identity.stream_count); | |
820 | seq_printf(m, "\t register_max: %d\n", | |
821 | gpu->identity.register_max); | |
822 | seq_printf(m, "\t thread_count: %d\n", | |
823 | gpu->identity.thread_count); | |
824 | seq_printf(m, "\t vertex_cache_size: %d\n", | |
825 | gpu->identity.vertex_cache_size); | |
826 | seq_printf(m, "\t shader_core_count: %d\n", | |
827 | gpu->identity.shader_core_count); | |
828 | seq_printf(m, "\t pixel_pipes: %d\n", | |
829 | gpu->identity.pixel_pipes); | |
830 | seq_printf(m, "\t vertex_output_buffer_size: %d\n", | |
831 | gpu->identity.vertex_output_buffer_size); | |
832 | seq_printf(m, "\t buffer_size: %d\n", | |
833 | gpu->identity.buffer_size); | |
834 | seq_printf(m, "\t instruction_count: %d\n", | |
835 | gpu->identity.instruction_count); | |
836 | seq_printf(m, "\t num_constants: %d\n", | |
837 | gpu->identity.num_constants); | |
602eb489 RK |
838 | seq_printf(m, "\t varyings_count: %d\n", |
839 | gpu->identity.varyings_count); | |
a8c21a54 T |
840 | |
841 | seq_printf(m, "\taxi: 0x%08x\n", axi); | |
842 | seq_printf(m, "\tidle: 0x%08x\n", idle); | |
843 | idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP; | |
844 | if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) | |
845 | seq_puts(m, "\t FE is not idle\n"); | |
846 | if ((idle & VIVS_HI_IDLE_STATE_DE) == 0) | |
847 | seq_puts(m, "\t DE is not idle\n"); | |
848 | if ((idle & VIVS_HI_IDLE_STATE_PE) == 0) | |
849 | seq_puts(m, "\t PE is not idle\n"); | |
850 | if ((idle & VIVS_HI_IDLE_STATE_SH) == 0) | |
851 | seq_puts(m, "\t SH is not idle\n"); | |
852 | if ((idle & VIVS_HI_IDLE_STATE_PA) == 0) | |
853 | seq_puts(m, "\t PA is not idle\n"); | |
854 | if ((idle & VIVS_HI_IDLE_STATE_SE) == 0) | |
855 | seq_puts(m, "\t SE is not idle\n"); | |
856 | if ((idle & VIVS_HI_IDLE_STATE_RA) == 0) | |
857 | seq_puts(m, "\t RA is not idle\n"); | |
858 | if ((idle & VIVS_HI_IDLE_STATE_TX) == 0) | |
859 | seq_puts(m, "\t TX is not idle\n"); | |
860 | if ((idle & VIVS_HI_IDLE_STATE_VG) == 0) | |
861 | seq_puts(m, "\t VG is not idle\n"); | |
862 | if ((idle & VIVS_HI_IDLE_STATE_IM) == 0) | |
863 | seq_puts(m, "\t IM is not idle\n"); | |
864 | if ((idle & VIVS_HI_IDLE_STATE_FP) == 0) | |
865 | seq_puts(m, "\t FP is not idle\n"); | |
866 | if ((idle & VIVS_HI_IDLE_STATE_TS) == 0) | |
867 | seq_puts(m, "\t TS is not idle\n"); | |
868 | if (idle & VIVS_HI_IDLE_STATE_AXI_LP) | |
869 | seq_puts(m, "\t AXI low power mode\n"); | |
870 | ||
871 | if (gpu->identity.features & chipFeatures_DEBUG_MODE) { | |
872 | u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0); | |
873 | u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1); | |
874 | u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE); | |
875 | ||
876 | seq_puts(m, "\tMC\n"); | |
877 | seq_printf(m, "\t read0: 0x%08x\n", read0); | |
878 | seq_printf(m, "\t read1: 0x%08x\n", read1); | |
879 | seq_printf(m, "\t write: 0x%08x\n", write); | |
880 | } | |
881 | ||
882 | seq_puts(m, "\tDMA "); | |
883 | ||
884 | if (debug.address[0] == debug.address[1] && | |
885 | debug.state[0] == debug.state[1]) { | |
886 | seq_puts(m, "seems to be stuck\n"); | |
887 | } else if (debug.address[0] == debug.address[1]) { | |
c01e0159 | 888 | seq_puts(m, "address is constant\n"); |
a8c21a54 | 889 | } else { |
c01e0159 | 890 | seq_puts(m, "is running\n"); |
a8c21a54 T |
891 | } |
892 | ||
893 | seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]); | |
894 | seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]); | |
895 | seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]); | |
896 | seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]); | |
897 | seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n", | |
898 | dma_lo, dma_hi); | |
899 | ||
900 | ret = 0; | |
901 | ||
902 | pm_runtime_mark_last_busy(gpu->dev); | |
903 | pm_runtime_put_autosuspend(gpu->dev); | |
904 | ||
905 | return ret; | |
906 | } | |
907 | #endif | |
908 | ||
a8c21a54 T |
909 | /* |
910 | * Hangcheck detection for locked gpu: | |
911 | */ | |
912 | static void recover_worker(struct work_struct *work) | |
913 | { | |
914 | struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu, | |
915 | recover_work); | |
916 | unsigned long flags; | |
917 | unsigned int i; | |
918 | ||
919 | dev_err(gpu->dev, "hangcheck recover!\n"); | |
920 | ||
921 | if (pm_runtime_get_sync(gpu->dev) < 0) | |
922 | return; | |
923 | ||
924 | mutex_lock(&gpu->lock); | |
925 | ||
926 | /* Only catch the first event, or when manually re-armed */ | |
927 | if (etnaviv_dump_core) { | |
928 | etnaviv_core_dump(gpu); | |
929 | etnaviv_dump_core = false; | |
930 | } | |
931 | ||
932 | etnaviv_hw_reset(gpu); | |
933 | ||
934 | /* complete all events, the GPU won't do it after the reset */ | |
935 | spin_lock_irqsave(&gpu->event_spinlock, flags); | |
936 | for (i = 0; i < ARRAY_SIZE(gpu->event); i++) { | |
937 | if (!gpu->event[i].used) | |
938 | continue; | |
f54d1867 | 939 | dma_fence_signal(gpu->event[i].fence); |
a8c21a54 T |
940 | gpu->event[i].fence = NULL; |
941 | gpu->event[i].used = false; | |
942 | complete(&gpu->event_free); | |
a8c21a54 T |
943 | } |
944 | spin_unlock_irqrestore(&gpu->event_spinlock, flags); | |
945 | gpu->completed_fence = gpu->active_fence; | |
946 | ||
947 | etnaviv_gpu_hw_init(gpu); | |
1b94a9b7 | 948 | gpu->lastctx = NULL; |
f6086311 | 949 | gpu->exec_state = -1; |
a8c21a54 T |
950 | |
951 | mutex_unlock(&gpu->lock); | |
952 | pm_runtime_mark_last_busy(gpu->dev); | |
953 | pm_runtime_put_autosuspend(gpu->dev); | |
954 | ||
955 | /* Retire the buffer objects in a work */ | |
956 | etnaviv_queue_work(gpu->drm, &gpu->retire_work); | |
957 | } | |
958 | ||
959 | static void hangcheck_timer_reset(struct etnaviv_gpu *gpu) | |
960 | { | |
961 | DBG("%s", dev_name(gpu->dev)); | |
962 | mod_timer(&gpu->hangcheck_timer, | |
963 | round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES)); | |
964 | } | |
965 | ||
966 | static void hangcheck_handler(unsigned long data) | |
967 | { | |
968 | struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data; | |
969 | u32 fence = gpu->completed_fence; | |
970 | bool progress = false; | |
971 | ||
972 | if (fence != gpu->hangcheck_fence) { | |
973 | gpu->hangcheck_fence = fence; | |
974 | progress = true; | |
975 | } | |
976 | ||
977 | if (!progress) { | |
978 | u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS); | |
979 | int change = dma_addr - gpu->hangcheck_dma_addr; | |
980 | ||
981 | if (change < 0 || change > 16) { | |
982 | gpu->hangcheck_dma_addr = dma_addr; | |
983 | progress = true; | |
984 | } | |
985 | } | |
986 | ||
987 | if (!progress && fence_after(gpu->active_fence, fence)) { | |
988 | dev_err(gpu->dev, "hangcheck detected gpu lockup!\n"); | |
989 | dev_err(gpu->dev, " completed fence: %u\n", fence); | |
990 | dev_err(gpu->dev, " active fence: %u\n", | |
991 | gpu->active_fence); | |
992 | etnaviv_queue_work(gpu->drm, &gpu->recover_work); | |
993 | } | |
994 | ||
995 | /* if still more pending work, reset the hangcheck timer: */ | |
996 | if (fence_after(gpu->active_fence, gpu->hangcheck_fence)) | |
997 | hangcheck_timer_reset(gpu); | |
998 | } | |
999 | ||
1000 | static void hangcheck_disable(struct etnaviv_gpu *gpu) | |
1001 | { | |
1002 | del_timer_sync(&gpu->hangcheck_timer); | |
1003 | cancel_work_sync(&gpu->recover_work); | |
1004 | } | |
1005 | ||
1006 | /* fence object management */ | |
1007 | struct etnaviv_fence { | |
1008 | struct etnaviv_gpu *gpu; | |
f54d1867 | 1009 | struct dma_fence base; |
a8c21a54 T |
1010 | }; |
1011 | ||
f54d1867 | 1012 | static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence) |
a8c21a54 T |
1013 | { |
1014 | return container_of(fence, struct etnaviv_fence, base); | |
1015 | } | |
1016 | ||
f54d1867 | 1017 | static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence) |
a8c21a54 T |
1018 | { |
1019 | return "etnaviv"; | |
1020 | } | |
1021 | ||
f54d1867 | 1022 | static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence) |
a8c21a54 T |
1023 | { |
1024 | struct etnaviv_fence *f = to_etnaviv_fence(fence); | |
1025 | ||
1026 | return dev_name(f->gpu->dev); | |
1027 | } | |
1028 | ||
f54d1867 | 1029 | static bool etnaviv_fence_enable_signaling(struct dma_fence *fence) |
a8c21a54 T |
1030 | { |
1031 | return true; | |
1032 | } | |
1033 | ||
f54d1867 | 1034 | static bool etnaviv_fence_signaled(struct dma_fence *fence) |
a8c21a54 T |
1035 | { |
1036 | struct etnaviv_fence *f = to_etnaviv_fence(fence); | |
1037 | ||
1038 | return fence_completed(f->gpu, f->base.seqno); | |
1039 | } | |
1040 | ||
f54d1867 | 1041 | static void etnaviv_fence_release(struct dma_fence *fence) |
a8c21a54 T |
1042 | { |
1043 | struct etnaviv_fence *f = to_etnaviv_fence(fence); | |
1044 | ||
1045 | kfree_rcu(f, base.rcu); | |
1046 | } | |
1047 | ||
f54d1867 | 1048 | static const struct dma_fence_ops etnaviv_fence_ops = { |
a8c21a54 T |
1049 | .get_driver_name = etnaviv_fence_get_driver_name, |
1050 | .get_timeline_name = etnaviv_fence_get_timeline_name, | |
1051 | .enable_signaling = etnaviv_fence_enable_signaling, | |
1052 | .signaled = etnaviv_fence_signaled, | |
f54d1867 | 1053 | .wait = dma_fence_default_wait, |
a8c21a54 T |
1054 | .release = etnaviv_fence_release, |
1055 | }; | |
1056 | ||
f54d1867 | 1057 | static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu) |
a8c21a54 T |
1058 | { |
1059 | struct etnaviv_fence *f; | |
1060 | ||
b27734c2 LS |
1061 | /* |
1062 | * GPU lock must already be held, otherwise fence completion order might | |
1063 | * not match the seqno order assigned here. | |
1064 | */ | |
1065 | lockdep_assert_held(&gpu->lock); | |
1066 | ||
a8c21a54 T |
1067 | f = kzalloc(sizeof(*f), GFP_KERNEL); |
1068 | if (!f) | |
1069 | return NULL; | |
1070 | ||
1071 | f->gpu = gpu; | |
1072 | ||
f54d1867 CW |
1073 | dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock, |
1074 | gpu->fence_context, ++gpu->next_fence); | |
a8c21a54 T |
1075 | |
1076 | return &f->base; | |
1077 | } | |
1078 | ||
1079 | int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, | |
9ad59fea | 1080 | unsigned int context, bool exclusive, bool explicit) |
a8c21a54 T |
1081 | { |
1082 | struct reservation_object *robj = etnaviv_obj->resv; | |
1083 | struct reservation_object_list *fobj; | |
f54d1867 | 1084 | struct dma_fence *fence; |
a8c21a54 T |
1085 | int i, ret; |
1086 | ||
1087 | if (!exclusive) { | |
1088 | ret = reservation_object_reserve_shared(robj); | |
1089 | if (ret) | |
1090 | return ret; | |
1091 | } | |
1092 | ||
9ad59fea PZ |
1093 | if (explicit) |
1094 | return 0; | |
1095 | ||
a8c21a54 T |
1096 | /* |
1097 | * If we have any shared fences, then the exclusive fence | |
1098 | * should be ignored as it will already have been signalled. | |
1099 | */ | |
1100 | fobj = reservation_object_get_list(robj); | |
1101 | if (!fobj || fobj->shared_count == 0) { | |
1102 | /* Wait on any existing exclusive fence which isn't our own */ | |
1103 | fence = reservation_object_get_excl(robj); | |
1104 | if (fence && fence->context != context) { | |
f54d1867 | 1105 | ret = dma_fence_wait(fence, true); |
a8c21a54 T |
1106 | if (ret) |
1107 | return ret; | |
1108 | } | |
1109 | } | |
1110 | ||
1111 | if (!exclusive || !fobj) | |
1112 | return 0; | |
1113 | ||
1114 | for (i = 0; i < fobj->shared_count; i++) { | |
1115 | fence = rcu_dereference_protected(fobj->shared[i], | |
1116 | reservation_object_held(robj)); | |
1117 | if (fence->context != context) { | |
f54d1867 | 1118 | ret = dma_fence_wait(fence, true); |
a8c21a54 T |
1119 | if (ret) |
1120 | return ret; | |
1121 | } | |
1122 | } | |
1123 | ||
1124 | return 0; | |
1125 | } | |
1126 | ||
1127 | /* | |
1128 | * event management: | |
1129 | */ | |
1130 | ||
1131 | static unsigned int event_alloc(struct etnaviv_gpu *gpu) | |
1132 | { | |
1133 | unsigned long ret, flags; | |
1134 | unsigned int i, event = ~0U; | |
1135 | ||
1136 | ret = wait_for_completion_timeout(&gpu->event_free, | |
1137 | msecs_to_jiffies(10 * 10000)); | |
1138 | if (!ret) | |
1139 | dev_err(gpu->dev, "wait_for_completion_timeout failed"); | |
1140 | ||
1141 | spin_lock_irqsave(&gpu->event_spinlock, flags); | |
1142 | ||
1143 | /* find first free event */ | |
1144 | for (i = 0; i < ARRAY_SIZE(gpu->event); i++) { | |
1145 | if (gpu->event[i].used == false) { | |
1146 | gpu->event[i].used = true; | |
1147 | event = i; | |
1148 | break; | |
1149 | } | |
1150 | } | |
1151 | ||
1152 | spin_unlock_irqrestore(&gpu->event_spinlock, flags); | |
1153 | ||
1154 | return event; | |
1155 | } | |
1156 | ||
1157 | static void event_free(struct etnaviv_gpu *gpu, unsigned int event) | |
1158 | { | |
1159 | unsigned long flags; | |
1160 | ||
1161 | spin_lock_irqsave(&gpu->event_spinlock, flags); | |
1162 | ||
1163 | if (gpu->event[event].used == false) { | |
1164 | dev_warn(gpu->dev, "event %u is already marked as free", | |
1165 | event); | |
1166 | spin_unlock_irqrestore(&gpu->event_spinlock, flags); | |
1167 | } else { | |
1168 | gpu->event[event].used = false; | |
1169 | spin_unlock_irqrestore(&gpu->event_spinlock, flags); | |
1170 | ||
1171 | complete(&gpu->event_free); | |
1172 | } | |
1173 | } | |
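/*
 * Editorial note (not part of the original source): gpu->event_free acts as
 * a counting semaphore built on a struct completion. etnaviv_gpu_init()
 * calls complete() once per event slot, event_alloc() consumes one count
 * with wait_for_completion_timeout() before scanning for an unused slot,
 * and event_free() returns the count with complete() when a slot is
 * released.
 */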
1174 | ||
1175 | /* | |
1176 | * Cmdstream submission/retirement: | |
1177 | */ | |
1178 | ||
a8c21a54 T |
1179 | static void retire_worker(struct work_struct *work) |
1180 | { | |
1181 | struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu, | |
1182 | retire_work); | |
1183 | u32 fence = gpu->completed_fence; | |
1184 | struct etnaviv_cmdbuf *cmdbuf, *tmp; | |
1185 | unsigned int i; | |
1186 | ||
1187 | mutex_lock(&gpu->lock); | |
1188 | list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) { | |
f54d1867 | 1189 | if (!dma_fence_is_signaled(cmdbuf->fence)) |
a8c21a54 T |
1190 | break; |
1191 | ||
1192 | list_del(&cmdbuf->node); | |
f54d1867 | 1193 | dma_fence_put(cmdbuf->fence); |
a8c21a54 T |
1194 | |
1195 | for (i = 0; i < cmdbuf->nr_bos; i++) { | |
b6325f40 RK |
1196 | struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i]; |
1197 | struct etnaviv_gem_object *etnaviv_obj = mapping->object; | |
a8c21a54 T |
1198 | |
1199 | atomic_dec(&etnaviv_obj->gpu_active); | |
1200 | /* drop the refcount taken in etnaviv_gpu_submit */ | |
b6325f40 | 1201 | etnaviv_gem_mapping_unreference(mapping); |
a8c21a54 T |
1202 | } |
1203 | ||
ea1f5729 | 1204 | etnaviv_cmdbuf_free(cmdbuf); |
d9fd0c7d LS |
1205 | /* |
1206 | * We need to balance the runtime PM count caused by | |
1207 | * each submission. Upon submission, we increment | |
1208 | * the runtime PM counter, and allocate one event. | |
1209 | * So here, we put the runtime PM count for each | |
1210 | * completed event. | |
1211 | */ | |
1212 | pm_runtime_put_autosuspend(gpu->dev); | |
a8c21a54 T |
1213 | } |
1214 | ||
1215 | gpu->retired_fence = fence; | |
1216 | ||
1217 | mutex_unlock(&gpu->lock); | |
1218 | ||
1219 | wake_up_all(&gpu->fence_event); | |
1220 | } | |
1221 | ||
1222 | int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu, | |
1223 | u32 fence, struct timespec *timeout) | |
1224 | { | |
1225 | int ret; | |
1226 | ||
1227 | if (fence_after(fence, gpu->next_fence)) { | |
1228 | DRM_ERROR("waiting on invalid fence: %u (of %u)\n", | |
1229 | fence, gpu->next_fence); | |
1230 | return -EINVAL; | |
1231 | } | |
1232 | ||
1233 | if (!timeout) { | |
1234 | /* No timeout was requested: just test for completion */ | |
1235 | ret = fence_completed(gpu, fence) ? 0 : -EBUSY; | |
1236 | } else { | |
1237 | unsigned long remaining = etnaviv_timeout_to_jiffies(timeout); | |
1238 | ||
1239 | ret = wait_event_interruptible_timeout(gpu->fence_event, | |
1240 | fence_completed(gpu, fence), | |
1241 | remaining); | |
1242 | if (ret == 0) { | |
1243 | DBG("timeout waiting for fence: %u (retired: %u completed: %u)", | |
1244 | fence, gpu->retired_fence, | |
1245 | gpu->completed_fence); | |
1246 | ret = -ETIMEDOUT; | |
1247 | } else if (ret != -ERESTARTSYS) { | |
1248 | ret = 0; | |
1249 | } | |
1250 | } | |
1251 | ||
1252 | return ret; | |
1253 | } | |
1254 | ||
1255 | /* | |
1256 | * Wait for an object to become inactive. This, on its own, is not race | |
1257 | * free: the object is moved by the retire worker off the active list, and | |
1258 | * then the iova is put. Moreover, the object could be re-submitted just | |
1259 | * after we notice that it's become inactive. | |
1260 | * | |
1261 | * Although the retirement happens under the gpu lock, we don't want to hold | |
1262 | * that lock in this function while waiting. | |
1263 | */ | |
1264 | int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu, | |
1265 | struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout) | |
1266 | { | |
1267 | unsigned long remaining; | |
1268 | long ret; | |
1269 | ||
1270 | if (!timeout) | |
1271 | return !is_active(etnaviv_obj) ? 0 : -EBUSY; | |
1272 | ||
1273 | remaining = etnaviv_timeout_to_jiffies(timeout); | |
1274 | ||
1275 | ret = wait_event_interruptible_timeout(gpu->fence_event, | |
1276 | !is_active(etnaviv_obj), | |
1277 | remaining); | |
1278 | if (ret > 0) { | |
1279 | struct etnaviv_drm_private *priv = gpu->drm->dev_private; | |
1280 | ||
1281 | /* Synchronise with the retire worker */ | |
1282 | flush_workqueue(priv->wq); | |
1283 | return 0; | |
1284 | } else if (ret == -ERESTARTSYS) { | |
1285 | return -ERESTARTSYS; | |
1286 | } else { | |
1287 | return -ETIMEDOUT; | |
1288 | } | |
1289 | } | |
1290 | ||
1291 | int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu) | |
1292 | { | |
1293 | return pm_runtime_get_sync(gpu->dev); | |
1294 | } | |
1295 | ||
1296 | void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu) | |
1297 | { | |
1298 | pm_runtime_mark_last_busy(gpu->dev); | |
1299 | pm_runtime_put_autosuspend(gpu->dev); | |
1300 | } | |
1301 | ||
1302 | /* add bo's to gpu's ring, and kick gpu: */ | |
1303 | int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, | |
1304 | struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf) | |
1305 | { | |
f54d1867 | 1306 | struct dma_fence *fence; |
a8c21a54 T |
1307 | unsigned int event, i; |
1308 | int ret; | |
1309 | ||
1310 | ret = etnaviv_gpu_pm_get_sync(gpu); | |
1311 | if (ret < 0) | |
1312 | return ret; | |
1313 | ||
a8c21a54 T |
1314 | /* |
1315 | * TODO | |
1316 | * | |
1317 | * - flush | |
1318 | * - data endian | |
1319 | * - prefetch | |
1320 | * | |
1321 | */ | |
1322 | ||
1323 | event = event_alloc(gpu); | |
1324 | if (unlikely(event == ~0U)) { | |
1325 | DRM_ERROR("no free event\n"); | |
1326 | ret = -EBUSY; | |
d9853490 | 1327 | goto out_pm_put; |
a8c21a54 T |
1328 | } |
1329 | ||
1330 | fence = etnaviv_gpu_fence_alloc(gpu); | |
1331 | if (!fence) { | |
1332 | event_free(gpu, event); | |
1333 | ret = -ENOMEM; | |
d9853490 | 1334 | goto out_pm_put; |
a8c21a54 T |
1335 | } |
1336 | ||
d9853490 LS |
1337 | mutex_lock(&gpu->lock); |
1338 | ||
a8c21a54 | 1339 | gpu->event[event].fence = fence; |
6e2b98cf LS |
1340 | submit->fence = dma_fence_get(fence); |
1341 | gpu->active_fence = submit->fence->seqno; | |
a8c21a54 T |
1342 | |
1343 | if (gpu->lastctx != cmdbuf->ctx) { | |
1344 | gpu->mmu->need_flush = true; | |
1345 | gpu->switch_context = true; | |
1346 | gpu->lastctx = cmdbuf->ctx; | |
1347 | } | |
1348 | ||
1349 | etnaviv_buffer_queue(gpu, event, cmdbuf); | |
1350 | ||
1351 | cmdbuf->fence = fence; | |
1352 | list_add_tail(&cmdbuf->node, &gpu->active_cmd_list); | |
1353 | ||
1354 | /* We're committed to adding this command buffer, hold a PM reference */ | |
1355 | pm_runtime_get_noresume(gpu->dev); | |
1356 | ||
1357 | for (i = 0; i < submit->nr_bos; i++) { | |
1358 | struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; | |
a8c21a54 | 1359 | |
b6325f40 RK |
1360 | /* Each cmdbuf takes a refcount on the mapping */ |
1361 | etnaviv_gem_mapping_reference(submit->bos[i].mapping); | |
1362 | cmdbuf->bo_map[i] = submit->bos[i].mapping; | |
a8c21a54 T |
1363 | atomic_inc(&etnaviv_obj->gpu_active); |
1364 | ||
1365 | if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE) | |
1366 | reservation_object_add_excl_fence(etnaviv_obj->resv, | |
1367 | fence); | |
1368 | else | |
1369 | reservation_object_add_shared_fence(etnaviv_obj->resv, | |
1370 | fence); | |
1371 | } | |
1372 | cmdbuf->nr_bos = submit->nr_bos; | |
1373 | hangcheck_timer_reset(gpu); | |
1374 | ret = 0; | |
1375 | ||
a8c21a54 T |
1376 | mutex_unlock(&gpu->lock); |
1377 | ||
d9853490 | 1378 | out_pm_put: |
a8c21a54 T |
1379 | etnaviv_gpu_pm_put(gpu); |
1380 | ||
1381 | return ret; | |
1382 | } | |
1383 | ||
1384 | /* | |
1385 | * Init/Cleanup: | |
1386 | */ | |
1387 | static irqreturn_t irq_handler(int irq, void *data) | |
1388 | { | |
1389 | struct etnaviv_gpu *gpu = data; | |
1390 | irqreturn_t ret = IRQ_NONE; | |
1391 | ||
1392 | u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE); | |
1393 | ||
1394 | if (intr != 0) { | |
1395 | int event; | |
1396 | ||
1397 | pm_runtime_mark_last_busy(gpu->dev); | |
1398 | ||
1399 | dev_dbg(gpu->dev, "intr 0x%08x\n", intr); | |
1400 | ||
1401 | if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) { | |
1402 | dev_err(gpu->dev, "AXI bus error\n"); | |
1403 | intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR; | |
1404 | } | |
1405 | ||
128a9b1d LS |
1406 | if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) { |
1407 | int i; | |
1408 | ||
1409 | dev_err_ratelimited(gpu->dev, | |
1410 | "MMU fault status 0x%08x\n", | |
1411 | gpu_read(gpu, VIVS_MMUv2_STATUS)); | |
1412 | for (i = 0; i < 4; i++) { | |
1413 | dev_err_ratelimited(gpu->dev, | |
1414 | "MMU %d fault addr 0x%08x\n", | |
1415 | i, gpu_read(gpu, | |
1416 | VIVS_MMUv2_EXCEPTION_ADDR(i))); | |
1417 | } | |
1418 | intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION; | |
1419 | } | |
1420 | ||
a8c21a54 | 1421 | while ((event = ffs(intr)) != 0) { |
f54d1867 | 1422 | struct dma_fence *fence; |
a8c21a54 T |
1423 | |
1424 | event -= 1; | |
1425 | ||
1426 | intr &= ~(1 << event); | |
1427 | ||
1428 | dev_dbg(gpu->dev, "event %u\n", event); | |
1429 | ||
1430 | fence = gpu->event[event].fence; | |
1431 | gpu->event[event].fence = NULL; | |
f54d1867 | 1432 | dma_fence_signal(fence); |
a8c21a54 T |
1433 | |
1434 | /* | |
1435 | * Events can be processed out of order. E.g., | |
1436 | * - allocate and queue event 0 | |
1437 | * - allocate event 1 | |
1438 | * - event 0 completes, we process it | |
1439 | * - allocate and queue event 0 | |
1440 | * - event 1 and event 0 complete | |
1441 | * we can end up processing event 0 first, then 1. | |
1442 | */ | |
1443 | if (fence_after(fence->seqno, gpu->completed_fence)) | |
1444 | gpu->completed_fence = fence->seqno; | |
1445 | ||
1446 | event_free(gpu, event); | |
a8c21a54 T |
1447 | } |
1448 | ||
1449 | /* Retire the buffer objects in a work */ | |
1450 | etnaviv_queue_work(gpu->drm, &gpu->retire_work); | |
1451 | ||
1452 | ret = IRQ_HANDLED; | |
1453 | } | |
1454 | ||
1455 | return ret; | |
1456 | } | |
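/*
 * Editorial illustration of the out-of-order case described above (not part
 * of the original source), assuming fence_after() is a wrap-safe "a is newer
 * than b" seqno comparison: if event 0 carries seqno 5 and event 1 carries
 * seqno 6, and the interrupt for event 1 happens to be processed first,
 * completed_fence becomes 6; when event 0 is then processed,
 * fence_after(5, 6) is false, so completed_fence stays at 6 rather than
 * moving backwards.
 */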
1457 | ||
1458 | static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu) | |
1459 | { | |
1460 | int ret; | |
1461 | ||
9c7310c0 LS |
1462 | if (gpu->clk_bus) { |
1463 | ret = clk_prepare_enable(gpu->clk_bus); | |
1464 | if (ret) | |
1465 | return ret; | |
1466 | } | |
a8c21a54 | 1467 | |
9c7310c0 LS |
1468 | if (gpu->clk_core) { |
1469 | ret = clk_prepare_enable(gpu->clk_core); | |
1470 | if (ret) | |
1471 | goto disable_clk_bus; | |
1472 | } | |
1473 | ||
1474 | if (gpu->clk_shader) { | |
1475 | ret = clk_prepare_enable(gpu->clk_shader); | |
1476 | if (ret) | |
1477 | goto disable_clk_core; | |
a8c21a54 T |
1478 | } |
1479 | ||
1480 | return 0; | |
9c7310c0 LS |
1481 | |
1482 | disable_clk_core: | |
1483 | if (gpu->clk_core) | |
1484 | clk_disable_unprepare(gpu->clk_core); | |
1485 | disable_clk_bus: | |
1486 | if (gpu->clk_bus) | |
1487 | clk_disable_unprepare(gpu->clk_bus); | |
1488 | ||
1489 | return ret; | |
a8c21a54 T |
1490 | } |
1491 | ||
1492 | static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu) | |
1493 | { | |
9c7310c0 LS |
1494 | if (gpu->clk_shader) |
1495 | clk_disable_unprepare(gpu->clk_shader); | |
1496 | if (gpu->clk_core) | |
1497 | clk_disable_unprepare(gpu->clk_core); | |
1498 | if (gpu->clk_bus) | |
1499 | clk_disable_unprepare(gpu->clk_bus); | |
a8c21a54 T |
1500 | |
1501 | return 0; | |
1502 | } | |
1503 | ||
b88163e3 LS |
1504 | int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms) |
1505 | { | |
1506 | unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); | |
1507 | ||
1508 | do { | |
1509 | u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE); | |
1510 | ||
1511 | if ((idle & gpu->idle_mask) == gpu->idle_mask) | |
1512 | return 0; | |
1513 | ||
1514 | if (time_is_before_jiffies(timeout)) { | |
1515 | dev_warn(gpu->dev, | |
1516 | "timed out waiting for idle: idle=0x%x\n", | |
1517 | idle); | |
1518 | return -ETIMEDOUT; | |
1519 | } | |
1520 | ||
1521 | udelay(5); | |
1522 | } while (1); | |
1523 | } | |
1524 | ||
a8c21a54 T |
1525 | static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu) |
1526 | { | |
1527 | if (gpu->buffer) { | |
a8c21a54 T |
1528 | /* Replace the last WAIT with END */ |
1529 | etnaviv_buffer_end(gpu); | |
1530 | ||
1531 | /* | |
1532 | * We know that only the FE is busy here, this should | |
1533 | * happen quickly (as the WAIT is only 200 cycles). If | |
1534 | * we fail, just warn and continue. | |
1535 | */ | |
b88163e3 | 1536 | etnaviv_gpu_wait_idle(gpu, 100); |
a8c21a54 T |
1537 | } |
1538 | ||
1539 | return etnaviv_gpu_clk_disable(gpu); | |
1540 | } | |
1541 | ||
1542 | #ifdef CONFIG_PM | |
1543 | static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu) | |
1544 | { | |
a8c21a54 T |
1545 | int ret; |
1546 | ||
1547 | ret = mutex_lock_killable(&gpu->lock); | |
1548 | if (ret) | |
1549 | return ret; | |
1550 | ||
bcdfb5e5 | 1551 | etnaviv_gpu_update_clock(gpu); |
a8c21a54 T |
1552 | etnaviv_gpu_hw_init(gpu); |
1553 | ||
1554 | gpu->switch_context = true; | |
f6086311 | 1555 | gpu->exec_state = -1; |
a8c21a54 T |
1556 | |
1557 | mutex_unlock(&gpu->lock); | |
1558 | ||
1559 | return 0; | |
1560 | } | |
1561 | #endif | |
1562 | ||
bcdfb5e5 RK |
1563 | static int |
1564 | etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev, | |
1565 | unsigned long *state) | |
1566 | { | |
1567 | *state = 6; | |
1568 | ||
1569 | return 0; | |
1570 | } | |
1571 | ||
1572 | static int | |
1573 | etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev, | |
1574 | unsigned long *state) | |
1575 | { | |
1576 | struct etnaviv_gpu *gpu = cdev->devdata; | |
1577 | ||
1578 | *state = gpu->freq_scale; | |
1579 | ||
1580 | return 0; | |
1581 | } | |
1582 | ||
1583 | static int | |
1584 | etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev, | |
1585 | unsigned long state) | |
1586 | { | |
1587 | struct etnaviv_gpu *gpu = cdev->devdata; | |
1588 | ||
1589 | mutex_lock(&gpu->lock); | |
1590 | gpu->freq_scale = state; | |
1591 | if (!pm_runtime_suspended(gpu->dev)) | |
1592 | etnaviv_gpu_update_clock(gpu); | |
1593 | mutex_unlock(&gpu->lock); | |
1594 | ||
1595 | return 0; | |
1596 | } | |
1597 | ||
1598 | static struct thermal_cooling_device_ops cooling_ops = { | |
1599 | .get_max_state = etnaviv_gpu_cooling_get_max_state, | |
1600 | .get_cur_state = etnaviv_gpu_cooling_get_cur_state, | |
1601 | .set_cur_state = etnaviv_gpu_cooling_set_cur_state, | |
1602 | }; | |
1603 | ||
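/*
 * Component bind: register the cooling device, power the GPU up (via
 * runtime PM when available, otherwise a direct clock enable), set up the
 * fence context, the retire/recover work items and the hangcheck timer,
 * and hand the GPU to the DRM device before dropping the runtime PM
 * reference again.
 */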
a8c21a54 T |
1604 | static int etnaviv_gpu_bind(struct device *dev, struct device *master, |
1605 | void *data) | |
1606 | { | |
1607 | struct drm_device *drm = data; | |
1608 | struct etnaviv_drm_private *priv = drm->dev_private; | |
1609 | struct etnaviv_gpu *gpu = dev_get_drvdata(dev); | |
1610 | int ret; | |
1611 | ||
bcdfb5e5 RK |
1612 | gpu->cooling = thermal_of_cooling_device_register(dev->of_node, |
1613 | (char *)dev_name(dev), gpu, &cooling_ops); | |
1614 | if (IS_ERR(gpu->cooling)) | |
1615 | return PTR_ERR(gpu->cooling); | |
1616 | ||
a8c21a54 T |
1617 | #ifdef CONFIG_PM |
1618 | ret = pm_runtime_get_sync(gpu->dev); | |
1619 | #else | |
1620 | ret = etnaviv_gpu_clk_enable(gpu); | |
1621 | #endif | |
bcdfb5e5 RK |
1622 | if (ret < 0) { |
1623 | thermal_cooling_device_unregister(gpu->cooling); | |
a8c21a54 | 1624 | return ret; |
bcdfb5e5 | 1625 | } |
a8c21a54 T |
1626 | |
1627 | gpu->drm = drm; | |
f54d1867 | 1628 | gpu->fence_context = dma_fence_context_alloc(1); |
a8c21a54 T |
1629 | spin_lock_init(&gpu->fence_spinlock); |
1630 | ||
1631 | INIT_LIST_HEAD(&gpu->active_cmd_list); | |
1632 | INIT_WORK(&gpu->retire_work, retire_worker); | |
1633 | INIT_WORK(&gpu->recover_work, recover_worker); | |
1634 | init_waitqueue_head(&gpu->fence_event); | |
1635 | ||
946dd8d5 LS |
1636 | setup_deferrable_timer(&gpu->hangcheck_timer, hangcheck_handler, |
1637 | (unsigned long)gpu); | |
a8c21a54 T |
1638 | |
1639 | priv->gpu[priv->num_gpus++] = gpu; | |
1640 | ||
1641 | pm_runtime_mark_last_busy(gpu->dev); | |
1642 | pm_runtime_put_autosuspend(gpu->dev); | |
1643 | ||
1644 | return 0; | |
1645 | } | |
1646 | ||
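/*
 * Component unbind mirrors bind in reverse: stop the hangcheck timer,
 * suspend the hardware (through runtime PM when available), free the
 * kernel command buffer and its suballocator, tear down the MMU and
 * finally unregister the cooling device.
 */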
1647 | static void etnaviv_gpu_unbind(struct device *dev, struct device *master, | |
1648 | void *data) | |
1649 | { | |
1650 | struct etnaviv_gpu *gpu = dev_get_drvdata(dev); | |
1651 | ||
1652 | DBG("%s", dev_name(gpu->dev)); | |
1653 | ||
1654 | hangcheck_disable(gpu); | |
1655 | ||
1656 | #ifdef CONFIG_PM | |
1657 | pm_runtime_get_sync(gpu->dev); | |
1658 | pm_runtime_put_sync_suspend(gpu->dev); | |
1659 | #else | |
1660 | etnaviv_gpu_hw_suspend(gpu); | |
1661 | #endif | |
1662 | ||
1663 | if (gpu->buffer) { | |
ea1f5729 | 1664 | etnaviv_cmdbuf_free(gpu->buffer); |
a8c21a54 T |
1665 | gpu->buffer = NULL; |
1666 | } | |
1667 | ||
e66774dd LS |
1668 | if (gpu->cmdbuf_suballoc) { |
1669 | etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc); | |
1670 | gpu->cmdbuf_suballoc = NULL; | |
1671 | } | |
1672 | ||
a8c21a54 T |
1673 | if (gpu->mmu) { |
1674 | etnaviv_iommu_destroy(gpu->mmu); | |
1675 | gpu->mmu = NULL; | |
1676 | } | |
1677 | ||
1678 | gpu->drm = NULL; | |
bcdfb5e5 RK |
1679 | |
1680 | thermal_cooling_device_unregister(gpu->cooling); | |
1681 | gpu->cooling = NULL; | |
a8c21a54 T |
1682 | } |
1683 | ||
1684 | static const struct component_ops gpu_ops = { | |
1685 | .bind = etnaviv_gpu_bind, | |
1686 | .unbind = etnaviv_gpu_unbind, | |
1687 | }; | |
1688 | ||
1689 | static const struct of_device_id etnaviv_gpu_match[] = { | |
1690 | { | |
1691 | .compatible = "vivante,gc" | |
1692 | }, | |
1693 | { /* sentinel */ } | |
1694 | }; | |
1695 | ||
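/*
 * Platform probe: map the MMIO region, claim the interrupt, and look up
 * the optional "bus", "core" and "shader" clocks (a missing clock is
 * treated as absent rather than fatal). The device starts out runtime
 * suspended with a 200ms autosuspend delay; the actual GPU setup happens
 * in the component bind callback above.
 */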
1696 | static int etnaviv_gpu_platform_probe(struct platform_device *pdev) | |
1697 | { | |
1698 | struct device *dev = &pdev->dev; | |
1699 | struct etnaviv_gpu *gpu; | |
dc227890 | 1700 | int err; |
a8c21a54 T |
1701 | |
1702 | gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL); | |
1703 | if (!gpu) | |
1704 | return -ENOMEM; | |
1705 | ||
1706 | gpu->dev = &pdev->dev; | |
1707 | mutex_init(&gpu->lock); | |
1708 | ||
a8c21a54 T |
1709 | /* Map registers: */ |
1710 | gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev)); | |
1711 | if (IS_ERR(gpu->mmio)) | |
1712 | return PTR_ERR(gpu->mmio); | |
1713 | ||
1714 | /* Get Interrupt: */ | |
1715 | gpu->irq = platform_get_irq(pdev, 0); | |
1716 | if (gpu->irq < 0) { | |
db60eda3 FE |
1717 | dev_err(dev, "failed to get irq: %d\n", gpu->irq); |
1718 | return gpu->irq; | |
a8c21a54 T |
1719 | } |
1720 | ||
1721 | err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0, | |
1722 | dev_name(gpu->dev), gpu); | |
1723 | if (err) { | |
1724 | dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err); | |
db60eda3 | 1725 | return err; |
a8c21a54 T |
1726 | } |
1727 | ||
1728 | /* Get Clocks: */ | |
1729 | gpu->clk_bus = devm_clk_get(&pdev->dev, "bus"); | |
1730 | DBG("clk_bus: %p", gpu->clk_bus); | |
1731 | if (IS_ERR(gpu->clk_bus)) | |
1732 | gpu->clk_bus = NULL; | |
1733 | ||
1734 | gpu->clk_core = devm_clk_get(&pdev->dev, "core"); | |
1735 | DBG("clk_core: %p", gpu->clk_core); | |
1736 | if (IS_ERR(gpu->clk_core)) | |
1737 | gpu->clk_core = NULL; | |
1738 | ||
1739 | gpu->clk_shader = devm_clk_get(&pdev->dev, "shader"); | |
1740 | DBG("clk_shader: %p", gpu->clk_shader); | |
1741 | if (IS_ERR(gpu->clk_shader)) | |
1742 | gpu->clk_shader = NULL; | |
1743 | ||
1744 | /* TODO: figure out max mapped size */ | |
1745 | dev_set_drvdata(dev, gpu); | |
1746 | ||
1747 | /* | |
1748 | * We treat the device as initially suspended. The runtime PM | |
1749 | * autosuspend delay is rather arbitrary: no measurements have | 
1750 | * yet been performed to determine an appropriate value. | |
1751 | */ | |
1752 | pm_runtime_use_autosuspend(gpu->dev); | |
1753 | pm_runtime_set_autosuspend_delay(gpu->dev, 200); | |
1754 | pm_runtime_enable(gpu->dev); | |
1755 | ||
1756 | err = component_add(&pdev->dev, &gpu_ops); | |
1757 | if (err < 0) { | |
1758 | dev_err(&pdev->dev, "failed to register component: %d\n", err); | |
db60eda3 | 1759 | return err; |
a8c21a54 T |
1760 | } |
1761 | ||
1762 | return 0; | |
a8c21a54 T |
1763 | } |
1764 | ||
1765 | static int etnaviv_gpu_platform_remove(struct platform_device *pdev) | |
1766 | { | |
1767 | component_del(&pdev->dev, &gpu_ops); | |
1768 | pm_runtime_disable(&pdev->dev); | |
1769 | return 0; | |
1770 | } | |
1771 | ||
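/*
 * Runtime suspend refuses to power down while fences are outstanding or
 * while any unit other than the front end still reports busy; only then
 * is the hardware suspended and clocked off.
 */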
1772 | #ifdef CONFIG_PM | |
1773 | static int etnaviv_gpu_rpm_suspend(struct device *dev) | |
1774 | { | |
1775 | struct etnaviv_gpu *gpu = dev_get_drvdata(dev); | |
1776 | u32 idle, mask; | |
1777 | ||
1778 | /* If we have outstanding fences, we're not idle */ | |
1779 | if (gpu->completed_fence != gpu->active_fence) | |
1780 | return -EBUSY; | |
1781 | ||
1782 | /* Check whether the hardware (except FE) is idle */ | |
1783 | mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE; | |
1784 | idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask; | |
1785 | if (idle != mask) | |
1786 | return -EBUSY; | |
1787 | ||
1788 | return etnaviv_gpu_hw_suspend(gpu); | |
1789 | } | |
1790 | ||
1791 | static int etnaviv_gpu_rpm_resume(struct device *dev) | |
1792 | { | |
1793 | struct etnaviv_gpu *gpu = dev_get_drvdata(dev); | |
1794 | int ret; | |
1795 | ||
1796 | ret = etnaviv_gpu_clk_enable(gpu); | |
1797 | if (ret) | |
1798 | return ret; | |
1799 | ||
1800 | /* Re-initialise the basic hardware state */ | |
1801 | if (gpu->drm && gpu->buffer) { | |
1802 | ret = etnaviv_gpu_hw_resume(gpu); | |
1803 | if (ret) { | |
1804 | etnaviv_gpu_clk_disable(gpu); | |
1805 | return ret; | |
1806 | } | |
1807 | } | |
1808 | ||
1809 | return 0; | |
1810 | } | |
1811 | #endif | |
1812 | ||
1813 | static const struct dev_pm_ops etnaviv_gpu_pm_ops = { | |
1814 | SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume, | |
1815 | NULL) | |
1816 | }; | |
1817 | ||
1818 | struct platform_driver etnaviv_gpu_driver = { | |
1819 | .driver = { | |
1820 | .name = "etnaviv-gpu", | |
1821 | .owner = THIS_MODULE, | |
1822 | .pm = &etnaviv_gpu_pm_ops, | |
1823 | .of_match_table = etnaviv_gpu_match, | |
1824 | }, | |
1825 | .probe = etnaviv_gpu_platform_probe, | |
1826 | .remove = etnaviv_gpu_platform_remove, | |
1827 | .id_table = gpu_ids, | |
1828 | }; |
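For context, a minimal and purely illustrative sketch of how a platform driver like this one gets registered. The real etnaviv driver performs registration from its DRM driver setup code rather than a standalone module init, and the function names below are hypothetical:

#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical standalone registration of etnaviv_gpu_driver; etnaviv
 * itself registers the driver from its DRM setup code instead. */
static int __init etnaviv_gpu_example_init(void)
{
	return platform_driver_register(&etnaviv_gpu_driver);
}
module_init(etnaviv_gpu_example_init);

static void __exit etnaviv_gpu_example_exit(void)
{
	platform_driver_unregister(&etnaviv_gpu_driver);
}
module_exit(etnaviv_gpu_example_exit);

/* Required for a standalone module; the driver itself is GPL-2.0. */
MODULE_LICENSE("GPL");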