/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/component.h>
#include <linux/fence.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
#include "etnaviv_dump.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_iommu_v2.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "cmdstream.xml.h"

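/* Legacy platform-bus id table; devicetree probing matches via etnaviv_gpu_match below. */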
static const struct platform_device_id gpu_ids[] = {
	{ .name = "etnaviv-gpu,2d" },
	{ },
};

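/*
 * When true, the first detected GPU hang triggers a devcoredump from
 * recover_worker(); the flag then clears itself and can be re-armed
 * through the writable (0600) module parameter in sysfs.
 */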
static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);

/*
 * Driver functions:
 */

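/*
 * Backs the ETNAVIV_GET_PARAM ioctl: copies one field of the probed GPU
 * identity out to userspace (parameter IDs come from the UAPI header).
 */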
int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
{
	switch (param) {
	case ETNAVIV_PARAM_GPU_MODEL:
		*value = gpu->identity.model;
		break;

	case ETNAVIV_PARAM_GPU_REVISION:
		*value = gpu->identity.revision;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_0:
		*value = gpu->identity.features;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_1:
		*value = gpu->identity.minor_features0;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_2:
		*value = gpu->identity.minor_features1;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_3:
		*value = gpu->identity.minor_features2;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_4:
		*value = gpu->identity.minor_features3;
		break;

	case ETNAVIV_PARAM_GPU_STREAM_COUNT:
		*value = gpu->identity.stream_count;
		break;

	case ETNAVIV_PARAM_GPU_REGISTER_MAX:
		*value = gpu->identity.register_max;
		break;

	case ETNAVIV_PARAM_GPU_THREAD_COUNT:
		*value = gpu->identity.thread_count;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
		*value = gpu->identity.vertex_cache_size;
		break;

	case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
		*value = gpu->identity.shader_core_count;
		break;

	case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
		*value = gpu->identity.pixel_pipes;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
		*value = gpu->identity.vertex_output_buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
		*value = gpu->identity.buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
		*value = gpu->identity.instruction_count;
		break;

	case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
		*value = gpu->identity.num_constants;
		break;

	default:
		DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
		return -EINVAL;
	}

	return 0;
}

static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		u32 specs[2];

		specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
		specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);

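		/*
		 * Unpack each field with the generated __MASK/__SHIFT
		 * constants from state_hi.xml.h.
		 */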
		gpu->identity.stream_count =
			(specs[0] & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK)
				>> VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT;
		gpu->identity.register_max =
			(specs[0] & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK)
				>> VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT;
		gpu->identity.thread_count =
			(specs[0] & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK)
				>> VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT;
		gpu->identity.vertex_cache_size =
			(specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK)
				>> VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT;
		gpu->identity.shader_core_count =
			(specs[0] & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK)
				>> VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT;
		gpu->identity.pixel_pipes =
			(specs[0] & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK)
				>> VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT;
		gpu->identity.vertex_output_buffer_size =
			(specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK)
				>> VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT;

		gpu->identity.buffer_size =
			(specs[1] & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK)
				>> VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT;
		gpu->identity.instruction_count =
			(specs[1] & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK)
				>> VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT;
		gpu->identity.num_constants =
			(specs[1] & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK)
				>> VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT;
	}

	/* Fill in the stream count if not specified */
	if (gpu->identity.stream_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.stream_count = 4;
		else
			gpu->identity.stream_count = 1;
	}

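	/*
	 * register_max and thread_count are log2-encoded by the hardware,
	 * e.g. a raw value of 5 means 1 << 5 = 32.
	 */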
	/* Convert the register max value */
	if (gpu->identity.register_max)
		gpu->identity.register_max = 1 << gpu->identity.register_max;
	else if (gpu->identity.model == 0x0400)
		gpu->identity.register_max = 32;
	else
		gpu->identity.register_max = 64;

	/* Convert thread count */
	if (gpu->identity.thread_count)
		gpu->identity.thread_count = 1 << gpu->identity.thread_count;
	else if (gpu->identity.model == 0x0400)
		gpu->identity.thread_count = 64;
	else if (gpu->identity.model == 0x0500 ||
		 gpu->identity.model == 0x0530)
		gpu->identity.thread_count = 128;
	else
		gpu->identity.thread_count = 256;

	if (gpu->identity.vertex_cache_size == 0)
		gpu->identity.vertex_cache_size = 8;

	if (gpu->identity.shader_core_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.shader_core_count = 2;
		else
			gpu->identity.shader_core_count = 1;
	}

	if (gpu->identity.pixel_pipes == 0)
		gpu->identity.pixel_pipes = 1;

	/* Convert vertex buffer size */
	if (gpu->identity.vertex_output_buffer_size) {
		gpu->identity.vertex_output_buffer_size =
			1 << gpu->identity.vertex_output_buffer_size;
	} else if (gpu->identity.model == 0x0400) {
		if (gpu->identity.revision < 0x4000)
			gpu->identity.vertex_output_buffer_size = 512;
		else if (gpu->identity.revision < 0x4200)
			gpu->identity.vertex_output_buffer_size = 256;
		else
			gpu->identity.vertex_output_buffer_size = 128;
	} else {
		gpu->identity.vertex_output_buffer_size = 512;
	}

	switch (gpu->identity.instruction_count) {
	case 0:
		if ((gpu->identity.model == 0x2000 &&
		     gpu->identity.revision == 0x5108) ||
		    gpu->identity.model == 0x880)
			gpu->identity.instruction_count = 512;
		else
			gpu->identity.instruction_count = 256;
		break;

	case 1:
		gpu->identity.instruction_count = 1024;
		break;

	case 2:
		gpu->identity.instruction_count = 2048;
		break;

	default:
		gpu->identity.instruction_count = 256;
		break;
	}

	if (gpu->identity.num_constants == 0)
		gpu->identity.num_constants = 168;
}

static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
{
	u32 chipIdentity;

	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);

	/* Special case for older graphics cores. */
	if (((chipIdentity & VIVS_HI_CHIP_IDENTITY_FAMILY__MASK)
	     >> VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT) == 0x01) {
		gpu->identity.model = 0x500; /* gc500 */
		gpu->identity.revision =
			(chipIdentity & VIVS_HI_CHIP_IDENTITY_REVISION__MASK)
			>> VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT;
	} else {
		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);

		/*
		 * !!!! HACK ALERT !!!!
		 * Because people change device IDs without letting software
		 * know about it - here is the hack to make it all look the
		 * same. Only for GC400 family.
		 */
		if ((gpu->identity.model & 0xff00) == 0x0400 &&
		    gpu->identity.model != 0x0420) {
			gpu->identity.model = gpu->identity.model & 0x0400;
		}

		/* Another special case */
		if (gpu->identity.model == 0x300 &&
		    gpu->identity.revision == 0x2201) {
			u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);

			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
				/*
				 * This IP has an ECO; put the correct
				 * revision in it.
				 */
				gpu->identity.revision = 0x1051;
			}
		}
	}

	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
		 gpu->identity.model, gpu->identity.revision);

	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);

	/* Disable fast clear on GC700. */
	if (gpu->identity.model == 0x700)
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;

	if ((gpu->identity.model == 0x500 && gpu->identity.revision < 2) ||
	    (gpu->identity.model == 0x300 && gpu->identity.revision < 0x2000)) {

		/*
		 * GC500 rev 1.x and GC300 rev < 2.0 don't have these
		 * registers.
		 */
		gpu->identity.minor_features0 = 0;
		gpu->identity.minor_features1 = 0;
		gpu->identity.minor_features2 = 0;
		gpu->identity.minor_features3 = 0;
	} else
		gpu->identity.minor_features0 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);

	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		gpu->identity.minor_features1 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
		gpu->identity.minor_features2 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
		gpu->identity.minor_features3 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
	}

	/* GC600 idle register reports zero bits where modules aren't present */
	if (gpu->identity.model == chipModel_GC600) {
		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
				 VIVS_HI_IDLE_STATE_RA |
				 VIVS_HI_IDLE_STATE_SE |
				 VIVS_HI_IDLE_STATE_PA |
				 VIVS_HI_IDLE_STATE_SH |
				 VIVS_HI_IDLE_STATE_PE |
				 VIVS_HI_IDLE_STATE_DE |
				 VIVS_HI_IDLE_STATE_FE;
	} else {
		gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
	}

	etnaviv_hw_specs(gpu);
}

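/*
 * The FSCALE value is latched by pulsing the CMD_LOAD bit: write the new
 * clock value with the bit set, then write it again with the bit clear.
 */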
static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
{
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
}

static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
{
	u32 control, idle;
	unsigned long timeout;
	bool failed = true;

	/* TODO
	 *
	 * - clock gating
	 * - pulse eater
	 * - what about VG?
	 */

	/* We hope that the GPU resets in under one second */
	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_is_after_jiffies(timeout)) {
		control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
			  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

		/* enable clock */
		etnaviv_gpu_load_clock(gpu, control);

		/* Wait for stable clock.  Vivante's code waited for 1ms */
		usleep_range(1000, 10000);

		/* isolate the GPU. */
		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* set soft reset. */
		control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* wait for reset. */
		msleep(1);

		/* reset soft reset bit. */
		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* reset GPU isolation. */
		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* read idle register. */
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		/* try resetting again if FE is not idle */
		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
			dev_dbg(gpu->dev, "FE is not idle\n");
			continue;
		}

		/* read reset register. */
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		/* is the GPU idle? */
		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
			dev_dbg(gpu->dev, "GPU is not idle\n");
			continue;
		}

		failed = false;
		break;
	}

	if (failed) {
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");

		return -EBUSY;
	}

	/* We rely on the GPU running, so program the clock */
	control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

	/* enable clock */
	etnaviv_gpu_load_clock(gpu, control);

	return 0;
}

static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
	u16 prefetch;

	if (gpu->identity.model == chipModel_GC320 &&
	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400 &&
	    (gpu->identity.revision == 0x5007 ||
	     gpu->identity.revision == 0x5220)) {
		u32 mc_memory_debug;

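		/*
		 * Note: these MC_DEBUG_MEMORY values are undocumented magic
		 * numbers; they appear to mirror the vendor kernel driver.
		 */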
		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;

		if (gpu->identity.revision == 0x5007)
			mc_memory_debug |= 0x0c;
		else
			mc_memory_debug |= 0x08;

		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
	}

	/*
	 * Update GPU AXI cache attribute to "cacheable, no allocate".
	 * This is necessary to prevent the iMX6 SoC locking up.
	 */
	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
		  VIVS_HI_AXI_CONFIG_ARCACHE(2));

	/* GC2000 rev 5108 needs a special bus config */
	if (gpu->identity.model == 0x2000 && gpu->identity.revision == 0x5108) {
		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);

		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
	}

	/* set base addresses */
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);

	/* setup the MMU page table pointers */
	etnaviv_iommu_domain_restore(gpu, gpu->mmu->domain);

	/* Start command processor */
	prefetch = etnaviv_buffer_init(gpu);
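	/* prefetch counts the initial command sequence in 64-bit FE words */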

	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS,
		  gpu->buffer->paddr - gpu->memory_base);
	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
		  VIVS_FE_COMMAND_CONTROL_ENABLE |
		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
}

int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
{
	int ret, i;
	struct iommu_domain *iommu;
	enum etnaviv_iommu_version version;
	bool mmuv2;

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		return ret;

	etnaviv_hw_identify(gpu);

	if (gpu->identity.model == 0) {
		dev_err(gpu->dev, "Unknown GPU model\n");
		ret = -ENXIO;
		goto fail;
	}

	/* Exclude VG cores with FE2.0 */
	if (gpu->identity.features & chipFeatures_PIPE_VG &&
	    gpu->identity.features & chipFeatures_FE20) {
		dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
		ret = -ENXIO;
		goto fail;
	}

	ret = etnaviv_hw_reset(gpu);
	if (ret)
		goto fail;

	/* Setup IOMMU.. eventually we will (I think) do this once per context
	 * and have separate page tables per context.  For now, to keep things
	 * simple and to get something working, just use a single address space:
	 */
	mmuv2 = gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION;
	dev_dbg(gpu->dev, "mmuv2: %d\n", mmuv2);

	if (!mmuv2) {
		iommu = etnaviv_iommu_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V1;
	} else {
		iommu = etnaviv_iommu_v2_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V2;
	}

	if (!iommu) {
		ret = -ENOMEM;
		goto fail;
	}

	/* TODO: we will leak memory here - fix it! */

	gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
	if (!gpu->mmu) {
		ret = -ENOMEM;
		goto fail;
	}

	/* Create buffer: */
	gpu->buffer = etnaviv_gpu_cmdbuf_new(gpu, PAGE_SIZE, 0);
	if (!gpu->buffer) {
		ret = -ENOMEM;
		dev_err(gpu->dev, "could not create command buffer\n");
		goto fail;
	}
	if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
		ret = -EINVAL;
		dev_err(gpu->dev,
			"command buffer outside valid memory window\n");
		goto free_buffer;
	}

	/* Setup event management */
	spin_lock_init(&gpu->event_spinlock);
	init_completion(&gpu->event_free);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		gpu->event[i].used = false;
		complete(&gpu->event_free);
	}
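	/*
	 * The event_free completion now acts as a counting semaphore: one
	 * complete() was issued per free event slot, matching the
	 * wait_for_completion_timeout() in event_alloc().
	 */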

	/* Now program the hardware */
	mutex_lock(&gpu->lock);
	etnaviv_gpu_hw_init(gpu);
	mutex_unlock(&gpu->lock);

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;

free_buffer:
	etnaviv_gpu_cmdbuf_free(gpu->buffer);
	gpu->buffer = NULL;
fail:
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}

#ifdef CONFIG_DEBUG_FS
struct dma_debug {
	u32 address[2];
	u32 state[2];
};

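/*
 * Sample the FE DMA address/state registers repeatedly; if neither changes
 * across up to 500 reads, the front end is presumed to be stuck.
 */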
static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
{
	u32 i;

	debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

	for (i = 0; i < 500; i++) {
		debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

		if (debug->address[0] != debug->address[1])
			break;

		if (debug->state[0] != debug->state[1])
			break;
	}
}

int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct dma_debug debug;
	u32 dma_lo, dma_hi, axi, idle;
	int ret;

	seq_printf(m, "%s Status:\n", dev_name(gpu->dev));

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		return ret;

	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
	axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

	verify_dma(gpu, &debug);

	seq_puts(m, "\tfeatures\n");
	seq_printf(m, "\t minor_features0: 0x%08x\n",
		   gpu->identity.minor_features0);
	seq_printf(m, "\t minor_features1: 0x%08x\n",
		   gpu->identity.minor_features1);
	seq_printf(m, "\t minor_features2: 0x%08x\n",
		   gpu->identity.minor_features2);
	seq_printf(m, "\t minor_features3: 0x%08x\n",
		   gpu->identity.minor_features3);

	seq_puts(m, "\tspecs\n");
	seq_printf(m, "\t stream_count: %d\n",
		   gpu->identity.stream_count);
	seq_printf(m, "\t register_max: %d\n",
		   gpu->identity.register_max);
	seq_printf(m, "\t thread_count: %d\n",
		   gpu->identity.thread_count);
	seq_printf(m, "\t vertex_cache_size: %d\n",
		   gpu->identity.vertex_cache_size);
	seq_printf(m, "\t shader_core_count: %d\n",
		   gpu->identity.shader_core_count);
	seq_printf(m, "\t pixel_pipes: %d\n",
		   gpu->identity.pixel_pipes);
	seq_printf(m, "\t vertex_output_buffer_size: %d\n",
		   gpu->identity.vertex_output_buffer_size);
	seq_printf(m, "\t buffer_size: %d\n",
		   gpu->identity.buffer_size);
	seq_printf(m, "\t instruction_count: %d\n",
		   gpu->identity.instruction_count);
	seq_printf(m, "\t num_constants: %d\n",
		   gpu->identity.num_constants);

	seq_printf(m, "\taxi: 0x%08x\n", axi);
	seq_printf(m, "\tidle: 0x%08x\n", idle);
	idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
	if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
		seq_puts(m, "\t FE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
		seq_puts(m, "\t DE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
		seq_puts(m, "\t PE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
		seq_puts(m, "\t SH is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
		seq_puts(m, "\t PA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
		seq_puts(m, "\t SE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
		seq_puts(m, "\t RA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
		seq_puts(m, "\t TX is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
		seq_puts(m, "\t VG is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
		seq_puts(m, "\t IM is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
		seq_puts(m, "\t FP is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
		seq_puts(m, "\t TS is not idle\n");
	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
		seq_puts(m, "\t AXI low power mode\n");

	if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
		u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
		u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
		u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);

		seq_puts(m, "\tMC\n");
		seq_printf(m, "\t read0: 0x%08x\n", read0);
		seq_printf(m, "\t read1: 0x%08x\n", read1);
		seq_printf(m, "\t write: 0x%08x\n", write);
	}

	seq_puts(m, "\tDMA ");

	if (debug.address[0] == debug.address[1] &&
	    debug.state[0] == debug.state[1]) {
		seq_puts(m, "seems to be stuck\n");
	} else if (debug.address[0] == debug.address[1]) {
		seq_puts(m, "address is constant\n");
	} else {
		seq_puts(m, "is running\n");
	}

	seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
	seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
	seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
	seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
	seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
		   dma_lo, dma_hi);

	ret = 0;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}
#endif

/*
 * Power Management:
 */
static int enable_clk(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_core)
		clk_prepare_enable(gpu->clk_core);
	if (gpu->clk_shader)
		clk_prepare_enable(gpu->clk_shader);

	return 0;
}

static int disable_clk(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_core)
		clk_disable_unprepare(gpu->clk_core);
	if (gpu->clk_shader)
		clk_disable_unprepare(gpu->clk_shader);

	return 0;
}

static int enable_axi(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_bus)
		clk_prepare_enable(gpu->clk_bus);

	return 0;
}

static int disable_axi(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_bus)
		clk_disable_unprepare(gpu->clk_bus);

	return 0;
}

/*
 * Hangcheck detection for locked gpu:
 */
static void recover_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       recover_work);
	unsigned long flags;
	unsigned int i;

	dev_err(gpu->dev, "hangcheck recover!\n");

	if (pm_runtime_get_sync(gpu->dev) < 0)
		return;

	mutex_lock(&gpu->lock);

	/* Only catch the first event, or when manually re-armed */
	if (etnaviv_dump_core) {
		etnaviv_core_dump(gpu);
		etnaviv_dump_core = false;
	}

	etnaviv_hw_reset(gpu);

	/* complete all events, the GPU won't do it after the reset */
	spin_lock_irqsave(&gpu->event_spinlock, flags);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		if (!gpu->event[i].used)
			continue;
		fence_signal(gpu->event[i].fence);
		gpu->event[i].fence = NULL;
		gpu->event[i].used = false;
		complete(&gpu->event_free);
		/*
		 * Decrement the PM count for each stuck event. This is safe
		 * even in atomic context as we use ASYNC RPM here.
		 */
		pm_runtime_put_autosuspend(gpu->dev);
	}
	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	gpu->completed_fence = gpu->active_fence;

	etnaviv_gpu_hw_init(gpu);
	gpu->switch_context = true;

	mutex_unlock(&gpu->lock);
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	/* Retire the buffer objects in a work */
	etnaviv_queue_work(gpu->drm, &gpu->retire_work);
}

static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
{
	DBG("%s", dev_name(gpu->dev));
	mod_timer(&gpu->hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
}

static void hangcheck_handler(unsigned long data)
{
	struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data;
	u32 fence = gpu->completed_fence;
	bool progress = false;

	if (fence != gpu->hangcheck_fence) {
		gpu->hangcheck_fence = fence;
		progress = true;
	}

	if (!progress) {
		u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		int change = dma_addr - gpu->hangcheck_dma_addr;

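		/*
		 * An idle FE bounces within its 16-byte WAIT/LINK loop, so
		 * only a negative change, or one larger than 16 bytes,
		 * counts as real command stream progress.
		 */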
		if (change < 0 || change > 16) {
			gpu->hangcheck_dma_addr = dma_addr;
			progress = true;
		}
	}

	if (!progress && fence_after(gpu->active_fence, fence)) {
		dev_err(gpu->dev, "hangcheck detected gpu lockup!\n");
		dev_err(gpu->dev, " completed fence: %u\n", fence);
		dev_err(gpu->dev, " active fence: %u\n",
			gpu->active_fence);
		etnaviv_queue_work(gpu->drm, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (fence_after(gpu->active_fence, gpu->hangcheck_fence))
		hangcheck_timer_reset(gpu);
}

static void hangcheck_disable(struct etnaviv_gpu *gpu)
{
	del_timer_sync(&gpu->hangcheck_timer);
	cancel_work_sync(&gpu->recover_work);
}

/* fence object management */
struct etnaviv_fence {
	struct etnaviv_gpu *gpu;
	struct fence base;
};

static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence)
{
	return container_of(fence, struct etnaviv_fence, base);
}

static const char *etnaviv_fence_get_driver_name(struct fence *fence)
{
	return "etnaviv";
}

static const char *etnaviv_fence_get_timeline_name(struct fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return dev_name(f->gpu->dev);
}

static bool etnaviv_fence_enable_signaling(struct fence *fence)
{
	return true;
}

static bool etnaviv_fence_signaled(struct fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return fence_completed(f->gpu, f->base.seqno);
}

static void etnaviv_fence_release(struct fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	kfree_rcu(f, base.rcu);
}

static const struct fence_ops etnaviv_fence_ops = {
	.get_driver_name = etnaviv_fence_get_driver_name,
	.get_timeline_name = etnaviv_fence_get_timeline_name,
	.enable_signaling = etnaviv_fence_enable_signaling,
	.signaled = etnaviv_fence_signaled,
	.wait = fence_default_wait,
	.release = etnaviv_fence_release,
};

static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_fence *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->gpu = gpu;

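	/*
	 * This is only called from etnaviv_gpu_submit() with gpu->lock held,
	 * which makes the ++gpu->next_fence below safe from races.
	 */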
	fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
		   gpu->fence_context, ++gpu->next_fence);

	return &f->base;
}

int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
	unsigned int context, bool exclusive)
{
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		ret = reservation_object_reserve_shared(robj);
		if (ret)
			return ret;
	}

	/*
	 * If we have any shared fences, then the exclusive fence
	 * should be ignored as it will already have been signalled.
	 */
	fobj = reservation_object_get_list(robj);
	if (!fobj || fobj->shared_count == 0) {
		/* Wait on any existing exclusive fence which isn't our own */
		fence = reservation_object_get_excl(robj);
		if (fence && fence->context != context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						  reservation_object_held(robj));
		if (fence->context != context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/*
 * event management:
 */

static unsigned int event_alloc(struct etnaviv_gpu *gpu)
{
	unsigned long ret, flags;
	unsigned int i, event = ~0U;

	ret = wait_for_completion_timeout(&gpu->event_free,
					  msecs_to_jiffies(10 * 10000));
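	/* 10 * 10000 ms: wait up to 100 seconds for a free event slot */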
	if (!ret)
		dev_err(gpu->dev, "wait_for_completion_timeout failed");

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	/* find first free event */
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		if (gpu->event[i].used == false) {
			gpu->event[i].used = true;
			event = i;
			break;
		}
	}

	spin_unlock_irqrestore(&gpu->event_spinlock, flags);

	return event;
}

static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
{
	unsigned long flags;

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	if (gpu->event[event].used == false) {
		dev_warn(gpu->dev, "event %u is already marked as free",
			 event);
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	} else {
		gpu->event[event].used = false;
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);

		complete(&gpu->event_free);
	}
}

/*
 * Cmdstream submission/retirement:
 */

struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
	size_t nr_bos)
{
	struct etnaviv_cmdbuf *cmdbuf;
	size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo[0]),
				 sizeof(*cmdbuf));
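	/* size_vstruct() sizes the struct plus its trailing variable-length
	 * bo[] array as a single allocation */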

	cmdbuf = kzalloc(sz, GFP_KERNEL);
	if (!cmdbuf)
		return NULL;

	cmdbuf->vaddr = dma_alloc_writecombine(gpu->dev, size, &cmdbuf->paddr,
					       GFP_KERNEL);
	if (!cmdbuf->vaddr) {
		kfree(cmdbuf);
		return NULL;
	}

	cmdbuf->gpu = gpu;
	cmdbuf->size = size;

	return cmdbuf;
}

void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
{
	dma_free_writecombine(cmdbuf->gpu->dev, cmdbuf->size,
			      cmdbuf->vaddr, cmdbuf->paddr);
	kfree(cmdbuf);
}

static void retire_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       retire_work);
	u32 fence = gpu->completed_fence;
	struct etnaviv_cmdbuf *cmdbuf, *tmp;
	unsigned int i;

	mutex_lock(&gpu->lock);
	list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
		if (!fence_is_signaled(cmdbuf->fence))
			break;

		list_del(&cmdbuf->node);
		fence_put(cmdbuf->fence);

		for (i = 0; i < cmdbuf->nr_bos; i++) {
			struct etnaviv_gem_object *etnaviv_obj = cmdbuf->bo[i];

			atomic_dec(&etnaviv_obj->gpu_active);
			/* drop the refcount taken in etnaviv_gpu_submit */
			etnaviv_gem_put_iova(gpu, &etnaviv_obj->base);
		}

		etnaviv_gpu_cmdbuf_free(cmdbuf);
	}

	gpu->retired_fence = fence;

	mutex_unlock(&gpu->lock);

	wake_up_all(&gpu->fence_event);
}

int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 fence, struct timespec *timeout)
{
	int ret;

	if (fence_after(fence, gpu->next_fence)) {
		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
			  fence, gpu->next_fence);
		return -EINVAL;
	}

	if (!timeout) {
		/* No timeout was requested: just test for completion */
		ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
	} else {
		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);

		ret = wait_event_interruptible_timeout(gpu->fence_event,
						       fence_completed(gpu, fence),
						       remaining);
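		/*
		 * wait_event_interruptible_timeout() returns 0 on timeout,
		 * -ERESTARTSYS when interrupted by a signal, and the
		 * remaining jiffies (> 0) on success.
		 */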
		if (ret == 0) {
			DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
			    fence, gpu->retired_fence,
			    gpu->completed_fence);
			ret = -ETIMEDOUT;
		} else if (ret != -ERESTARTSYS) {
			ret = 0;
		}
	}

	return ret;
}

/*
 * Wait for an object to become inactive.  This, on its own, is not race
 * free: the object is moved by the retire worker off the active list, and
 * then the iova is put.  Moreover, the object could be re-submitted just
 * after we notice that it's become inactive.
 *
 * Although the retirement happens under the gpu lock, we don't want to hold
 * that lock in this function while waiting.
 */
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
{
	unsigned long remaining;
	long ret;

	if (!timeout)
		return !is_active(etnaviv_obj) ? 0 : -EBUSY;

	remaining = etnaviv_timeout_to_jiffies(timeout);

	ret = wait_event_interruptible_timeout(gpu->fence_event,
					       !is_active(etnaviv_obj),
					       remaining);
	if (ret > 0) {
		struct etnaviv_drm_private *priv = gpu->drm->dev_private;

		/* Synchronise with the retire worker */
		flush_workqueue(priv->wq);
		return 0;
	} else if (ret == -ERESTARTSYS) {
		return -ERESTARTSYS;
	} else {
		return -ETIMEDOUT;
	}
}

int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu)
{
	return pm_runtime_get_sync(gpu->dev);
}

void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
{
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);
}

/* add bo's to gpu's ring, and kick gpu: */
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
{
	struct fence *fence;
	unsigned int event, i;
	int ret;

	ret = etnaviv_gpu_pm_get_sync(gpu);
	if (ret < 0)
		return ret;

	mutex_lock(&gpu->lock);

	/*
	 * TODO
	 *
	 * - flush
	 * - data endian
	 * - prefetch
	 *
	 */

	event = event_alloc(gpu);
	if (unlikely(event == ~0U)) {
		DRM_ERROR("no free event\n");
		ret = -EBUSY;
		goto out_unlock;
	}

	fence = etnaviv_gpu_fence_alloc(gpu);
	if (!fence) {
		event_free(gpu, event);
		ret = -ENOMEM;
		goto out_unlock;
	}

	gpu->event[event].fence = fence;
	submit->fence = fence->seqno;
	gpu->active_fence = submit->fence;
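	/*
	 * active_fence tracks the most recently submitted seqno;
	 * completed_fence catches up with it in the IRQ handler.
	 */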

	if (gpu->lastctx != cmdbuf->ctx) {
		gpu->mmu->need_flush = true;
		gpu->switch_context = true;
		gpu->lastctx = cmdbuf->ctx;
	}

	etnaviv_buffer_queue(gpu, event, cmdbuf);

	cmdbuf->fence = fence;
	list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);

	/* We're committed to adding this command buffer, hold a PM reference */
	pm_runtime_get_noresume(gpu->dev);

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
		u32 iova;

		/* Each cmdbuf takes a refcount on the iova */
		etnaviv_gem_get_iova(gpu, &etnaviv_obj->base, &iova);
		cmdbuf->bo[i] = etnaviv_obj;
		atomic_inc(&etnaviv_obj->gpu_active);

		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
			reservation_object_add_excl_fence(etnaviv_obj->resv,
							  fence);
		else
			reservation_object_add_shared_fence(etnaviv_obj->resv,
							    fence);
	}
	cmdbuf->nr_bos = submit->nr_bos;
	hangcheck_timer_reset(gpu);
	ret = 0;

out_unlock:
	mutex_unlock(&gpu->lock);

	etnaviv_gpu_pm_put(gpu);

	return ret;
}

/*
 * Init/Cleanup:
 */
static irqreturn_t irq_handler(int irq, void *data)
{
	struct etnaviv_gpu *gpu = data;
	irqreturn_t ret = IRQ_NONE;

	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);

	if (intr != 0) {
		int event;

		pm_runtime_mark_last_busy(gpu->dev);

		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
			dev_err(gpu->dev, "AXI bus error\n");
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
		}

		while ((event = ffs(intr)) != 0) {
			struct fence *fence;

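			/* ffs() is 1-based; convert to a 0-based event index */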
			event -= 1;

			intr &= ~(1 << event);

			dev_dbg(gpu->dev, "event %u\n", event);

			fence = gpu->event[event].fence;
			gpu->event[event].fence = NULL;
			fence_signal(fence);

			/*
			 * Events can be processed out of order.  Eg,
			 * - allocate and queue event 0
			 * - allocate event 1
			 * - event 0 completes, we process it
			 * - allocate and queue event 0
			 * - event 1 and event 0 complete
			 * we can end up processing event 0 first, then 1.
			 */
			if (fence_after(fence->seqno, gpu->completed_fence))
				gpu->completed_fence = fence->seqno;

			event_free(gpu, event);

			/*
			 * We need to balance the runtime PM count caused by
			 * each submission.  Upon submission, we increment
			 * the runtime PM counter, and allocate one event.
			 * So here, we put the runtime PM count for each
			 * completed event.
			 */
			pm_runtime_put_autosuspend(gpu->dev);
		}

		/* Retire the buffer objects in a work */
		etnaviv_queue_work(gpu->drm, &gpu->retire_work);

		ret = IRQ_HANDLED;
	}

	return ret;
}

static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret) {
		disable_clk(gpu);
		return ret;
	}

	return 0;
}

static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	return 0;
}

static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
	if (gpu->buffer) {
		unsigned long timeout;

		/* Replace the last WAIT with END */
		etnaviv_buffer_end(gpu);

		/*
		 * We know that only the FE is busy here, this should
		 * happen quickly (as the WAIT is only 200 cycles).  If
		 * we fail, just warn and continue.
		 */
		timeout = jiffies + msecs_to_jiffies(100);
		do {
			u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

			if ((idle & gpu->idle_mask) == gpu->idle_mask)
				break;

			if (time_is_before_jiffies(timeout)) {
				dev_warn(gpu->dev,
					 "timed out waiting for idle: idle=0x%x\n",
					 idle);
				break;
			}

			udelay(5);
		} while (1);
	}

	return etnaviv_gpu_clk_disable(gpu);
}

#ifdef CONFIG_PM
static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
{
	u32 clock;
	int ret;

	ret = mutex_lock_killable(&gpu->lock);
	if (ret)
		return ret;

	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);

	etnaviv_gpu_load_clock(gpu, clock);
	etnaviv_gpu_hw_init(gpu);

	gpu->switch_context = true;

	mutex_unlock(&gpu->lock);

	return 0;
}
#endif

static int etnaviv_gpu_bind(struct device *dev, struct device *master,
	void *data)
{
	struct drm_device *drm = data;
	struct etnaviv_drm_private *priv = drm->dev_private;
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

#ifdef CONFIG_PM
	ret = pm_runtime_get_sync(gpu->dev);
#else
	ret = etnaviv_gpu_clk_enable(gpu);
#endif
	if (ret < 0)
		return ret;

	gpu->drm = drm;
	gpu->fence_context = fence_context_alloc(1);
	spin_lock_init(&gpu->fence_spinlock);

	INIT_LIST_HEAD(&gpu->active_cmd_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);
	init_waitqueue_head(&gpu->fence_event);

	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
		    (unsigned long)gpu);

	priv->gpu[priv->num_gpus++] = gpu;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;
}

static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
	void *data)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);

	DBG("%s", dev_name(gpu->dev));

	hangcheck_disable(gpu);

#ifdef CONFIG_PM
	pm_runtime_get_sync(gpu->dev);
	pm_runtime_put_sync_suspend(gpu->dev);
#else
	etnaviv_gpu_hw_suspend(gpu);
#endif

	if (gpu->buffer) {
		etnaviv_gpu_cmdbuf_free(gpu->buffer);
		gpu->buffer = NULL;
	}

	if (gpu->mmu) {
		etnaviv_iommu_destroy(gpu->mmu);
		gpu->mmu = NULL;
	}

	gpu->drm = NULL;
}

static const struct component_ops gpu_ops = {
	.bind = etnaviv_gpu_bind,
	.unbind = etnaviv_gpu_unbind,
};

static const struct of_device_id etnaviv_gpu_match[] = {
	{
		.compatible = "vivante,gc"
	},
	{ /* sentinel */ }
};

static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct etnaviv_gpu *gpu;
	int err = 0;

	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
	if (!gpu)
		return -ENOMEM;

	gpu->dev = &pdev->dev;
	mutex_init(&gpu->lock);

	/*
	 * Set the GPU base address to the start of physical memory.  This
	 * ensures that if we have up to 2GB, the v1 MMU can address the
	 * highest memory.  This is important as command buffers may be
	 * allocated outside of this limit.
	 */
	gpu->memory_base = PHYS_OFFSET;

	/* Map registers: */
	gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
	if (IS_ERR(gpu->mmio))
		return PTR_ERR(gpu->mmio);

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		err = gpu->irq;
		dev_err(dev, "failed to get irq: %d\n", err);
		goto fail;
	}

	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
			       dev_name(gpu->dev), gpu);
	if (err) {
		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
		goto fail;
	}

	/* Get Clocks: */
	gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
	DBG("clk_bus: %p", gpu->clk_bus);
	if (IS_ERR(gpu->clk_bus))
		gpu->clk_bus = NULL;

	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
	DBG("clk_core: %p", gpu->clk_core);
	if (IS_ERR(gpu->clk_core))
		gpu->clk_core = NULL;

	gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
	DBG("clk_shader: %p", gpu->clk_shader);
	if (IS_ERR(gpu->clk_shader))
		gpu->clk_shader = NULL;
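	/*
	 * All three clocks are optional: a failed devm_clk_get() is recorded
	 * as NULL and the enable/disable helpers skip missing clocks.
	 */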

	/* TODO: figure out max mapped size */
	dev_set_drvdata(dev, gpu);

	/*
	 * We treat the device as initially suspended.  The runtime PM
	 * autosuspend delay is rather arbitrary: no measurements have
	 * yet been performed to determine an appropriate value.
	 */
	pm_runtime_use_autosuspend(gpu->dev);
	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
	pm_runtime_enable(gpu->dev);

	err = component_add(&pdev->dev, &gpu_ops);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register component: %d\n", err);
		goto fail;
	}

	return 0;

fail:
	return err;
}

static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &gpu_ops);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

#ifdef CONFIG_PM
static int etnaviv_gpu_rpm_suspend(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	u32 idle, mask;

	/* If we have outstanding fences, we're not idle */
	if (gpu->completed_fence != gpu->active_fence)
		return -EBUSY;

	/*
	 * Check whether the hardware (except FE) is idle; the FE is excluded
	 * because it busy-waits in the WAIT/LINK loop and never reports idle.
	 */
	mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
	if (idle != mask)
		return -EBUSY;

	return etnaviv_gpu_hw_suspend(gpu);
}

static int etnaviv_gpu_rpm_resume(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	ret = etnaviv_gpu_clk_enable(gpu);
	if (ret)
		return ret;

	/* Re-initialise the basic hardware state */
	if (gpu->drm && gpu->buffer) {
		ret = etnaviv_gpu_hw_resume(gpu);
		if (ret) {
			etnaviv_gpu_clk_disable(gpu);
			return ret;
		}
	}

	return 0;
}
#endif

static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
	SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
			   NULL)
};

struct platform_driver etnaviv_gpu_driver = {
	.driver = {
		.name = "etnaviv-gpu",
		.owner = THIS_MODULE,
		.pm = &etnaviv_gpu_pm_ops,
		.of_match_table = etnaviv_gpu_match,
	},
	.probe = etnaviv_gpu_platform_probe,
	.remove = etnaviv_gpu_platform_remove,
	.id_table = gpu_ids,
};