// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */


#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_gpu_trace.h"
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"

#include <linux/devfreq.h>

#define GPU_PAS_ID 13

static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	/* Check that the GMU is idle */
	if (!a6xx_gmu_isidle(&a6xx_gpu->gmu))
		return false;

	/* Check that the CX master is idle */
	if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
			~A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER)
		return false;

	return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &
		A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);
}

bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	/* wait for CP to drain ringbuffer: */
	if (!adreno_idle(gpu, ring))
		return false;

	if (spin_until(_a6xx_check_idle(gpu))) {
		DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
			gpu->name, __builtin_return_address(0),
			gpu_read(gpu, REG_A6XX_RBBM_STATUS),
			gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS),
			gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
			gpu_read(gpu, REG_A6XX_CP_RB_WPTR));
		return false;
	}

	return true;
}

static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	uint32_t wptr;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);

	/* Copy the shadow to the actual register */
	ring->cur = ring->next;

	/* Make sure to wrap wptr if we need to */
	wptr = get_wptr(ring);

	spin_unlock_irqrestore(&ring->lock, flags);

	/* Make sure the ring contents are posted before the CP sees the new wptr */
	mb();

	gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
}

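/*
 * Snapshot a 64-bit counter into the per-ring stats area with CP_REG_TO_MEM.
 * In the packed first dword, (1 << 30) appears to request a 64-bit memory
 * write and (2 << 18) a two-register (LO/HI) copy -- field meanings inferred
 * from the freedreno PM4 packet definitions, so treat them as an assumption.
 */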
static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
		u64 iova)
{
	OUT_PKT7(ring, CP_REG_TO_MEM, 3);
	OUT_RING(ring, counter | (1 << 30) | (2 << 18));
	OUT_RING(ring, lower_32_bits(iova));
	OUT_RING(ring, upper_32_bits(iova));
}

static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
	struct msm_file_private *ctx)
{
	unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct msm_ringbuffer *ring = submit->ring;
	unsigned int i;

	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
		rbmemptr_stats(ring, index, cpcycles_start));

	/*
	 * For PM4 the GMU register offsets are calculated from the base of the
	 * GPU registers so we need to add 0x1a800 to the register value on A630
	 * to get the right value from PM4.
	 */
	get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
		rbmemptr_stats(ring, index, alwayson_start));

	/* Invalidate CCU depth and color */
	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, PC_CCU_INVALIDATE_DEPTH);

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, PC_CCU_INVALIDATE_COLOR);

	/* Submit the commands */
	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			if (priv->lastctx == ctx)
				break;
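			/* fall through - the context changed, so run the restore buffer */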
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			break;
		}
	}

	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
		rbmemptr_stats(ring, index, cpcycles_end));
	get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
		rbmemptr_stats(ring, index, alwayson_end));

	/* Write the fence to the scratch register */
	OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
	OUT_RING(ring, submit->seqno);

	/*
	 * Execute a CACHE_FLUSH_TS event. This will ensure that the
	 * timestamp is written to the memory and then triggers the interrupt
	 */
	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
	OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
	OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
	OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
	OUT_RING(ring, submit->seqno);

	trace_msm_gpu_submit_flush(submit,
		gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
			REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));

	a6xx_flush(gpu, ring);
}

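/*
 * Hardware clock-gating (HWCG) configuration for A630: each entry is a
 * (register, value) pair that a6xx_set_hwcg() below writes verbatim when
 * enabling clock gating, or zeroes when disabling it.
 */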
static const struct {
	u32 offset;
	u32 value;
} a6xx_hwcg[] = {
	{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_SP3, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022220},
	{REG_A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220},
	{REG_A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220},
	{REG_A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220},
	{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{REG_A6XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
	{REG_A6XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
	{REG_A6XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
	{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
	{REG_A6XX_RBBM_CLOCK_HYST_SP1, 0x0000f3cf},
	{REG_A6XX_RBBM_CLOCK_HYST_SP2, 0x0000f3cf},
	{REG_A6XX_RBBM_CLOCK_HYST_SP3, 0x0000f3cf},
	{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_TP2, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_TP3, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_TP2, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_TP3, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_TP2, 0x00022222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_TP3, 0x00022222},
	{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST3_TP2, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST3_TP3, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
	{REG_A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
	{REG_A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777},
	{REG_A6XX_RBBM_CLOCK_HYST4_TP3, 0x00077777},
	{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY3_TP2, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY3_TP3, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
	{REG_A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
	{REG_A6XX_RBBM_CLOCK_DELAY4_TP2, 0x00011111},
	{REG_A6XX_RBBM_CLOCK_DELAY4_TP3, 0x00011111},
	{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
	{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
	{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
	{REG_A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
	{REG_A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
	{REG_A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040f00},
	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040f00},
	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040f00},
	{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
	{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
	{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
	{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
	{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
	{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
	{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
	{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}
};

static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	unsigned int i;
	u32 val;

	val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);

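	/*
	 * 0x8aa8aa02 is the RBBM_CLOCK_CNTL value that enables HWCG on A630;
	 * it is treated as an opaque magic number here, presumably inherited
	 * from the downstream driver.
	 */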
	/* Don't re-program the registers if they are already correct */
	if ((!state && !val) || (state && (val == 0x8aa8aa02)))
		return;

	/* Disable SP clock before programming HWCG registers */
	gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);

	for (i = 0; i < ARRAY_SIZE(a6xx_hwcg); i++)
		gpu_write(gpu, a6xx_hwcg[i].offset,
			state ? a6xx_hwcg[i].value : 0);

	/* Enable SP clock */
	gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);

	gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? 0x8aa8aa02 : 0);
}

static int a6xx_cp_init(struct msm_gpu *gpu)
{
	struct msm_ringbuffer *ring = gpu->rb[0];

	OUT_PKT7(ring, CP_ME_INIT, 8);

	OUT_RING(ring, 0x0000002f);

	/* Enable multiple hardware contexts */
	OUT_RING(ring, 0x00000003);

	/* Enable error detection */
	OUT_RING(ring, 0x20000000);

	/* Don't enable header dump */
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	/* No workarounds enabled */
	OUT_RING(ring, 0x00000000);

	/* Pad rest of the cmds with 0's */
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	a6xx_flush(gpu, ring);
	return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
}

static int a6xx_ucode_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	if (!a6xx_gpu->sqe_bo) {
		a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu,
			adreno_gpu->fw[ADRENO_FW_SQE], &a6xx_gpu->sqe_iova);

		if (IS_ERR(a6xx_gpu->sqe_bo)) {
			int ret = PTR_ERR(a6xx_gpu->sqe_bo);

			a6xx_gpu->sqe_bo = NULL;
			DRM_DEV_ERROR(&gpu->pdev->dev,
				"Could not allocate SQE ucode: %d\n", ret);

			return ret;
		}

		msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
	}

	gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
		REG_A6XX_CP_SQE_INSTR_BASE_HI, a6xx_gpu->sqe_iova);

	return 0;
}

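/*
 * The zap shader only needs to be loaded into the secure world once per
 * boot; remember whether the first PAS load succeeded so that later
 * hw_init cycles (e.g. after recovery or resume) can skip it.
 */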
static int a6xx_zap_shader_init(struct msm_gpu *gpu)
{
	static bool loaded;
	int ret;

	if (loaded)
		return 0;

	ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);

	loaded = !ret;
	return ret;
}

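/* The RBBM interrupt sources that get unmasked when the hardware is initialized */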
#define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
	A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
	A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
	A6XX_RBBM_INT_0_MASK_CP_IB2 | \
	A6XX_RBBM_INT_0_MASK_CP_IB1 | \
	A6XX_RBBM_INT_0_MASK_CP_RB | \
	A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
	A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
	A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
	A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
	A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)

static int a6xx_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	int ret;

	/* Make sure the GMU keeps the GPU on while we set it up */
	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);

	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);

	/*
	 * Disable the trusted memory range - we don't actually support secure
	 * memory rendering at this point in time and we don't want to block off
	 * part of the virtual memory space.
	 */
	gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
		REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);

	/* Enable hardware clock gating */
	a6xx_set_hwcg(gpu, true);

	/* VBIF start */
	gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
	gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);

	/* Make all blocks contribute to the GPU BUSY perf counter */
	gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);

	/* Disable L2 bypass in the UCHE */
	gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0);
	gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff);
	gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_LO, 0xfffff000);
	gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
	gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
	gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);

	/* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
	gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
		REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);

	gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
		REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
		0x00100000 + adreno_gpu->gmem - 1);

	gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
	gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);

	gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
	gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);

	/* Set the mem pool size */
	gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);

	/* Set the primFifo thresholds to their default values */
	gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));

	/* Set the AHB default slave response to "ERROR" */
	gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);

	/* Turn on performance counters */
	gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);

	/* Select CP0 to always count cycles */
	gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);

	gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
	gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
	gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
	gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);

	/* Enable fault detection */
	gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
		(1 << 30) | 0x1fffff);

	gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);

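	/*
	 * Each CP_PROTECT entry fences off a range of register space from the
	 * CP. Going by the macro definitions in a6xx_gpu.h, A6XX_PROTECT_RW()
	 * blocks both reads and writes across the range, while
	 * A6XX_PROTECT_RDONLY() still allows reads, which lets mixed-use areas
	 * such as performance counters be covered by a single entry.
	 */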
	/* Protect registers from the CP */
	gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);

	gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
		A6XX_PROTECT_RDONLY(0x600, 0x51));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
		A6XX_PROTECT_RDONLY(0xfc00, 0x3));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
		A6XX_PROTECT_RDONLY(0x0, 0x4f9));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
		A6XX_PROTECT_RDONLY(0x501, 0xa));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
		A6XX_PROTECT_RDONLY(0x511, 0x44));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
		A6XX_PROTECT_RW(0xbe20, 0x11f3));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
		A6XX_PROTECT_RDONLY(0x980, 0x4));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));

	/* Enable interrupts */
	gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);

	ret = adreno_hw_init(gpu);
	if (ret)
		goto out;

	ret = a6xx_ucode_init(gpu);
	if (ret)
		goto out;

	/* Always come up on rb 0 */
	a6xx_gpu->cur_ring = gpu->rb[0];

	/* Enable the SQE to start the CP engine */
	gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);

	ret = a6xx_cp_init(gpu);
	if (ret)
		goto out;

	/*
	 * Try to load a zap shader into the secure world. If successful
	 * we can use the CP to switch out of secure mode. If not then we
	 * have no recourse but to try to switch ourselves out manually. If we
	 * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
	 * be blocked and a permissions violation will soon follow.
	 */
	ret = a6xx_zap_shader_init(gpu);
	if (!ret) {
		OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
		OUT_RING(gpu->rb[0], 0x00000000);

		a6xx_flush(gpu, gpu->rb[0]);
		if (!a6xx_idle(gpu, gpu->rb[0]))
			return -EINVAL;
	} else {
		/* Print a warning so if we die, we know why */
		dev_warn_once(gpu->dev->dev,
			"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
		gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
		ret = 0;
	}

out:
	/*
	 * Tell the GMU that we are done touching the GPU and it can start power
	 * management
	 */
	a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);

	/* Take the GMU out of its special boot mode */
	a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);

	return ret;
}

static void a6xx_dump(struct msm_gpu *gpu)
{
	DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n",
		gpu_read(gpu, REG_A6XX_RBBM_STATUS));
	adreno_dump(gpu);
}

#define VBIF_RESET_ACK_TIMEOUT	100
#define VBIF_RESET_ACK_MASK	0x00f0

static void a6xx_recover(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	int i;

	adreno_dump_info(gpu);

	for (i = 0; i < 8; i++)
		DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));

	if (hang_debug)
		a6xx_dump(gpu);

	/*
	 * Turn off keep alive that might have been enabled by the hang
	 * interrupt
	 */
	gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);

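	/* Power cycle the GPU through the GMU to reset it, then reinitialize */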
	gpu->funcs->pm_suspend(gpu);
	gpu->funcs->pm_resume(gpu);

	msm_gpu_hw_init(gpu);
}

static int a6xx_fault_handler(void *arg, unsigned long iova, int flags)
{
	struct msm_gpu *gpu = arg;

	pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
		iova, flags,
		gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
		gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
		gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
		gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)));

	return -EFAULT;
}

static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
{
	u32 status = gpu_read(gpu, REG_A6XX_CP_INTERRUPT_STATUS);

	if (status & A6XX_CP_INT_CP_OPCODE_ERROR) {
		u32 val;

		gpu_write(gpu, REG_A6XX_CP_SQE_STAT_ADDR, 1);
		val = gpu_read(gpu, REG_A6XX_CP_SQE_STAT_DATA);
		dev_err_ratelimited(&gpu->pdev->dev,
			"CP | opcode error | possible opcode=0x%8.8X\n",
			val);
	}

	if (status & A6XX_CP_INT_CP_UCODE_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev,
			"CP ucode error interrupt\n");

	if (status & A6XX_CP_INT_CP_HW_FAULT_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n",
			gpu_read(gpu, REG_A6XX_CP_HW_FAULT));

	if (status & A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
		u32 val = gpu_read(gpu, REG_A6XX_CP_PROTECT_STATUS);

		dev_err_ratelimited(&gpu->pdev->dev,
			"CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
			val & (1 << 20) ? "READ" : "WRITE",
			(val & 0x3ffff), val);
	}

	if (status & A6XX_CP_INT_CP_AHB_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n");

	if (status & A6XX_CP_INT_CP_VSD_PARITY_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n");

	if (status & A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n");
}

static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);

	/*
	 * Force the GPU to stay on until after we finish
	 * collecting information
	 */
	gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);

	DRM_DEV_ERROR(&gpu->pdev->dev,
		"gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
		ring ? ring->id : -1, ring ? ring->seqno : 0,
		gpu_read(gpu, REG_A6XX_RBBM_STATUS),
		gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
		gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
		gpu_read64(gpu, REG_A6XX_CP_IB1_BASE, REG_A6XX_CP_IB1_BASE_HI),
		gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
		gpu_read64(gpu, REG_A6XX_CP_IB2_BASE, REG_A6XX_CP_IB2_BASE_HI),
		gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE));

	/* Turn off the hangcheck timer to keep it from bothering us */
	del_timer(&gpu->hangcheck_timer);

	queue_work(priv->wq, &gpu->recover_work);
}

static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
{
	u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);

	gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);

	if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT)
		a6xx_fault_detect_irq(gpu);

	if (status & A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n");

	if (status & A6XX_RBBM_INT_0_MASK_CP_HW_ERROR)
		a6xx_cp_hw_err_irq(gpu);

	if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW)
		dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n");

	if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
		dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n");

	if (status & A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
		dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n");

	if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
		msm_gpu_retire(gpu);

	return IRQ_HANDLED;
}

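/*
 * Map the generic adreno register identifiers used by the shared adreno code
 * onto their a6xx-specific offsets (only the ringbuffer control registers are
 * needed here).
 */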
static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A6XX_CP_RB_BASE),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A6XX_CP_RB_BASE_HI),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR,
		REG_A6XX_CP_RB_RPTR_ADDR_LO),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
		REG_A6XX_CP_RB_RPTR_ADDR_HI),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A6XX_CP_RB_RPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A6XX_CP_RB_WPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
};

static int a6xx_pm_resume(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	int ret;

	gpu->needs_hw_init = true;

	ret = a6xx_gmu_resume(a6xx_gpu);
	if (ret)
		return ret;

	msm_gpu_resume_devfreq(gpu);

	return 0;
}

static int a6xx_pm_suspend(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	devfreq_suspend_device(gpu->devfreq.devfreq);

	return a6xx_gmu_stop(a6xx_gpu);
}

static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	/* Force the GPU power on so we can read this register */
	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);

	*value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
		REG_A6XX_RBBM_PERFCTR_CP_0_HI);

	a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
	return 0;
}

static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	return a6xx_gpu->cur_ring;
}

static void a6xx_destroy(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	if (a6xx_gpu->sqe_bo) {
		msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
		drm_gem_object_put_unlocked(a6xx_gpu->sqe_bo);
	}

	a6xx_gmu_remove(a6xx_gpu);

	adreno_gpu_cleanup(adreno_gpu);
	kfree(a6xx_gpu);
}

static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	u64 busy_cycles, busy_time;

	busy_cycles = gmu_read64(&a6xx_gpu->gmu,
			REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
			REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);

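	/*
	 * The GMU power counter ticks at 19.2 MHz (the usual XO clock rate on
	 * these SoCs), so multiplying the cycle delta by 10 and dividing by
	 * 192 converts it to microseconds -- assuming that 19.2 MHz rate.
	 */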
	busy_time = (busy_cycles - gpu->devfreq.busy_cycles) * 10;
	do_div(busy_time, 192);

	gpu->devfreq.busy_cycles = busy_cycles;

	if (WARN_ON(busy_time > ~0LU))
		return ~0LU;

	return (unsigned long)busy_time;
}

static const struct adreno_gpu_funcs funcs = {
	.base = {
		.get_param = adreno_get_param,
		.hw_init = a6xx_hw_init,
		.pm_suspend = a6xx_pm_suspend,
		.pm_resume = a6xx_pm_resume,
		.recover = a6xx_recover,
		.submit = a6xx_submit,
		.flush = a6xx_flush,
		.active_ring = a6xx_active_ring,
		.irq = a6xx_irq,
		.destroy = a6xx_destroy,
#if defined(CONFIG_DRM_MSM_GPU_STATE)
		.show = a6xx_show,
#endif
		.gpu_busy = a6xx_gpu_busy,
		.gpu_get_freq = a6xx_gmu_get_freq,
		.gpu_set_freq = a6xx_gmu_set_freq,
#if defined(CONFIG_DRM_MSM_GPU_STATE)
		.gpu_state_get = a6xx_gpu_state_get,
		.gpu_state_put = a6xx_gpu_state_put,
#endif
	},
	.get_timestamp = a6xx_get_timestamp,
};

struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	struct device_node *node;
	struct a6xx_gpu *a6xx_gpu;
	struct adreno_gpu *adreno_gpu;
	struct msm_gpu *gpu;
	int ret;

	a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
	if (!a6xx_gpu)
		return ERR_PTR(-ENOMEM);

	adreno_gpu = &a6xx_gpu->base;
	gpu = &adreno_gpu->base;

	adreno_gpu->registers = NULL;
	adreno_gpu->reg_offsets = a6xx_register_offsets;

	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
	if (ret) {
		a6xx_destroy(&(a6xx_gpu->base.base));
		return ERR_PTR(ret);
	}

	/* Check if there is a GMU phandle and set it up */
	node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);

	/* FIXME: How do we gracefully handle this? */
	BUG_ON(!node);

	ret = a6xx_gmu_probe(a6xx_gpu, node);
	if (ret) {
		a6xx_destroy(&(a6xx_gpu->base.base));
		return ERR_PTR(ret);
	}

	if (gpu->aspace)
		msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
				a6xx_fault_handler);

	return gpu;
}