Commit | Line | Data |
---|---|---|
4b565ca5 | 1 | // SPDX-License-Identifier: GPL-2.0 |
e812744c | 2 | /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */ |
4b565ca5 JC |
3 | |
4 | #include <linux/clk.h> | |
fcf9d0b7 | 5 | #include <linux/interconnect.h> |
9325d426 | 6 | #include <linux/pm_domain.h> |
4b565ca5 JC |
7 | #include <linux/pm_opp.h> |
8 | #include <soc/qcom/cmd-db.h> | |
29ac8979 | 9 | #include <drm/drm_gem.h> |
4b565ca5 JC |
10 | |
11 | #include "a6xx_gpu.h" | |
12 | #include "a6xx_gmu.xml.h" | |
29ac8979 | 13 | #include "msm_gem.h" |
74c0a69c | 14 | #include "msm_gpu_trace.h" |
29ac8979 | 15 | #include "msm_mmu.h" |
4b565ca5 | 16 | |
e31fdb74 JC |
17 | static void a6xx_gmu_fault(struct a6xx_gmu *gmu) |
18 | { | |
19 | struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); | |
20 | struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; | |
21 | struct msm_gpu *gpu = &adreno_gpu->base; | |
e31fdb74 JC |
22 | |
23 | /* FIXME: add a banner here */ | |
24 | gmu->hung = true; | |
25 | ||
26 | /* Turn off the hangcheck timer while we are resetting */ | |
27 | del_timer(&gpu->hangcheck_timer); | |
28 | ||
29 | /* Queue the GPU handler because we need to treat this as a recovery */ | |
7e688294 | 30 | kthread_queue_work(gpu->worker, &gpu->recover_work); |
e31fdb74 JC |
31 | } |
32 | ||
4b565ca5 JC |
33 | static irqreturn_t a6xx_gmu_irq(int irq, void *data) |
34 | { | |
35 | struct a6xx_gmu *gmu = data; | |
36 | u32 status; | |
37 | ||
38 | status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS); | |
39 | gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status); | |
40 | ||
41 | if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) { | |
42 | dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n"); | |
43 | ||
e31fdb74 | 44 | a6xx_gmu_fault(gmu); |
4b565ca5 JC |
45 | } |
46 | ||
47 | if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR) | |
48 | dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n"); | |
49 | ||
50 | if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR) | |
51 | dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n", | |
52 | gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS)); | |
53 | ||
54 | return IRQ_HANDLED; | |
55 | } | |
56 | ||
57 | static irqreturn_t a6xx_hfi_irq(int irq, void *data) | |
58 | { | |
59 | struct a6xx_gmu *gmu = data; | |
60 | u32 status; | |
61 | ||
62 | status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO); | |
63 | gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status); | |
64 | ||
4b565ca5 JC |
65 | if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) { |
66 | dev_err_ratelimited(gmu->dev, "GMU firmware fault\n"); | |
67 | ||
e31fdb74 | 68 | a6xx_gmu_fault(gmu); |
4b565ca5 JC |
69 | } |
70 | ||
71 | return IRQ_HANDLED; | |
72 | } | |
73 | ||
1707add8 JC |
74 | bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu) |
75 | { | |
76 | u32 val; | |
77 | ||
78 | /* This can be called from gpu state code so make sure GMU is valid */ | |
606ec90f | 79 | if (!gmu->initialized) |
1707add8 JC |
80 | return false; |
81 | ||
82 | val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS); | |
83 | ||
84 | return !(val & | |
85 | (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF | | |
86 | A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF)); | |
87 | } | |
88 | ||
4b565ca5 | 89 | /* Check to see if the GX rail is still powered */ |
1707add8 | 90 | bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu) |
4b565ca5 | 91 | { |
1707add8 JC |
92 | u32 val; |
93 | ||
94 | /* This can be called from gpu state code so make sure GMU is valid */ | |
606ec90f | 95 | if (!gmu->initialized) |
1707add8 JC |
96 | return false; |
97 | ||
98 | val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS); | |
4b565ca5 JC |
99 | |
100 | return !(val & | |
101 | (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF | | |
102 | A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF)); | |
103 | } | |
104 | ||
6694482a DA |
105 | void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp, |
106 | bool suspended) | |
4b565ca5 | 107 | { |
1f60d114 SM |
108 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
109 | struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); | |
110 | struct a6xx_gmu *gmu = &a6xx_gpu->gmu; | |
111 | u32 perf_index; | |
112 | unsigned long gpu_freq; | |
113 | int ret = 0; | |
114 | ||
115 | gpu_freq = dev_pm_opp_get_freq(opp); | |
116 | ||
117 | if (gpu_freq == gmu->freq) | |
118 | return; | |
119 | ||
120 | for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++) | |
121 | if (gpu_freq == gmu->gpu_freqs[perf_index]) | |
122 | break; | |
123 | ||
124 | gmu->current_perf_index = perf_index; | |
125 | gmu->freq = gmu->gpu_freqs[perf_index]; | |
126 | ||
74c0a69c RC |
127 | trace_msm_gmu_freq_change(gmu->freq, perf_index); |
128 | ||
1f60d114 SM |
129 | /* |
130 | * This can get called from devfreq while the hardware is idle. Don't | |
6694482a DA |
131 | * bring up the power if it isn't already active. All we're doing here |
132 | * is updating the frequency so that when we come back online we're at | |
133 | * the right rate. | |
1f60d114 | 134 | */ |
6694482a | 135 | if (suspended) |
1f60d114 SM |
136 | return; |
137 | ||
138 | if (!gmu->legacy) { | |
139 | a6xx_hfi_set_freq(gmu, perf_index); | |
920b4a67 | 140 | dev_pm_opp_set_opp(&gpu->pdev->dev, opp); |
1f60d114 SM |
141 | return; |
142 | } | |
a2c3c0a5 | 143 | |
4b565ca5 JC |
144 | gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0); |
145 | ||
146 | gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING, | |
1f60d114 | 147 | ((3 & 0xf) << 28) | perf_index); |
4b565ca5 JC |
148 | |
149 | /* | |
150 | * Send an invalid index as a vote for the bus bandwidth and let the | |
151 | * firmware decide on the right vote | |
152 | */ | |
153 | gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff); | |
154 | ||
155 | /* Set and clear the OOB for DCVS to trigger the GMU */ | |
156 | a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET); | |
157 | a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET); | |
158 | ||
a2c3c0a5 SM |
159 | ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN); |
160 | if (ret) | |
161 | dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret); | |
162 | ||
920b4a67 | 163 | dev_pm_opp_set_opp(&gpu->pdev->dev, opp); |
a2c3c0a5 SM |
164 | } |
165 | ||
166 | unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu) | |
167 | { | |
168 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); | |
169 | struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); | |
170 | struct a6xx_gmu *gmu = &a6xx_gpu->gmu; | |
171 | ||
172 | return gmu->freq; | |
4b565ca5 JC |
173 | } |
174 | ||
175 | static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu) | |
176 | { | |
177 | u32 val; | |
178 | int local = gmu->idle_level; | |
179 | ||
180 | /* SPTP and IFPC both report as IFPC */ | |
181 | if (gmu->idle_level == GMU_IDLE_STATE_SPTP) | |
182 | local = GMU_IDLE_STATE_IFPC; | |
183 | ||
184 | val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); | |
185 | ||
186 | if (val == local) { | |
187 | if (gmu->idle_level != GMU_IDLE_STATE_IFPC || | |
188 | !a6xx_gmu_gx_is_on(gmu)) | |
189 | return true; | |
190 | } | |
191 | ||
192 | return false; | |
193 | } | |
194 | ||
195 | /* Wait for the GMU to get to its most idle state */ | |
e31fdb74 | 196 | int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu) |
4b565ca5 | 197 | { |
4b565ca5 JC |
198 | return spin_until(a6xx_gmu_check_idle_level(gmu)); |
199 | } | |
200 | ||
201 | static int a6xx_gmu_start(struct a6xx_gmu *gmu) | |
202 | { | |
203 | int ret; | |
204 | u32 val; | |
f5749d61 DB |
205 | u32 mask, reset_val; |
206 | ||
207 | val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8); | |
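/*
 * The word at the end of DTCM read above seems to act as a firmware
 * version stamp: older GMU firmware (<= 0x20010004) signals boot with
 * the full 0xbabeface magic in FW_INIT_RESULT, while newer firmware
 * only guarantees the low bits, hence the two mask/value pairs chosen
 * below.
 */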
208 | if (val <= 0x20010004) { | |
209 | mask = 0xffffffff; | |
210 | reset_val = 0xbabeface; | |
211 | } else { | |
212 | mask = 0x1ff; | |
213 | reset_val = 0x100; | |
214 | } | |
4b565ca5 JC |
215 | |
216 | gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); | |
ad4968d5 JM |
217 | |
218 | /* Set the log wptr index | |
219 | * note: downstream saves the value in poweroff and restores it here | |
220 | */ | |
221 | gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0); | |
222 | ||
4b565ca5 JC |
223 | gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0); |
224 | ||
225 | ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val, | |
f5749d61 | 226 | (val & mask) == reset_val, 100, 10000); |
4b565ca5 JC |
227 | |
228 | if (ret) | |
6a41da17 | 229 | DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n"); |
4b565ca5 JC |
230 | |
231 | return ret; | |
232 | } | |
233 | ||
234 | static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu) | |
235 | { | |
236 | u32 val; | |
237 | int ret; | |
238 | ||
4b565ca5 JC |
239 | gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1); |
240 | ||
241 | ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val, | |
242 | val & 1, 100, 10000); | |
243 | if (ret) | |
6a41da17 | 244 | DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n"); |
4b565ca5 JC |
245 | |
246 | return ret; | |
247 | } | |
248 | ||
555c50a4 | 249 | struct a6xx_gmu_oob_bits { |
2fc8a92e | 250 | int set, ack, set_new, ack_new, clear, clear_new; |
555c50a4 EA |
251 | const char *name; |
252 | }; | |
253 | ||
254 | /* These are the interrupt / ack bits for each OOB request that are set | |
255 | * in a6xx_gmu_set_oob and a6xx_gmu_clear_oob |
256 | */ | |
257 | static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = { | |
258 | [GMU_OOB_GPU_SET] = { | |
259 | .name = "GPU_SET", | |
260 | .set = 16, | |
261 | .ack = 24, | |
262 | .set_new = 30, | |
263 | .ack_new = 31, | |
2fc8a92e AO |
264 | .clear = 24, |
265 | .clear_new = 31, | |
555c50a4 EA |
266 | }, |
267 | ||
268 | [GMU_OOB_PERFCOUNTER_SET] = { | |
269 | .name = "PERFCOUNTER", | |
270 | .set = 17, | |
271 | .ack = 25, | |
272 | .set_new = 28, | |
273 | .ack_new = 30, | |
2fc8a92e AO |
274 | .clear = 25, |
275 | .clear_new = 29, | |
555c50a4 EA |
276 | }, |
277 | ||
278 | [GMU_OOB_BOOT_SLUMBER] = { | |
279 | .name = "BOOT_SLUMBER", | |
280 | .set = 22, | |
281 | .ack = 30, | |
2fc8a92e | 282 | .clear = 30, |
555c50a4 EA |
283 | }, |
284 | ||
285 | [GMU_OOB_DCVS_SET] = { | |
286 | .name = "GPU_DCVS", | |
287 | .set = 23, | |
288 | .ack = 31, | |
2fc8a92e | 289 | .clear = 31, |
555c50a4 EA |
290 | }, |
291 | }; | |
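/*
 * Note the asymmetry above: BOOT_SLUMBER and GPU_DCVS only fill in the
 * legacy .set/.ack/.clear bits, so a6xx_gmu_set_oob() below rejects
 * them outright on non-legacy firmware, where only the *_new bits are
 * meaningful.
 */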
292 | ||
4b565ca5 JC |
293 | /* Trigger an OOB (out of band) request to the GMU */ |
294 | int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) | |
295 | { | |
296 | int ret; | |
297 | u32 val; | |
298 | int request, ack; | |
4b565ca5 | 299 | |
f6f59072 RC |
300 | WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); |
301 | ||
555c50a4 | 302 | if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits)) |
4b565ca5 | 303 | return -EINVAL; |
555c50a4 EA |
304 | |
305 | if (gmu->legacy) { | |
306 | request = a6xx_gmu_oob_bits[state].set; | |
307 | ack = a6xx_gmu_oob_bits[state].ack; | |
308 | } else { | |
309 | request = a6xx_gmu_oob_bits[state].set_new; | |
310 | ack = a6xx_gmu_oob_bits[state].ack_new; | |
311 | if (!request || !ack) { | |
312 | DRM_DEV_ERROR(gmu->dev, | |
313 | "Invalid non-legacy GMU request %s\n", | |
314 | a6xx_gmu_oob_bits[state].name); | |
315 | return -EINVAL; | |
316 | } | |
4b565ca5 JC |
317 | } |
318 | ||
319 | /* Trigger the requested OOB operation */ |
320 | gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request); | |
321 | ||
322 | /* Wait for the acknowledge interrupt */ | |
323 | ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val, | |
324 | val & (1 << ack), 100, 10000); | |
325 | ||
326 | if (ret) | |
6a41da17 | 327 | DRM_DEV_ERROR(gmu->dev, |
4b565ca5 | 328 | "Timeout waiting for GMU OOB set %s: 0x%x\n", |
555c50a4 | 329 | a6xx_gmu_oob_bits[state].name, |
4b565ca5 JC |
330 | gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO)); |
331 | ||
332 | /* Clear the acknowledge interrupt */ | |
333 | gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack); | |
334 | ||
335 | return ret; | |
336 | } | |
337 | ||
338 | /* Clear a pending OOB state in the GMU */ | |
339 | void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) | |
340 | { | |
555c50a4 EA |
341 | int bit; |
342 | ||
f6f59072 RC |
343 | WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); |
344 | ||
555c50a4 | 345 | if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits)) |
8167e6fa | 346 | return; |
8167e6fa | 347 | |
555c50a4 | 348 | if (gmu->legacy) |
2fc8a92e | 349 | bit = a6xx_gmu_oob_bits[state].clear; |
555c50a4 | 350 | else |
2fc8a92e | 351 | bit = a6xx_gmu_oob_bits[state].clear_new; |
555c50a4 | 352 | |
65aee407 | 353 | gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit); |
4b565ca5 JC |
354 | } |
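/*
 * Typical pairing, as in the legacy DCVS path in a6xx_gmu_set_freq()
 * above: a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET) rings the GMU, then
 * a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET) drops the request once it
 * has been acknowledged.
 */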
355 | ||
356 | /* Enable CPU control of SPTP power collapse */ |
357 | static int a6xx_sptprac_enable(struct a6xx_gmu *gmu) | |
358 | { | |
359 | int ret; | |
360 | u32 val; | |
361 | ||
8167e6fa JM |
362 | if (!gmu->legacy) |
363 | return 0; | |
364 | ||
4b565ca5 JC |
365 | gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000); |
366 | ||
367 | ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val, | |
368 | (val & 0x38) == 0x28, 1, 100); | |
369 | ||
370 | if (ret) { | |
6a41da17 | 371 | DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n", |
4b565ca5 JC |
372 | gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS)); |
373 | } | |
374 | ||
375 | return 0; | |
376 | } | |
377 | ||
378 | /* Disable CPU control of SPTP power collapse */ |
379 | static void a6xx_sptprac_disable(struct a6xx_gmu *gmu) | |
380 | { | |
381 | u32 val; | |
382 | int ret; | |
383 | ||
8167e6fa JM |
384 | if (!gmu->legacy) |
385 | return; | |
386 | ||
4b565ca5 JC |
387 | /* Make sure retention is on */ |
388 | gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11)); | |
389 | ||
390 | gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001); | |
391 | ||
392 | ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val, | |
393 | (val & 0x04), 100, 10000); | |
394 | ||
395 | if (ret) | |
6a41da17 | 396 | DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n", |
4b565ca5 JC |
397 | gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS)); |
398 | } | |
399 | ||
400 | /* Let the GMU know we are starting a boot sequence */ | |
401 | static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu) | |
402 | { | |
403 | u32 vote; | |
404 | ||
405 | /* Let the GMU know we are getting ready for boot */ | |
406 | gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0); | |
407 | ||
408 | /* Choose the "default" power level as the highest available */ | |
409 | vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1]; | |
410 | ||
411 | gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff); | |
412 | gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff); | |
413 | ||
414 | /* Let the GMU know the boot sequence has started */ | |
415 | return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER); | |
416 | } | |
417 | ||
418 | /* Let the GMU know that we are about to go into slumber */ | |
419 | static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu) | |
420 | { | |
421 | int ret; | |
422 | ||
423 | /* Disable the power counter so the GMU isn't busy */ | |
424 | gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0); | |
425 | ||
426 | /* Disable SPTP_PC if the CPU is responsible for it */ | |
427 | if (gmu->idle_level < GMU_IDLE_STATE_SPTP) | |
428 | a6xx_sptprac_disable(gmu); | |
429 | ||
8167e6fa JM |
430 | if (!gmu->legacy) { |
431 | ret = a6xx_hfi_send_prep_slumber(gmu); | |
432 | goto out; | |
433 | } | |
434 | ||
4b565ca5 JC |
435 | /* Tell the GMU to get ready to slumber */ |
436 | gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1); | |
437 | ||
438 | ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER); | |
439 | a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER); | |
440 | ||
441 | if (!ret) { | |
442 | /* Check to see if the GMU really did slumber */ | |
443 | if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE) | |
444 | != 0x0f) { | |
6a41da17 | 445 | DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n"); |
4b565ca5 JC |
446 | ret = -ETIMEDOUT; |
447 | } | |
448 | } | |
449 | ||
8167e6fa | 450 | out: |
4b565ca5 JC |
451 | /* Put fence into allow mode */ |
452 | gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0); | |
453 | return ret; | |
454 | } | |
455 | ||
456 | static int a6xx_rpmh_start(struct a6xx_gmu *gmu) | |
457 | { | |
458 | int ret; | |
459 | u32 val; | |
460 | ||
461 | gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1); | |
462 | /* Wait for the register to finish posting */ | |
463 | wmb(); | |
464 | ||
465 | ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val, | |
466 | val & (1 << 1), 100, 10000); | |
467 | if (ret) { | |
6a41da17 | 468 | DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n"); |
4b565ca5 JC |
469 | return ret; |
470 | } | |
471 | ||
02ef80c5 | 472 | ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val, |
4b565ca5 JC |
473 | !val, 100, 10000); |
474 | ||
56869210 JC |
475 | if (ret) { |
476 | DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n"); | |
477 | return ret; | |
4b565ca5 JC |
478 | } |
479 | ||
56869210 JC |
480 | gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0); |
481 | ||
482 | /* Set up CX GMU counter 0 to count busy ticks */ | |
483 | gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000); | |
484 | gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20); | |
485 | ||
486 | /* Enable the power counter */ | |
487 | gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1); | |
488 | return 0; | |
4b565ca5 JC |
489 | } |
490 | ||
491 | static void a6xx_rpmh_stop(struct a6xx_gmu *gmu) | |
492 | { | |
493 | int ret; | |
494 | u32 val; | |
495 | ||
496 | gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1); | |
497 | ||
02ef80c5 | 498 | ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, |
4b565ca5 JC |
499 | val, val & (1 << 16), 100, 10000); |
500 | if (ret) | |
6a41da17 | 501 | DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n"); |
4b565ca5 JC |
502 | |
503 | gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0); | |
504 | } | |
505 | ||
f8fc924e JC |
506 | static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value) |
507 | { | |
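/* 'offset' is a dword register index; << 2 converts it to a byte offset */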
d7499634 | 508 | msm_writel(value, ptr + (offset << 2)); |
f8fc924e JC |
509 | } |
510 | ||
511 | static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev, | |
512 | const char *name); | |
513 | ||
4b565ca5 JC |
514 | static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) |
515 | { | |
e812744c SM |
516 | struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); |
517 | struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; | |
f8fc924e JC |
518 | struct platform_device *pdev = to_platform_device(gmu->dev); |
519 | void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc"); | |
3d91e50f | 520 | void __iomem *seqptr = NULL; |
02ef80c5 | 521 | uint32_t pdc_address_offset; |
64245fc5 | 522 | bool pdc_in_aop = false; |
f8fc924e | 523 | |
3d91e50f | 524 | if (IS_ERR(pdcptr)) |
f8fc924e JC |
525 | goto err; |
526 | ||
192f4ee3 | 527 | if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu)) |
64245fc5 | 528 | pdc_in_aop = true; |
083cc3a4 | 529 | else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu)) |
02ef80c5 | 530 | pdc_address_offset = 0x30090; |
b7616b5c KD |
531 | else if (adreno_is_a619(adreno_gpu)) |
532 | pdc_address_offset = 0x300a0; | |
02ef80c5 JM |
533 | else |
534 | pdc_address_offset = 0x30080; | |
535 | ||
64245fc5 JM |
536 | if (!pdc_in_aop) { |
537 | seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq"); | |
3d91e50f | 538 | if (IS_ERR(seqptr)) |
64245fc5 JM |
539 | goto err; |
540 | } | |
541 | ||
4b565ca5 | 542 | /* Disable SDE clock gating */ |
02ef80c5 | 543 | gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24)); |
4b565ca5 JC |
544 | |
545 | /* Setup RSC PDC handshake for sleep and wakeup */ | |
02ef80c5 JM |
546 | gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1); |
547 | gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0); | |
548 | gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0); | |
549 | gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0); | |
550 | gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0); | |
551 | gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000); | |
552 | gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0); | |
553 | gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0); | |
554 | gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520); | |
555 | gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510); | |
556 | gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514); | |
4b565ca5 JC |
557 | |
558 | /* Load RSC sequencer uCode for sleep and wakeup */ | |
f6d62d09 | 559 | if (adreno_is_a650_family(adreno_gpu)) { |
02ef80c5 JM |
560 | gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0); |
561 | gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab); | |
562 | gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581); | |
563 | gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2); | |
564 | gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad); | |
565 | } else { | |
566 | gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0); | |
567 | gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7); | |
568 | gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1); | |
569 | gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2); | |
570 | gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8); | |
571 | } | |
4b565ca5 | 572 | |
64245fc5 JM |
573 | if (pdc_in_aop) |
574 | goto setup_pdc; | |
575 | ||
4b565ca5 | 576 | /* Load PDC sequencer uCode for power up and power down sequence */ |
f8fc924e JC |
577 | pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1); |
578 | pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2); | |
579 | pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0); | |
580 | pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284); | |
581 | pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc); | |
4b565ca5 JC |
582 | |
583 | /* Set TCS commands used by PDC sequence for low power modes */ | |
f8fc924e JC |
584 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7); |
585 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0); | |
586 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0); | |
587 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108); | |
588 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010); | |
589 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1); | |
590 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108); | |
591 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000); | |
592 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0); | |
e812744c | 593 | |
f8fc924e | 594 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108); |
02ef80c5 | 595 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset); |
f8fc924e | 596 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0); |
e812744c | 597 | |
f8fc924e JC |
598 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7); |
599 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0); | |
600 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0); | |
601 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108); | |
602 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010); | |
603 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2); | |
e812744c | 604 | |
f8fc924e JC |
605 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108); |
606 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000); | |
b7616b5c KD |
607 | if (adreno_is_a618(adreno_gpu) || adreno_is_a619(adreno_gpu) || |
608 | adreno_is_a650_family(adreno_gpu)) | |
e812744c SM |
609 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2); |
610 | else | |
611 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3); | |
f8fc924e | 612 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108); |
02ef80c5 | 613 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset); |
f8fc924e | 614 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3); |
4b565ca5 JC |
615 | |
616 | /* Setup GPU PDC */ | |
64245fc5 | 617 | setup_pdc: |
f8fc924e JC |
618 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0); |
619 | pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001); | |
4b565ca5 JC |
620 | |
621 | /* ensure no writes happen before the uCode is fully written */ | |
622 | wmb(); | |
f8fc924e JC |
623 | |
624 | err: | |
5ca4a094 | 625 | if (!IS_ERR_OR_NULL(pdcptr)) |
a62fb211 | 626 | iounmap(pdcptr); |
5ca4a094 | 627 | if (!IS_ERR_OR_NULL(seqptr)) |
a62fb211 | 628 | iounmap(seqptr); |
4b565ca5 JC |
629 | } |
630 | ||
631 | /* | |
632 | * The lowest 16 bits of this value are the number of XO clock cycles for main | |
633 | * hysteresis, which is set at 0x1680 cycles (300 us). The higher 16 bits are |
634 | * for the shorter hysteresis that happens after main - this is 0xa (0.5 us) |
635 | */ | |
636 | ||
637 | #define GMU_PWR_COL_HYST 0x000a1680 | |
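/*
 * A quick check of those numbers, assuming the usual 19.2 MHz Qualcomm
 * XO clock (not spelled out here): 0x1680 = 5760 cycles, and
 * 5760 / 19.2 MHz = 300 us; 0xa = 10 cycles, and 10 / 19.2 MHz is
 * roughly 0.5 us, matching the comment above.
 */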
638 | ||
639 | /* Set up the idle state for the GMU */ | |
640 | static void a6xx_gmu_power_config(struct a6xx_gmu *gmu) | |
641 | { | |
642 | /* Disable GMU WB/RB buffer */ | |
643 | gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1); | |
c6ed04f8 JM |
644 | gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1); |
645 | gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1); | |
4b565ca5 JC |
646 | |
647 | gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400); | |
648 | ||
649 | switch (gmu->idle_level) { | |
650 | case GMU_IDLE_STATE_IFPC: | |
651 | gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST, | |
652 | GMU_PWR_COL_HYST); | |
653 | gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0, | |
654 | A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE | | |
655 | A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE); | |
df561f66 | 656 | fallthrough; |
4b565ca5 JC |
657 | case GMU_IDLE_STATE_SPTP: |
658 | gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST, | |
659 | GMU_PWR_COL_HYST); | |
660 | gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0, | |
661 | A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE | | |
662 | A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE); | |
663 | } | |
664 | ||
665 | /* Enable RPMh GPU client */ | |
666 | gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0, | |
667 | A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE | | |
668 | A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE | | |
669 | A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE | | |
670 | A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE | | |
671 | A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE | | |
672 | A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE); | |
673 | } | |
674 | ||
c6ed04f8 JM |
675 | struct block_header { |
676 | u32 addr; | |
677 | u32 size; | |
678 | u32 type; | |
679 | u32 value; | |
680 | u32 data[]; | |
681 | }; | |
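/*
 * In the non-legacy firmware image these blocks are packed back to
 * back: each header is followed by 'size' bytes of payload in data[],
 * and a6xx_gmu_fw_load() below steps to the next header with
 * &blk->data[blk->size >> 2], routing each block to ITCM, DTCM or one
 * of the preallocated GMU BOs based on 'addr'.
 */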
682 | ||
683 | /* this should be a general kernel helper */ | |
684 | static int in_range(u32 addr, u32 start, u32 size) | |
685 | { | |
686 | return addr >= start && addr < start + size; | |
687 | } | |
688 | ||
689 | static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk) | |
690 | { | |
691 | if (!in_range(blk->addr, bo->iova, bo->size)) | |
692 | return false; | |
693 | ||
694 | memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size); | |
695 | return true; | |
696 | } | |
697 | ||
698 | static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu) | |
699 | { | |
700 | struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); | |
701 | struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; | |
702 | const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU]; | |
703 | const struct block_header *blk; | |
704 | u32 reg_offset; | |
705 | ||
706 | u32 itcm_base = 0x00000000; | |
707 | u32 dtcm_base = 0x00040000; | |
708 | ||
f6d62d09 | 709 | if (adreno_is_a650_family(adreno_gpu)) |
c6ed04f8 JM |
710 | dtcm_base = 0x10004000; |
711 | ||
712 | if (gmu->legacy) { | |
713 | /* Sanity check the size of the firmware that was loaded */ | |
714 | if (fw_image->size > 0x8000) { | |
715 | DRM_DEV_ERROR(gmu->dev, | |
716 | "GMU firmware is bigger than the available region\n"); | |
717 | return -EINVAL; | |
718 | } | |
719 | ||
720 | gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START, | |
721 | (u32*) fw_image->data, fw_image->size); | |
722 | return 0; | |
723 | } | |
724 | ||
725 | ||
726 | for (blk = (const struct block_header *) fw_image->data; | |
727 | (const u8*) blk < fw_image->data + fw_image->size; | |
728 | blk = (const struct block_header *) &blk->data[blk->size >> 2]) { | |
729 | if (blk->size == 0) | |
730 | continue; | |
731 | ||
732 | if (in_range(blk->addr, itcm_base, SZ_16K)) { | |
733 | reg_offset = (blk->addr - itcm_base) >> 2; | |
734 | gmu_write_bulk(gmu, | |
735 | REG_A6XX_GMU_CM3_ITCM_START + reg_offset, | |
736 | blk->data, blk->size); | |
737 | } else if (in_range(blk->addr, dtcm_base, SZ_16K)) { | |
738 | reg_offset = (blk->addr - dtcm_base) >> 2; | |
739 | gmu_write_bulk(gmu, | |
740 | REG_A6XX_GMU_CM3_DTCM_START + reg_offset, | |
741 | blk->data, blk->size); | |
742 | } else if (!fw_block_mem(&gmu->icache, blk) && | |
743 | !fw_block_mem(&gmu->dcache, blk) && | |
744 | !fw_block_mem(&gmu->dummy, blk)) { | |
745 | DRM_DEV_ERROR(gmu->dev, | |
746 | "failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n", | |
747 | blk->addr, blk->size, blk->data[0]); | |
748 | } | |
749 | } | |
750 | ||
751 | return 0; | |
752 | } | |
753 | ||
4b565ca5 JC |
754 | static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) |
755 | { | |
756 | static bool rpmh_init; | |
757 | struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); | |
758 | struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; | |
c6ed04f8 | 759 | int ret; |
4b565ca5 | 760 | u32 chipid; |
c6ed04f8 | 761 | |
f6d62d09 | 762 | if (adreno_is_a650_family(adreno_gpu)) { |
58e933e3 | 763 | gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1); |
c6ed04f8 | 764 | gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1); |
58e933e3 | 765 | } |
4b565ca5 JC |
766 | |
767 | if (state == GMU_WARM_BOOT) { | |
768 | ret = a6xx_rpmh_start(gmu); | |
769 | if (ret) | |
770 | return ret; | |
771 | } else { | |
772 | if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU], | |
773 | "GMU firmware is not loaded\n")) | |
774 | return -ENOENT; | |
775 | ||
4b565ca5 JC |
776 | /* Turn on register retention */ |
777 | gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1); | |
778 | ||
779 | /* We only need to load the RPMh microcode once */ | |
780 | if (!rpmh_init) { | |
781 | a6xx_gmu_rpmh_init(gmu); | |
782 | rpmh_init = true; | |
e31fdb74 | 783 | } else { |
4b565ca5 JC |
784 | ret = a6xx_rpmh_start(gmu); |
785 | if (ret) | |
786 | return ret; | |
787 | } | |
788 | ||
c6ed04f8 JM |
789 | ret = a6xx_gmu_fw_load(gmu); |
790 | if (ret) | |
791 | return ret; | |
4b565ca5 JC |
792 | } |
793 | ||
794 | gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0); | |
795 | gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02); | |
796 | ||
797 | /* Write the iova of the HFI table */ | |
29ac8979 | 798 | gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova); |
4b565ca5 JC |
799 | gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1); |
800 | ||
801 | gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0, | |
802 | (1 << 31) | (0xa << 18) | (0xa0)); | |
803 | ||
804 | chipid = adreno_gpu->rev.core << 24; | |
805 | chipid |= adreno_gpu->rev.major << 16; | |
806 | chipid |= adreno_gpu->rev.minor << 12; | |
807 | chipid |= adreno_gpu->rev.patchid << 8; | |
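/* e.g. an A630 (rev 6.3.0.0) packs to a chipid of 0x06030000 here */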
808 | ||
809 | gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid); | |
810 | ||
ad4968d5 JM |
811 | gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG, |
812 | gmu->log.iova | (gmu->log.size / SZ_4K - 1)); | |
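/*
 * The log BO iova is page aligned, so the low bits are presumably free
 * to carry the region size, encoded above as (number of 4K pages - 1).
 */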
813 | ||
4b565ca5 JC |
814 | /* Set up the lowest idle level on the GMU */ |
815 | a6xx_gmu_power_config(gmu); | |
816 | ||
817 | ret = a6xx_gmu_start(gmu); | |
818 | if (ret) | |
819 | return ret; | |
820 | ||
8167e6fa JM |
821 | if (gmu->legacy) { |
822 | ret = a6xx_gmu_gfx_rail_on(gmu); | |
823 | if (ret) | |
824 | return ret; | |
825 | } | |
4b565ca5 JC |
826 | |
827 | /* Enable SPTP_PC if the CPU is responsible for it */ | |
828 | if (gmu->idle_level < GMU_IDLE_STATE_SPTP) { | |
829 | ret = a6xx_sptprac_enable(gmu); | |
830 | if (ret) | |
831 | return ret; | |
832 | } | |
833 | ||
834 | ret = a6xx_gmu_hfi_start(gmu); | |
835 | if (ret) | |
836 | return ret; | |
837 | ||
838 | /* FIXME: Do we need this wmb() here? */ | |
839 | wmb(); | |
840 | ||
841 | return 0; | |
842 | } | |
843 | ||
844 | #define A6XX_HFI_IRQ_MASK \ | |
df0dff13 | 845 | (A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) |
4b565ca5 JC |
846 | |
847 | #define A6XX_GMU_IRQ_MASK \ | |
848 | (A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \ | |
849 | A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \ | |
850 | A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR) | |
851 | ||
4b565ca5 JC |
852 | static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu) |
853 | { | |
854 | disable_irq(gmu->gmu_irq); | |
855 | disable_irq(gmu->hfi_irq); | |
856 | ||
857 | gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0); | |
858 | gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0); | |
859 | } | |
860 | ||
41570b74 | 861 | static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu) |
4b565ca5 | 862 | { |
4b565ca5 JC |
863 | u32 val; |
864 | ||
41570b74 | 865 | /* Make sure there are no outstanding RPMh votes */ |
02ef80c5 | 866 | gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val, |
41570b74 | 867 | (val & 1), 100, 10000); |
02ef80c5 | 868 | gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val, |
41570b74 | 869 | (val & 1), 100, 10000); |
02ef80c5 | 870 | gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val, |
41570b74 | 871 | (val & 1), 100, 10000); |
02ef80c5 | 872 | gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val, |
41570b74 JC |
873 | (val & 1), 100, 1000); |
874 | } | |
875 | ||
3a9dd708 AO |
876 | #define GBIF_CLIENT_HALT_MASK BIT(0) |
877 | #define GBIF_ARB_HALT_MASK BIT(1) | |
878 | ||
f4a75b59 AO |
879 | static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, |
880 | bool gx_off) | |
3a9dd708 AO |
881 | { |
882 | struct msm_gpu *gpu = &adreno_gpu->base; | |
883 | ||
884 | if (!a6xx_has_gbif(adreno_gpu)) { | |
885 | gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf); | |
886 | spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & | |
887 | 0xf) == 0xf); | |
888 | gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); | |
889 | ||
890 | return; | |
891 | } | |
892 | ||
f4a75b59 AO |
893 | if (gx_off) { |
894 | /* Halt the gx side of GBIF */ | |
895 | gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1); | |
896 | spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1); | |
897 | } | |
3a9dd708 AO |
898 | |
899 | /* Halt new client requests on GBIF */ | |
900 | gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK); | |
901 | spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & | |
902 | (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK); | |
903 | ||
904 | /* Halt all AXI requests on GBIF */ | |
905 | gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK); | |
906 | spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & | |
907 | (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK); | |
908 | ||
909 | /* The GBIF halt needs to be explicitly cleared */ | |
910 | gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); | |
911 | } | |
912 | ||
41570b74 JC |
913 | /* Force the GMU off in case it isn't responsive */ |
914 | static void a6xx_gmu_force_off(struct a6xx_gmu *gmu) | |
915 | { | |
3a9dd708 AO |
916 | struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); |
917 | struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; | |
918 | struct msm_gpu *gpu = &adreno_gpu->base; | |
919 | ||
4b565ca5 JC |
920 | /* Flush all the queues */ |
921 | a6xx_hfi_stop(gmu); | |
922 | ||
923 | /* Stop the interrupts */ | |
924 | a6xx_gmu_irq_disable(gmu); | |
925 | ||
926 | /* Force off SPTP in case the GMU is managing it */ | |
927 | a6xx_sptprac_disable(gmu); | |
928 | ||
929 | /* Make sure there are no outstanding RPMh votes */ | |
41570b74 | 930 | a6xx_gmu_rpmh_off(gmu); |
3a9dd708 AO |
931 | |
932 | /* Halt the gmu cm3 core */ | |
933 | gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); | |
934 | ||
f4a75b59 | 935 | a6xx_bus_clear_pending_transactions(adreno_gpu, true); |
3a9dd708 AO |
936 | |
937 | /* Reset GPU core blocks */ | |
938 | gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, 1); | |
939 | udelay(100); | |
4b565ca5 JC |
940 | } |
941 | ||
1f60d114 SM |
942 | static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu) |
943 | { | |
944 | struct dev_pm_opp *gpu_opp; | |
945 | unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; | |
946 | ||
947 | gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true); | |
39b14bb5 | 948 | if (IS_ERR(gpu_opp)) |
1f60d114 SM |
949 | return; |
950 | ||
5e0c22d4 | 951 | gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */ |
6694482a | 952 | a6xx_gmu_set_freq(gpu, gpu_opp, false); |
1f60d114 SM |
953 | dev_pm_opp_put(gpu_opp); |
954 | } | |
955 | ||
20925fe8 SM |
956 | static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu) |
957 | { | |
958 | struct dev_pm_opp *gpu_opp; | |
959 | unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; | |
960 | ||
961 | gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true); | |
39b14bb5 | 962 | if (IS_ERR(gpu_opp)) |
20925fe8 SM |
963 | return; |
964 | ||
920b4a67 | 965 | dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp); |
20925fe8 SM |
966 | dev_pm_opp_put(gpu_opp); |
967 | } | |
968 | ||
4b565ca5 JC |
969 | int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) |
970 | { | |
fcf9d0b7 JC |
971 | struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; |
972 | struct msm_gpu *gpu = &adreno_gpu->base; | |
4b565ca5 JC |
973 | struct a6xx_gmu *gmu = &a6xx_gpu->gmu; |
974 | int status, ret; | |
975 | ||
606ec90f | 976 | if (WARN(!gmu->initialized, "The GMU is not set up yet\n")) |
4cd15a3e | 977 | return -EINVAL; |
4b565ca5 | 978 | |
e31fdb74 JC |
979 | gmu->hung = false; |
980 | ||
4b565ca5 JC |
981 | /* Turn on the resources */ |
982 | pm_runtime_get_sync(gmu->dev); | |
983 | ||
57c0bd51 AO |
984 | /* |
985 | * "enable" the GX power domain which won't actually do anything but it | |
986 | * will make sure that the refcounting is correct in case we need to | |
987 | * bring down the GX after a GMU failure | |
988 | */ | |
989 | if (!IS_ERR_OR_NULL(gmu->gxpd)) | |
990 | pm_runtime_get_sync(gmu->gxpd); | |
991 | ||
4b565ca5 JC |
992 | /* Use a known rate to bring up the GMU */ |
993 | clk_set_rate(gmu->core_clk, 200000000); | |
192f4ee3 | 994 | clk_set_rate(gmu->hub_clk, 150000000); |
4b565ca5 | 995 | ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); |
41570b74 | 996 | if (ret) { |
57c0bd51 | 997 | pm_runtime_put(gmu->gxpd); |
41570b74 JC |
998 | pm_runtime_put(gmu->dev); |
999 | return ret; | |
1000 | } | |
4b565ca5 | 1001 | |
fcf9d0b7 | 1002 | /* Set the bus quota to a reasonable value for boot */ |
20925fe8 | 1003 | a6xx_gmu_set_initial_bw(gpu, gmu); |
fcf9d0b7 | 1004 | |
41570b74 JC |
1005 | /* Enable the GMU interrupt */ |
1006 | gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0); | |
1007 | gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK); | |
1008 | enable_irq(gmu->gmu_irq); | |
4b565ca5 JC |
1009 | |
1010 | /* Check to see if we are doing a cold or warm boot */ | |
1011 | status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ? | |
1012 | GMU_WARM_BOOT : GMU_COLD_BOOT; | |
1013 | ||
c6ed04f8 JM |
1014 | /* |
1015 | * Warm boot path does not work on newer GPUs | |
1016 | * Presumably this is because icache/dcache regions must be restored | |
1017 | */ | |
1018 | if (!gmu->legacy) | |
1019 | status = GMU_COLD_BOOT; | |
1020 | ||
4b565ca5 JC |
1021 | ret = a6xx_gmu_fw_start(gmu, status); |
1022 | if (ret) | |
1023 | goto out; | |
1024 | ||
1025 | ret = a6xx_hfi_start(gmu, status); | |
41570b74 JC |
1026 | if (ret) |
1027 | goto out; | |
1028 | ||
1029 | /* | |
1030 | * Turn on the GMU firmware fault interrupt after we know the boot | |
1031 | * sequence is successful | |
1032 | */ | |
1033 | gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0); | |
1034 | gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK); | |
1035 | enable_irq(gmu->hfi_irq); | |
4b565ca5 | 1036 | |
bd3fe811 | 1037 | /* Set the GPU to the current freq */ |
1f60d114 | 1038 | a6xx_gmu_set_initial_freq(gpu, gmu); |
4b565ca5 JC |
1039 | |
1040 | out: | |
41570b74 JC |
1041 | /* On failure, shut down the GMU to leave it in a good state */ |
1042 | if (ret) { | |
1043 | disable_irq(gmu->gmu_irq); | |
1044 | a6xx_rpmh_stop(gmu); | |
57c0bd51 | 1045 | pm_runtime_put(gmu->gxpd); |
41570b74 JC |
1046 | pm_runtime_put(gmu->dev); |
1047 | } | |
4b565ca5 JC |
1048 | |
1049 | return ret; | |
1050 | } | |
1051 | ||
1052 | bool a6xx_gmu_isidle(struct a6xx_gmu *gmu) | |
1053 | { | |
1054 | u32 reg; | |
1055 | ||
606ec90f | 1056 | if (!gmu->initialized) |
4b565ca5 JC |
1057 | return true; |
1058 | ||
1059 | reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS); | |
1060 | ||
1061 | if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB) | |
1062 | return false; | |
1063 | ||
1064 | return true; | |
1065 | } | |
1066 | ||
e31fdb74 JC |
1067 | /* Gracefully try to shut down the GMU and by extension the GPU */ |
1068 | static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) | |
4b565ca5 | 1069 | { |
41570b74 JC |
1070 | struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); |
1071 | struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; | |
4b565ca5 JC |
1072 | u32 val; |
1073 | ||
1074 | /* | |
1075 | * The GMU may still be in slumber if the GPU never started, so check |
1076 | * and skip putting it back into slumber in that case |
1077 | */ | |
1078 | val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); | |
1079 | ||
1080 | if (val != 0xf) { | |
e31fdb74 | 1081 | int ret = a6xx_gmu_wait_for_idle(gmu); |
4b565ca5 | 1082 | |
e31fdb74 JC |
1083 | /* If the GMU isn't responding assume it is hung */ |
1084 | if (ret) { | |
1085 | a6xx_gmu_force_off(gmu); | |
1086 | return; | |
1087 | } | |
4b565ca5 | 1088 | |
f4a75b59 | 1089 | a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung); |
41570b74 | 1090 | |
4b565ca5 | 1091 | /* tell the GMU we want to slumber */ |
d6463fd4 AO |
1092 | ret = a6xx_gmu_notify_slumber(gmu); |
1093 | if (ret) { | |
1094 | a6xx_gmu_force_off(gmu); | |
1095 | return; | |
1096 | } | |
4b565ca5 JC |
1097 | |
1098 | ret = gmu_poll_timeout(gmu, | |
1099 | REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val, | |
1100 | !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB), | |
1101 | 100, 10000); | |
1102 | ||
1103 | /* | |
1104 | * Let the user know we failed to slumber but don't worry too | |
1105 | * much because we are powering down anyway | |
1106 | */ | |
1107 | ||
1108 | if (ret) | |
6a41da17 | 1109 | DRM_DEV_ERROR(gmu->dev, |
4b565ca5 JC |
1110 | "Unable to slumber GMU: status = 0%x/0%x\n", |
1111 | gmu_read(gmu, | |
1112 | REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS), | |
1113 | gmu_read(gmu, | |
1114 | REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2)); | |
1115 | } | |
1116 | ||
1117 | /* Turn off HFI */ | |
1118 | a6xx_hfi_stop(gmu); | |
1119 | ||
1120 | /* Stop the interrupts and mask the hardware */ | |
1121 | a6xx_gmu_irq_disable(gmu); | |
1122 | ||
1123 | /* Tell RPMh to power off the GPU */ | |
1124 | a6xx_rpmh_stop(gmu); | |
e31fdb74 JC |
1125 | } |
1126 | ||
1127 | ||
1128 | int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) | |
1129 | { | |
1130 | struct a6xx_gmu *gmu = &a6xx_gpu->gmu; | |
1131 | struct msm_gpu *gpu = &a6xx_gpu->base.base; | |
1132 | ||
41570b74 JC |
1133 | if (!pm_runtime_active(gmu->dev)) |
1134 | return 0; | |
1135 | ||
e31fdb74 JC |
1136 | /* |
1137 | * Force the GMU off if we detected a hang, otherwise try to shut it | |
1138 | * down gracefully | |
1139 | */ | |
1140 | if (gmu->hung) | |
1141 | a6xx_gmu_force_off(gmu); | |
1142 | else | |
1143 | a6xx_gmu_shutdown(gmu); | |
4b565ca5 | 1144 | |
fcf9d0b7 | 1145 | /* Remove the bus vote */ |
920b4a67 | 1146 | dev_pm_opp_set_opp(&gpu->pdev->dev, NULL); |
fcf9d0b7 | 1147 | |
9325d426 | 1148 | /* |
e31fdb74 JC |
1149 | * Make sure the GX domain is off before turning off the GMU (CX) |
1150 | * domain. Usually the GMU does this but only if the shutdown sequence | |
1151 | * was successful | |
9325d426 | 1152 | */ |
2b117451 | 1153 | if (!IS_ERR_OR_NULL(gmu->gxpd)) |
9325d426 JC |
1154 | pm_runtime_put_sync(gmu->gxpd); |
1155 | ||
4b565ca5 JC |
1156 | clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks); |
1157 | ||
1158 | pm_runtime_put_sync(gmu->dev); | |
1159 | ||
1160 | return 0; | |
1161 | } | |
1162 | ||
29ac8979 | 1163 | static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu) |
4b565ca5 | 1164 | { |
030af2b0 RC |
1165 | msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace); |
1166 | msm_gem_kernel_put(gmu->debug.obj, gmu->aspace); | |
1167 | msm_gem_kernel_put(gmu->icache.obj, gmu->aspace); | |
1168 | msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace); | |
1169 | msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace); | |
1170 | msm_gem_kernel_put(gmu->log.obj, gmu->aspace); | |
29ac8979 JM |
1171 | |
1172 | gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu); | |
1173 | msm_gem_address_space_put(gmu->aspace); | |
1174 | } | |
1175 | ||
1176 | static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo, | |
a630ac68 | 1177 | size_t size, u64 iova, const char *name) |
29ac8979 JM |
1178 | { |
1179 | struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); | |
1180 | struct drm_device *dev = a6xx_gpu->base.base.dev; | |
1181 | uint32_t flags = MSM_BO_WC; | |
1182 | u64 range_start, range_end; | |
1183 | int ret; | |
1184 | ||
1185 | size = PAGE_ALIGN(size); | |
1186 | if (!iova) { | |
1187 | /* no fixed address - use GMU's uncached range */ | |
c6ed04f8 | 1188 | range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */ |
29ac8979 JM |
1189 | range_end = 0x80000000; |
1190 | } else { | |
1191 | /* range for fixed address */ | |
1192 | range_start = iova; | |
1193 | range_end = iova + size; | |
c6ed04f8 JM |
1194 | /* use IOMMU_PRIV for icache/dcache */ |
1195 | flags |= MSM_BO_MAP_PRIV; | |
29ac8979 JM |
1196 | } |
1197 | ||
1198 | bo->obj = msm_gem_new(dev, size, flags); | |
1199 | if (IS_ERR(bo->obj)) | |
1200 | return PTR_ERR(bo->obj); | |
4b565ca5 | 1201 | |
29ac8979 | 1202 | ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova, |
2ee4b5d2 | 1203 | range_start, range_end); |
29ac8979 JM |
1204 | if (ret) { |
1205 | drm_gem_object_put(bo->obj); | |
1206 | return ret; | |
1207 | } | |
1208 | ||
1209 | bo->virt = msm_gem_get_vaddr(bo->obj); | |
1210 | bo->size = size; | |
1211 | ||
a630ac68 RC |
1212 | msm_gem_object_set_name(bo->obj, name); |
1213 | ||
29ac8979 | 1214 | return 0; |
4b565ca5 JC |
1215 | } |
1216 | ||
29ac8979 | 1217 | static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu) |
4b565ca5 | 1218 | { |
ccac7ce3 | 1219 | struct msm_mmu *mmu; |
4b565ca5 | 1220 | |
3236130b DB |
1221 | mmu = msm_iommu_new(gmu->dev, 0); |
1222 | if (!mmu) | |
29ac8979 | 1223 | return -ENODEV; |
3236130b DB |
1224 | if (IS_ERR(mmu)) |
1225 | return PTR_ERR(mmu); | |
4b565ca5 | 1226 | |
30480e6e | 1227 | gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000); |
3236130b | 1228 | if (IS_ERR(gmu->aspace)) |
29ac8979 | 1229 | return PTR_ERR(gmu->aspace); |
4b565ca5 | 1230 | |
29ac8979 | 1231 | return 0; |
4b565ca5 JC |
1232 | } |
1233 | ||
4b565ca5 | 1234 | /* Return the 'arc-level' for the given frequency */ |
e1505f62 DA |
1235 | static unsigned int a6xx_gmu_get_arc_level(struct device *dev, |
1236 | unsigned long freq) | |
4b565ca5 JC |
1237 | { |
1238 | struct dev_pm_opp *opp; | |
e1505f62 | 1239 | unsigned int val; |
4b565ca5 JC |
1240 | |
1241 | if (!freq) | |
1242 | return 0; | |
1243 | ||
e1505f62 | 1244 | opp = dev_pm_opp_find_freq_exact(dev, freq, true); |
4b565ca5 JC |
1245 | if (IS_ERR(opp)) |
1246 | return 0; | |
1247 | ||
e1505f62 | 1248 | val = dev_pm_opp_get_level(opp); |
4b565ca5 JC |
1249 | |
1250 | dev_pm_opp_put(opp); | |
1251 | ||
1252 | return val; | |
1253 | } | |
1254 | ||
1255 | static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes, | |
ed3cafa7 | 1256 | unsigned long *freqs, int freqs_count, const char *id) |
4b565ca5 JC |
1257 | { |
1258 | int i, j; | |
ed3cafa7 SB |
1259 | const u16 *pri, *sec; |
1260 | size_t pri_count, sec_count; | |
1261 | ||
1262 | pri = cmd_db_read_aux_data(id, &pri_count); | |
b601f731 SB |
1263 | if (IS_ERR(pri)) |
1264 | return PTR_ERR(pri); | |
ed3cafa7 SB |
1265 | /* |
1266 | * The data comes back as an array of unsigned shorts so adjust the | |
1267 | * count accordingly | |
1268 | */ | |
1269 | pri_count >>= 1; | |
1270 | if (!pri_count) | |
1271 | return -EINVAL; | |
1272 | ||
1273 | sec = cmd_db_read_aux_data("mx.lvl", &sec_count); | |
b601f731 SB |
1274 | if (IS_ERR(sec)) |
1275 | return PTR_ERR(sec); | |
1276 | ||
ed3cafa7 SB |
1277 | sec_count >>= 1; |
1278 | if (!sec_count) | |
1279 | return -EINVAL; | |
4b565ca5 JC |
1280 | |
1281 | /* Construct a vote for each frequency */ | |
1282 | for (i = 0; i < freqs_count; i++) { | |
1283 | u8 pindex = 0, sindex = 0; | |
e1505f62 | 1284 | unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]); |
4b565ca5 JC |
1285 | |
1286 | /* Get the primary index that matches the arc level */ | |
1287 | for (j = 0; j < pri_count; j++) { | |
1288 | if (pri[j] >= level) { | |
1289 | pindex = j; | |
1290 | break; | |
1291 | } | |
1292 | } | |
1293 | ||
1294 | if (j == pri_count) { | |
6a41da17 | 1295 | DRM_DEV_ERROR(dev, |
09b4138e CJ |
1296 | "Level %u not found in the RPMh list\n", |
1297 | level); | |
6a41da17 | 1298 | DRM_DEV_ERROR(dev, "Available levels:\n"); |
4b565ca5 | 1299 | for (j = 0; j < pri_count; j++) |
6a41da17 | 1300 | DRM_DEV_ERROR(dev, " %u\n", pri[j]); |
4b565ca5 JC |
1301 | |
1302 | return -EINVAL; | |
1303 | } | |
1304 | ||
1305 | /* | |
1306 | * Look for a level in the secondary list that matches. If |
1307 | * nothing fits, use the maximum non-zero vote |
1308 | */ | |
1309 | ||
1310 | for (j = 0; j < sec_count; j++) { | |
1311 | if (sec[j] >= level) { | |
1312 | sindex = j; | |
1313 | break; | |
1314 | } else if (sec[j]) { | |
1315 | sindex = j; | |
1316 | } | |
1317 | } | |
1318 | ||
1319 | /* Construct the vote */ | |
1320 | votes[i] = ((pri[pindex] & 0xffff) << 16) | | |
1321 | (sindex << 8) | pindex; | |
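/*
 * Reading the expression back: bits [31:16] carry the primary arc
 * level value itself, bits [15:8] the index into the secondary
 * (mx.lvl) table, and bits [7:0] the index into the primary table.
 */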
1322 | } | |
1323 | ||
1324 | return 0; | |
1325 | } | |
1326 | ||
1327 | /* | |
1328 | * The GMU votes with the RPMh for itself and on behalf of the GPU but we need | |
1329 | * to construct the list of votes on the CPU and send it over. Query the RPMh | |
1330 | * voltage levels and build the votes | |
1331 | */ | |
1332 | ||
1333 | static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu) | |
1334 | { | |
1335 | struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); | |
1336 | struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; | |
1337 | struct msm_gpu *gpu = &adreno_gpu->base; | |
4b565ca5 JC |
1338 | int ret; |
1339 | ||
4b565ca5 JC |
1340 | /* Build the GX votes */ |
1341 | ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes, | |
ed3cafa7 | 1342 | gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl"); |
4b565ca5 JC |
1343 | |
1344 | /* Build the CX votes */ | |
1345 | ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes, | |
ed3cafa7 | 1346 | gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl"); |
4b565ca5 JC |
1347 | |
1348 | return ret; | |
1349 | } | |
1350 | ||
1351 | static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs, | |
1352 | u32 size) | |
1353 | { | |
1354 | int count = dev_pm_opp_get_opp_count(dev); | |
1355 | struct dev_pm_opp *opp; | |
1356 | int i, index = 0; | |
1357 | unsigned long freq = 1; | |
1358 | ||
1359 | /* | |
1360 | * The OPP table doesn't contain the "off" frequency level so we need to | |
1361 | * add 1 to the table size to account for it | |
1362 | */ | |
1363 | ||
1364 | if (WARN(count + 1 > size, | |
1365 | "The GMU frequency table is being truncated\n")) | |
1366 | count = size - 1; | |
1367 | ||
1368 | /* Set the "off" frequency */ | |
1369 | freqs[index++] = 0; | |
1370 | ||
1371 | for (i = 0; i < count; i++) { | |
1372 | opp = dev_pm_opp_find_freq_ceil(dev, &freq); | |
1373 | if (IS_ERR(opp)) | |
1374 | break; | |
1375 | ||
1376 | dev_pm_opp_put(opp); | |
1377 | freqs[index++] = freq++; | |
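/*
 * dev_pm_opp_find_freq_ceil() stores the matched rate back into
 * 'freq'; the ++ nudges it past this OPP so the next lookup advances.
 */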
1378 | } | |
1379 | ||
1380 | return index; | |
1381 | } | |
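/*
 * For illustration: with GPU OPPs at 300, 500 and 700 MHz this yields
 * { 0, 300000000, 500000000, 700000000 } and returns 4, index 0 being
 * the implicit "off" level described above.
 */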
1382 | ||
1383 | static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu) | |
1384 | { | |
1385 | struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); | |
1386 | struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; | |
1387 | struct msm_gpu *gpu = &adreno_gpu->base; | |
1388 | ||
1389 | int ret = 0; | |
1390 | ||
1391 | /* | |
1392 | * The GMU handles its own frequency switching so build a list of | |
546907de | 1393 | * available frequencies to send during initialization |
4b565ca5 | 1394 | */ |
11120e93 | 1395 | ret = devm_pm_opp_of_add_table(gmu->dev); |
4b565ca5 | 1396 | if (ret) { |
6a41da17 | 1397 | DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n"); |
4b565ca5 JC |
1398 | return ret; |
1399 | } | |
1400 | ||
1401 | gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev, | |
1402 | gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs)); | |
1403 | ||
1404 | /* | |
1405 | * The GMU also handles GPU frequency switching so build a list | |
1406 | * from the GPU OPP table | |
1407 | */ | |
1408 | gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev, | |
1409 | gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs)); | |
1410 | ||
bd3fe811 RC |
1411 | gmu->current_perf_index = gmu->nr_gpu_freqs - 1; |
1412 | ||
4b565ca5 JC |
1413 | /* Build the list of RPMh votes that we'll send to the GMU */ |
1414 | return a6xx_gmu_rpmh_votes_init(gmu); | |
1415 | } | |
1416 | ||
1417 | static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu) | |
1418 | { | |
8e3e791d | 1419 | int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks); |
4b565ca5 JC |
1420 | |
1421 | if (ret < 1) | |
1422 | return ret; | |
1423 | ||
1424 | gmu->nr_clocks = ret; | |
1425 | ||
1426 | gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks, | |
1427 | gmu->nr_clocks, "gmu"); | |
1428 | ||
192f4ee3 AO |
1429 | gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks, |
1430 | gmu->nr_clocks, "hub"); | |
1431 | ||
4b565ca5 JC |
1432 | return 0; |
1433 | } | |
1434 | ||
1435 | static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev, | |
1436 | const char *name) | |
1437 | { | |
1438 | void __iomem *ret; | |
1439 | struct resource *res = platform_get_resource_byname(pdev, | |
1440 | IORESOURCE_MEM, name); | |
1441 | ||
1442 | if (!res) { | |
6a41da17 | 1443 | DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name); |
4b565ca5 JC |
1444 | return ERR_PTR(-EINVAL); |
1445 | } | |
1446 | ||
a62fb211 | 1447 | ret = ioremap(res->start, resource_size(res)); |
4b565ca5 | 1448 | if (!ret) { |
6a41da17 | 1449 | DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name); |
4b565ca5 JC |
1450 | return ERR_PTR(-EINVAL); |
1451 | } | |
1452 | ||
1453 | return ret; | |
1454 | } | |
1455 | ||
1456 | static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev, | |
1457 | const char *name, irq_handler_t handler) | |
1458 | { | |
1459 | int irq, ret; | |
1460 | ||
1461 | irq = platform_get_irq_byname(pdev, name); | |
1462 | ||
a62fb211 | 1463 | ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu); |
4b565ca5 | 1464 | if (ret) { |
a62fb211 SP |
1465 | DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n", |
1466 | name, ret); | |
4b565ca5 JC |
1467 | return ret; |
1468 | } | |
1469 | ||
1470 | disable_irq(irq); | |
1471 | ||
1472 | return irq; | |
1473 | } | |
1474 | ||
1475 | void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu) | |
1476 | { | |
1477 | struct a6xx_gmu *gmu = &a6xx_gpu->gmu; | |
02ef80c5 | 1478 | struct platform_device *pdev = to_platform_device(gmu->dev); |
4b565ca5 | 1479 | |
606ec90f | 1480 | if (!gmu->initialized) |
4b565ca5 JC |
1481 | return; |
1482 | ||
03b7af1e | 1483 | pm_runtime_force_suspend(gmu->dev); |
9325d426 | 1484 | |
2b117451 | 1485 | if (!IS_ERR_OR_NULL(gmu->gxpd)) { |
9325d426 JC |
1486 | pm_runtime_disable(gmu->gxpd); |
1487 | dev_pm_domain_detach(gmu->gxpd, false); | |
1488 | } | |
1489 | ||
a62fb211 | 1490 | iounmap(gmu->mmio); |
02ef80c5 JM |
1491 | if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc")) |
1492 | iounmap(gmu->rscc); | |
a62fb211 | 1493 | gmu->mmio = NULL; |
02ef80c5 | 1494 | gmu->rscc = NULL; |
a62fb211 | 1495 | |
29ac8979 | 1496 | a6xx_gmu_memory_free(gmu); |
4b565ca5 | 1497 | |
a62fb211 SP |
1498 | free_irq(gmu->gmu_irq, gmu); |
1499 | free_irq(gmu->hfi_irq, gmu); | |
1500 | ||
998efc74 SP |
1501 | /* Drop reference taken in of_find_device_by_node */ |
1502 | put_device(gmu->dev); | |
1503 | ||
606ec90f | 1504 | gmu->initialized = false; |
4b565ca5 JC |
1505 | } |
1506 | ||
981f2aab | 1507 | int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) |
4b565ca5 | 1508 | { |
8167e6fa | 1509 | struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; |
4b565ca5 JC |
1510 | struct a6xx_gmu *gmu = &a6xx_gpu->gmu; |
1511 | struct platform_device *pdev = of_find_device_by_node(node); | |
1512 | int ret; | |
1513 | ||
1514 | if (!pdev) | |
1515 | return -ENODEV; | |
1516 | ||
f6f59072 RC |
1517 | mutex_init(&gmu->lock); |
1518 | ||
4b565ca5 JC |
1519 | gmu->dev = &pdev->dev; |
1520 | ||
29ac8979 | 1521 | of_dma_configure(gmu->dev, node, true); |
4b565ca5 JC |
1522 | |
1523 | /* For now, don't do anything fancy until we get our feet under us */ |
1524 | gmu->idle_level = GMU_IDLE_STATE_ACTIVE; | |
1525 | ||
1526 | pm_runtime_enable(gmu->dev); | |
4b565ca5 JC |
1527 | |
1528 | /* Get the list of clocks */ | |
1529 | ret = a6xx_gmu_clocks_probe(gmu); | |
1530 | if (ret) | |
998efc74 | 1531 | goto err_put_device; |
4b565ca5 | 1532 | |
29ac8979 JM |
1533 | ret = a6xx_gmu_memory_probe(gmu); |
1534 | if (ret) | |
1535 | goto err_put_device; | |
1536 | ||
f6d62d09 JM |
1537 | |
1538 | /* A660 now requires handling "prealloc requests" in GMU firmware | |
1539 | * For now just hardcode allocations based on the known firmware. | |
1540 | * note: there is no indication that these correspond to "dummy" or | |
1541 | * "debug" regions, but this "guess" allows reusing these BOs which | |
1542 | * are otherwise unused by a660. | |
1543 | */ | |
1544 | gmu->dummy.size = SZ_4K; | |
192f4ee3 | 1545 | if (adreno_is_a660_family(adreno_gpu)) { |
a630ac68 RC |
1546 | ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, |
1547 | 0x60400000, "debug"); | |
f6d62d09 JM |
1548 | if (ret) |
1549 | goto err_memory; | |
1550 | ||
1551 | gmu->dummy.size = SZ_8K; | |
1552 | } | |
1553 | ||
c6ed04f8 | 1554 | /* Allocate memory for the GMU dummy page */ |
a630ac68 RC |
1555 | ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size, |
1556 | 0x60000000, "dummy"); | |
c6ed04f8 JM |
1557 | if (ret) |
1558 | goto err_memory; | |
1559 | ||
f4f6dfde | 1560 | /* Note that a650 family also includes a660 family: */ |
f6d62d09 | 1561 | if (adreno_is_a650_family(adreno_gpu)) { |
c6ed04f8 | 1562 | ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, |
a630ac68 | 1563 | SZ_16M - SZ_16K, 0x04000, "icache"); |
c6ed04f8 JM |
1564 | if (ret) |
1565 | goto err_memory; | |
b7616b5c KD |
1566 | /* |
1567 | * NOTE: when porting legacy ("pre-650-family") GPUs you may be tempted to add a condition | |
1568 | * to allocate icache/dcache here, as per downstream code flow, but it may not actually be | |
1569 | * necessary. If you omit this step and you don't get random pagefaults, you are likely | |
1570 | * good to go without this! | |
1571 | */ | |
083cc3a4 | 1572 | } else if (adreno_is_a640_family(adreno_gpu)) { |
c6ed04f8 | 1573 | ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, |
a630ac68 | 1574 | SZ_256K - SZ_16K, 0x04000, "icache"); |
c6ed04f8 JM |
1575 | if (ret) |
1576 | goto err_memory; | |
1577 | ||
1578 | ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache, | |
a630ac68 | 1579 | SZ_256K - SZ_16K, 0x44000, "dcache"); |
c6ed04f8 JM |
1580 | if (ret) |
1581 | goto err_memory; | |
b7616b5c | 1582 | } else if (adreno_is_a630(adreno_gpu) || adreno_is_a615_family(adreno_gpu)) { |
8167e6fa JM |
1583 | /* HFI v1, has sptprac */ |
1584 | gmu->legacy = true; | |
1585 | ||
1586 | /* Allocate memory for the GMU debug region */ | |
a630ac68 | 1587 | ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug"); |
8167e6fa JM |
1588 | if (ret) |
1589 | goto err_memory; | |
1590 | } | |
1591 | ||
4b565ca5 | 1592 | /* Allocate memory for the HFI queues */
a630ac68 | 1593 | ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi"); |
29ac8979 | 1594 | if (ret) |
a62fb211 | 1595 | goto err_memory; |
4b565ca5 | 1596 | |
ad4968d5 | 1597 | /* Allocate memory for the GMU log region */ |
a630ac68 | 1598 | ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0, "log"); |
ad4968d5 JM |
1599 | if (ret) |
1600 | goto err_memory; | |
1601 | ||
4b565ca5 JC |
1602 | /* Map the GMU registers */ |
1603 | gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); | |
29ac8979 JM |
1604 | if (IS_ERR(gmu->mmio)) { |
1605 | ret = PTR_ERR(gmu->mmio); | |
a62fb211 | 1606 | goto err_memory; |
29ac8979 | 1607 | } |
4b565ca5 | 1608 | |
f6d62d09 | 1609 | if (adreno_is_a650_family(adreno_gpu)) { |
02ef80c5 JM |
1610 | gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc"); |
1611 | if (IS_ERR(gmu->rscc)) | |
1612 | goto err_mmio; | |
1613 | } else { | |
1614 | gmu->rscc = gmu->mmio + 0x23000; | |
1615 | } | |
1616 | ||
4b565ca5 JC |
1617 | /* Get the HFI and GMU interrupts */ |
1618 | gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq); | |
1619 | gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq); | |
1620 | ||
1621 | if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) | |
a62fb211 | 1622 | goto err_mmio; |
4b565ca5 | 1623 | |
9325d426 JC |
1624 | /* |
1625 | * Get a link to the GX power domain to reset the GPU in case of GMU | |
1626 | * crash | |
1627 | */ | |
1628 | gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx"); | |
1629 | ||
4b565ca5 JC |
1630 | /* Get the power levels for the GMU and GPU */ |
1631 | a6xx_gmu_pwrlevels_probe(gmu); | |
1632 | ||
1633 | /* Set up the HFI queues */ | |
1634 | a6xx_hfi_init(gmu); | |
1635 | ||
606ec90f SP |
1636 | gmu->initialized = true; |
1637 | ||
4b565ca5 | 1638 | return 0; |
a62fb211 SP |
1639 | |
1640 | err_mmio: | |
1641 | iounmap(gmu->mmio); | |
02ef80c5 JM |
1642 | if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc")) |
1643 | iounmap(gmu->rscc); | |
a62fb211 SP |
1644 | free_irq(gmu->gmu_irq, gmu); |
1645 | free_irq(gmu->hfi_irq, gmu); | |
4b565ca5 | 1646 | |
998efc74 | 1647 | ret = -ENODEV; |
4b565ca5 | 1648 | |
29ac8979 JM |
1649 | err_memory: |
1650 | a6xx_gmu_memory_free(gmu); | |
998efc74 SP |
1651 | err_put_device: |
1652 | /* Drop reference taken in of_find_device_by_node */ | |
1653 | put_device(gmu->dev); | |
1654 | ||
1655 | return ret; | |
4b565ca5 | 1656 | } |