// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>

#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"

static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");

		/* Temporary until we can recover safely */
		BUG();
	}

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));

	return IRQ_HANDLED;
}

static irqreturn_t a6xx_hfi_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);

	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");

		/* Temporary until we can recover safely */
		BUG();
	}

	return IRQ_HANDLED;
}

bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (IS_ERR_OR_NULL(gmu->mmio))
		return false;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
}

/* Check to see if the GX rail is still powered */
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (IS_ERR_OR_NULL(gmu->mmio))
		return false;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}

static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
{
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);

	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
		((3 & 0xf) << 28) | index);

	/*
	 * Send an invalid index as a vote for the bus bandwidth and let the
	 * firmware decide on the right vote
	 */
	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);

	/* Set and clear the OOB for DCVS to trigger the GMU */
	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);

	ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
	if (ret)
		dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);

	gmu->freq = gmu->gpu_freqs[index];
}

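/*
 * Worked example of the REG_A6XX_GMU_DCVS_PERF_SETTING encoding used above,
 * assuming a hypothetical perf index of 4: the fixed (3 & 0xf) field lands in
 * bits [31:28] and the index in the low bits, so the register value written
 * is (0x3 << 28) | 4 = 0x30000004.
 */
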
void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u32 perf_index = 0;

	if (freq == gmu->freq)
		return;

	for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
		if (freq == gmu->gpu_freqs[perf_index])
			break;

	__a6xx_gmu_set_freq(gmu, perf_index);
}

unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	return gmu->freq;
}

static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
{
	u32 val;
	int local = gmu->idle_level;

	/* SPTP and IFPC both report as IFPC */
	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
		local = GMU_IDLE_STATE_IFPC;

	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val == local) {
		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
			!a6xx_gmu_gx_is_on(gmu))
			return true;
	}

	return false;
}

/* Wait for the GMU to get to its most idle state */
int a6xx_gmu_wait_for_idle(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	return spin_until(a6xx_gmu_check_idle_level(gmu));
}

static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
		val == 0xbabeface, 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");

	return ret;
}

static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
		val & 1, 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");

	return ret;
}

/* Trigger an OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int ret;
	u32 val;
	int request, ack;
	const char *name;

	switch (state) {
	case GMU_OOB_GPU_SET:
		request = GMU_OOB_GPU_SET_REQUEST;
		ack = GMU_OOB_GPU_SET_ACK;
		name = "GPU_SET";
		break;
	case GMU_OOB_BOOT_SLUMBER:
		request = GMU_OOB_BOOT_SLUMBER_REQUEST;
		ack = GMU_OOB_BOOT_SLUMBER_ACK;
		name = "BOOT_SLUMBER";
		break;
	case GMU_OOB_DCVS_SET:
		request = GMU_OOB_DCVS_REQUEST;
		ack = GMU_OOB_DCVS_ACK;
		name = "GPU_DCVS";
		break;
	default:
		return -EINVAL;
	}

	/* Trigger the requested OOB operation */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);

	/* Wait for the acknowledge interrupt */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & (1 << ack), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev,
			"Timeout waiting for GMU OOB set %s: 0x%x\n",
			name,
			gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));

	/* Clear the acknowledge interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);

	return ret;
}

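/*
 * A sketch of the OOB handshake implemented by a6xx_gmu_set_oob() above and
 * a6xx_gmu_clear_oob() below: the host raises the 'request' bit in
 * HOST2GMU_INTR_SET, polls for the matching 'ack' bit in GMU2HOST_INTR_INFO,
 * then clears the ack. Writing the corresponding *_CLEAR bit afterwards
 * tells the GMU firmware that the host is done with the out of band state.
 */
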
/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	switch (state) {
	case GMU_OOB_GPU_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_GPU_SET_CLEAR);
		break;
	case GMU_OOB_BOOT_SLUMBER:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
		break;
	case GMU_OOB_DCVS_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_DCVS_CLEAR);
		break;
	}
}

/* Enable CPU control of SPTP power collapse */
static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x38) == 0x28, 1, 100);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));

	return ret;
}

/* Disable CPU control of SPTP power collapse */
static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	/* Make sure retention is on */
	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x04), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}

/* Let the GMU know we are starting a boot sequence */
static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
{
	u32 vote;

	/* Let the GMU know we are getting ready for boot */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);

	/* Choose the "default" power level as the highest available */
	vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];

	gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
	gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);

	/* Let the GMU know the boot sequence has started */
	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}

/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
	int ret;

	/* Disable the power counter so the GMU isn't busy */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);

	/* Disable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
		a6xx_sptprac_disable(gmu);

	/* Tell the GMU to get ready to slumber */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);

	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	if (!ret) {
		/* Check to see if the GMU really did slumber */
		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
			!= 0x0f) {
			DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
			ret = -ETIMEDOUT;
		}
	}

	/* Put fence into allow mode */
	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
	return ret;
}

static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
	/* Wait for the register to finish posting */
	wmb();

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
		val & (1 << 1), 100, 10000);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
		return ret;
	}

	ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
		!val, 100, 10000);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
		return ret;
	}

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);

	/* Set up CX GMU counter 0 to count busy ticks */
	gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
	gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);

	/* Enable the power counter */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);

	return 0;
}

static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
		val, val & (1 << 16), 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}

static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
{
	msm_writel(value, ptr + (offset << 2));
}

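/*
 * Note that 'offset' above is a dword (register) offset, not a byte offset:
 * pdc_write(ptr, 2, v) stores v at byte offset 8 from the PDC base.
 */
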
static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name);

static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
	struct platform_device *pdev = to_platform_device(gmu->dev);
	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
	void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");

	if (IS_ERR_OR_NULL(pdcptr) || IS_ERR_OR_NULL(seqptr))
		goto err;

	/* Disable SDE clock gating */
	gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));

	/* Setup RSC PDC handshake for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
	gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);

	/* Load RSC sequencer uCode for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);

	/* Load PDC sequencer uCode for power up and power down sequence */
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);

	/* Set TCS commands used by PDC sequence for low power modes */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);

	/* Setup GPU PDC */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);

	/* ensure no writes happen before the uCode is fully written */
	wmb();

err:
	if (!IS_ERR_OR_NULL(pdcptr))
		devm_iounmap(gmu->dev, pdcptr);
	if (!IS_ERR_OR_NULL(seqptr))
		devm_iounmap(gmu->dev, seqptr);
}

/*
 * The lowest 16 bits of this value are the number of XO clock cycles for main
 * hysteresis which is set at 0x1680 cycles (300 us). The higher 16 bits are
 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
 */

#define GMU_PWR_COL_HYST 0x000a1680

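/*
 * Worked example, assuming the usual 19.2 MHz XO: the main hysteresis of
 * 0x1680 (5760) cycles is 5760 / 19200000 s = 300 us, and the short
 * hysteresis of 0xa (10) cycles is roughly 0.5 us, matching the comment
 * above.
 */
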
/* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
	/* Disable GMU WB/RB buffer */
	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);

	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);

	switch (gmu->idle_level) {
	case GMU_IDLE_STATE_IFPC:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
		/* Fall through */
	case GMU_IDLE_STATE_SPTP:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
	}

	/* Enable RPMh GPU client */
	gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
		A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
		A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}

static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
	static bool rpmh_init;
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	int i, ret;
	u32 chipid;
	u32 *image;

	if (state == GMU_WARM_BOOT) {
		ret = a6xx_rpmh_start(gmu);
		if (ret)
			return ret;
	} else {
		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
			"GMU firmware is not loaded\n"))
			return -ENOENT;

		/* Sanity check the size of the firmware that was loaded */
		if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
			DRM_DEV_ERROR(gmu->dev,
				"GMU firmware is bigger than the available region\n");
			return -EINVAL;
		}

		/* Turn on register retention */
		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);

		/* We only need to load the RPMh microcode once */
		if (!rpmh_init) {
			a6xx_gmu_rpmh_init(gmu);
			rpmh_init = true;
		} else if (state != GMU_RESET) {
			ret = a6xx_rpmh_start(gmu);
			if (ret)
				return ret;
		}

		image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;

		for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
			gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
				image[i]);
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);

	/* Write the iova of the HFI table */
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);

	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
		(1 << 31) | (0xa << 18) | (0xa0));

	chipid = adreno_gpu->rev.core << 24;
	chipid |= adreno_gpu->rev.major << 16;
	chipid |= adreno_gpu->rev.minor << 12;
	chipid |= adreno_gpu->rev.patchid << 8;

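	/*
	 * Worked example of the packing above: a hypothetical A630 with
	 * core 6, major 3, minor 0 and patchid 0 yields
	 * (6 << 24) | (3 << 16) | (0 << 12) | (0 << 8) = 0x06030000.
	 */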
	gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);

	/* Set up the lowest idle level on the GMU */
	a6xx_gmu_power_config(gmu);

	ret = a6xx_gmu_start(gmu);
	if (ret)
		return ret;

	ret = a6xx_gmu_gfx_rail_on(gmu);
	if (ret)
		return ret;

	/* Enable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
		ret = a6xx_sptprac_enable(gmu);
		if (ret)
			return ret;
	}

	ret = a6xx_gmu_hfi_start(gmu);
	if (ret)
		return ret;

	/* FIXME: Do we need this wmb() here? */
	wmb();

	return 0;
}

#define A6XX_HFI_IRQ_MASK \
	(A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)

#define A6XX_GMU_IRQ_MASK \
	(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)

static void a6xx_gmu_irq_enable(struct a6xx_gmu *gmu)
{
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK,
		~A6XX_GMU_IRQ_MASK);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
		~A6XX_HFI_IRQ_MASK);

	enable_irq(gmu->gmu_irq);
	enable_irq(gmu->hfi_irq);
}

static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
	disable_irq(gmu->gmu_irq);
	disable_irq(gmu->hfi_irq);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}

int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int ret;
	u32 val;

	/* Flush all the queues */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts */
	a6xx_gmu_irq_disable(gmu);

	/* Force off SPTP in case the GMU is managing it */
	a6xx_sptprac_disable(gmu);

	/* Make sure there are no outstanding RPMh votes */
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
		(val & 1), 100, 1000);

	/* Force off the GX GDSC */
	regulator_force_disable(gmu->gx);

	/* Disable the resources */
	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
	pm_runtime_put_sync(gmu->dev);

	/* Re-enable the resources */
	pm_runtime_get_sync(gmu->dev);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret)
		goto out;

	a6xx_gmu_irq_enable(gmu);

	ret = a6xx_gmu_fw_start(gmu, GMU_RESET);
	if (!ret)
		ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT);

	/* Set the GPU back to the highest power frequency */
	__a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);

out:
	if (ret)
		a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	return ret;
}

int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int status, ret;

	if (WARN(!gmu->mmio, "The GMU is not set up yet\n"))
		return 0;

	/* Turn on the resources */
	pm_runtime_get_sync(gmu->dev);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret)
		goto out;

	a6xx_gmu_irq_enable(gmu);

	/* Check to see if we are doing a cold or warm boot */
	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
		GMU_WARM_BOOT : GMU_COLD_BOOT;

	ret = a6xx_gmu_fw_start(gmu, status);
	if (ret)
		goto out;

	ret = a6xx_hfi_start(gmu, status);

	/* Set the GPU to the highest power frequency */
	__a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);

out:
	/* Make sure to turn off the boot OOB request on error */
	if (ret)
		a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	return ret;
}

bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
{
	u32 reg;

	if (!gmu->mmio)
		return true;

	reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);

	if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
		return false;

	return true;
}

int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u32 val;

	/*
	 * The GMU may still be in slumber unless the GPU started so check and
	 * skip putting it back into slumber if so
	 */
	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val != 0xf) {
		int ret = a6xx_gmu_wait_for_idle(a6xx_gpu);

		/* Temporary until we can recover safely */
		BUG_ON(ret);

		/* tell the GMU we want to slumber */
		a6xx_gmu_notify_slumber(gmu);

		ret = gmu_poll_timeout(gmu,
			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
			100, 10000);

		/*
		 * Let the user know we failed to slumber but don't worry too
		 * much because we are powering down anyway
		 */
		if (ret)
			DRM_DEV_ERROR(gmu->dev,
				"Unable to slumber GMU: status = 0x%x/0x%x\n",
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
	}

	/* Turn off HFI */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts and mask the hardware */
	a6xx_gmu_irq_disable(gmu);

	/* Tell RPMh to power off the GPU */
	a6xx_rpmh_stop(gmu);

	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);

	pm_runtime_put_sync(gmu->dev);

	return 0;
}

static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
{
	int count, i;
	u64 iova;

	if (IS_ERR_OR_NULL(bo))
		return;

	count = bo->size >> PAGE_SHIFT;
	iova = bo->iova;

	/* Unmap and free each page in the buffer object */
	for (i = 0; i < count; i++, iova += PAGE_SIZE) {
		iommu_unmap(gmu->domain, iova, PAGE_SIZE);
		__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);
}

static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
		size_t size)
{
	struct a6xx_gmu_bo *bo;
	int ret, count, i;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->size = PAGE_ALIGN(size);

	count = bo->size >> PAGE_SHIFT;

	bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
	if (!bo->pages) {
		kfree(bo);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < count; i++) {
		bo->pages[i] = alloc_page(GFP_KERNEL);
		if (!bo->pages[i])
			goto err;
	}

	bo->iova = gmu->uncached_iova_base;

	for (i = 0; i < count; i++) {
		ret = iommu_map(gmu->domain,
			bo->iova + (PAGE_SIZE * i),
			page_to_phys(bo->pages[i]), PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);
		if (ret) {
			DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");

			for (i = i - 1 ; i >= 0; i--)
				iommu_unmap(gmu->domain,
					bo->iova + (PAGE_SIZE * i),
					PAGE_SIZE);

			goto err;
		}
	}

	bo->virt = vmap(bo->pages, count, VM_IOREMAP,
		pgprot_writecombine(PAGE_KERNEL));
	if (!bo->virt)
		goto err;

	/* Align future IOVA addresses on 1MB boundaries */
	gmu->uncached_iova_base += ALIGN(size, SZ_1M);

	return bo;

err:
	for (i = 0; i < count; i++) {
		if (bo->pages[i])
			__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);

	return ERR_PTR(-ENOMEM);
}

static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
	int ret;

	/*
	 * The GMU address space is hardcoded to treat the range
	 * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
	 * between the GMU and the CPU will live in this space
	 */
	gmu->uncached_iova_base = 0x60000000;

	gmu->domain = iommu_domain_alloc(&platform_bus_type);
	if (!gmu->domain)
		return -ENODEV;

	ret = iommu_attach_device(gmu->domain, gmu->dev);
	if (ret) {
		iommu_domain_free(gmu->domain);
		gmu->domain = NULL;
	}

	return ret;
}

/* Return the 'arc-level' for the given frequency */
static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	struct device_node *np;
	u32 val = 0;

	if (!freq)
		return 0;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return 0;

	np = dev_pm_opp_get_of_node(opp);

	if (np) {
		of_property_read_u32(np, "opp-level", &val);
		of_node_put(np);
	}

	dev_pm_opp_put(opp);

	return val;
}

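/*
 * A hypothetical OPP table entry that would feed the lookup above (the
 * frequency and level values here are made up for illustration):
 *
 *	opp-430000000 {
 *		opp-hz = /bits/ 64 <430000000>;
 *		opp-level = <3>;
 *	};
 */
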
static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
		unsigned long *freqs, int freqs_count, const char *id)
{
	int i, j;
	const u16 *pri, *sec;
	size_t pri_count, sec_count;

	pri = cmd_db_read_aux_data(id, &pri_count);
	if (IS_ERR(pri))
		return PTR_ERR(pri);
	/*
	 * The data comes back as an array of unsigned shorts so adjust the
	 * count accordingly
	 */
	pri_count >>= 1;

	sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
	if (IS_ERR(sec))
		return PTR_ERR(sec);
	sec_count >>= 1;

	/* Construct a vote for each frequency */
	for (i = 0; i < freqs_count; i++) {
		u8 pindex = 0, sindex = 0;
		u32 level = a6xx_gmu_get_arc_level(dev, freqs[i]);

		/* Get the primary index that matches the arc level */
		for (j = 0; j < pri_count; j++) {
			if (pri[j] >= level) {
				pindex = j;
				break;
			}
		}

		if (j == pri_count) {
			DRM_DEV_ERROR(dev,
				"Level %u not found in the RPMh list\n",
				level);
			DRM_DEV_ERROR(dev, "Available levels:\n");
			for (j = 0; j < pri_count; j++)
				DRM_DEV_ERROR(dev, "  %u\n", pri[j]);

			return -EINVAL;
		}

		/*
		 * Look for a level in the secondary list that matches. If
		 * nothing fits, use the maximum non zero vote
		 */
		for (j = 0; j < sec_count; j++) {
			if (sec[j] >= level) {
				sindex = j;
				break;
			} else if (sec[j]) {
				sindex = j;
			}
		}

		/* Construct the vote */
		votes[i] = ((pri[pindex] & 0xffff) << 16) |
			(sindex << 8) | pindex;
	}

	return 0;
}

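/*
 * The vote packing used in a6xx_gmu_rpmh_arc_votes_init() above, as far as
 * the code shows: bits [31:16] carry the primary arc level value, bits
 * [15:8] the secondary (MX) table index and bits [7:0] the primary table
 * index. For example, pri[2] == 0x80 with sindex 1 packs to 0x00800102.
 */
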
/*
 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
 * to construct the list of votes on the CPU and send it over. Query the RPMh
 * voltage levels and build the votes
 */
static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret;

	/* Build the GX votes */
	ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
		gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");

	/* Build the CX votes */
	ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
		gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");

	return ret;
}

static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
		u32 size)
{
	int count = dev_pm_opp_get_opp_count(dev);
	struct dev_pm_opp *opp;
	int i, index = 0;
	unsigned long freq = 1;

	/*
	 * The OPP table doesn't contain the "off" frequency level so we need to
	 * add 1 to the table size to account for it
	 */
	if (WARN(count + 1 > size,
		"The GMU frequency table is being truncated\n"))
		count = size - 1;

	/* Set the "off" frequency */
	freqs[index++] = 0;

	for (i = 0; i < count; i++) {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		dev_pm_opp_put(opp);
		freqs[index++] = freq++;
	}

	return index;
}

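/*
 * For example, a device with a three entry OPP table would produce
 * freqs = { 0, f0, f1, f2 } and a return value of 4: index 0 is always the
 * synthetic "off" level that the OPP table itself does not contain.
 */
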
static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret;

	/*
	 * The GMU handles its own frequency switching so build a list of
	 * available frequencies to send during initialization
	 */
	ret = dev_pm_opp_of_add_table(gmu->dev);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
		return ret;
	}

	gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
		gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));

	/*
	 * The GMU also handles GPU frequency switching so build a list
	 * from the GPU OPP table
	 */
	gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
		gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));

	/* Build the list of RPMh votes that we'll send to the GMU */
	return a6xx_gmu_rpmh_votes_init(gmu);
}

static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
	int ret = msm_clk_bulk_get(gmu->dev, &gmu->clocks);

	if (ret < 1)
		return ret;

	gmu->nr_clocks = ret;

	gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
		gmu->nr_clocks, "gmu");

	return 0;
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name)
{
	void __iomem *ret;
	struct resource *res = platform_get_resource_byname(pdev,
			IORESOURCE_MEM, name);

	if (!res) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	return ret;
}

static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
		const char *name, irq_handler_t handler)
{
	int irq, ret;

	irq = platform_get_irq_byname(pdev, name);

	ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
		name, gmu);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s\n", name);
		return ret;
	}

	/* Leave the interrupt disabled until the GMU is brought up */
	disable_irq(irq);

	return irq;
}

void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	if (IS_ERR_OR_NULL(gmu->mmio))
		return;

	pm_runtime_disable(gmu->dev);
	a6xx_gmu_stop(a6xx_gpu);

	a6xx_gmu_irq_disable(gmu);
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	iommu_detach_device(gmu->domain, gmu->dev);

	iommu_domain_free(gmu->domain);
}

int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = of_find_device_by_node(node);
	int ret;

	if (!pdev)
		return -ENODEV;

	gmu->dev = &pdev->dev;

	of_dma_configure(gmu->dev, node, true);

	/* For now, don't do anything fancy until we get our feet under us */
	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;

	pm_runtime_enable(gmu->dev);
	gmu->gx = devm_regulator_get(gmu->dev, "vdd");

	/* Get the list of clocks */
	ret = a6xx_gmu_clocks_probe(gmu);
	if (ret)
		return ret;

	/* Set up the IOMMU context bank */
	ret = a6xx_gmu_memory_probe(gmu);
	if (ret)
		return ret;

	/* Allocate memory for the HFI queues */
	gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->hfi))
		goto err;

	/* Allocate memory for the GMU debug region */
	gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->debug))
		goto err;

	/* Map the GMU registers */
	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
	if (IS_ERR(gmu->mmio))
		goto err;

	/* Get the HFI and GMU interrupts */
	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);

	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
		goto err;

	/* Get the power levels for the GMU and GPU */
	a6xx_gmu_pwrlevels_probe(gmu);

	/* Set up the HFI queues */
	a6xx_hfi_init(gmu);

	return 0;
err:
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	if (gmu->domain) {
		iommu_detach_device(gmu->domain, gmu->dev);

		iommu_domain_free(gmu->domain);
	}

	return -ENODEV;
}