1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2014 The Linux Foundation. All rights reserved. */
5 #ifdef CONFIG_MSM_OCMEM
6 # include <soc/qcom/ocmem.h>
/* Interrupt sources unmasked into RBBM_INT_0_MASK at hw_init time:
 * bus/AHB errors, CP packet/opcode/protection faults, ring and IB
 * completion, cache-flush timestamp, and UCHE out-of-bounds access. */
9 #define A4XX_INT0_MASK \
10 (A4XX_INT0_RBBM_AHB_ERROR | \
11 A4XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
12 A4XX_INT0_CP_T0_PACKET_IN_IB | \
13 A4XX_INT0_CP_OPCODE_ERROR | \
14 A4XX_INT0_CP_RESERVED_BIT_ERROR | \
15 A4XX_INT0_CP_HW_FAULT | \
16 A4XX_INT0_CP_IB1_INT | \
17 A4XX_INT0_CP_IB2_INT | \
18 A4XX_INT0_CP_RB_INT | \
19 A4XX_INT0_CP_REG_PROTECT_FAULT | \
20 A4XX_INT0_CP_AHB_ERROR_HALT | \
21 A4XX_INT0_CACHE_FLUSH_TS | \
22 A4XX_INT0_UCHE_OOB_ACCESS)
24 extern bool hang_debug;
25 static void a4xx_dump(struct msm_gpu *gpu);
26 static bool a4xx_idle(struct msm_gpu *gpu);
/**
29 * a4xx_enable_hwcg() - Program the clock control registers
30 * @device: The adreno device pointer
 */
32 static void a4xx_enable_hwcg(struct msm_gpu *gpu)
34 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
/* Per-instance TP block clock control, hysteresis and delay values */
36 for (i = 0; i < 4; i++)
37 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TP(i), 0x02222202);
38 for (i = 0; i < 4; i++)
39 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_TP(i), 0x00002222);
40 for (i = 0; i < 4; i++)
41 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TP(i), 0x0E739CE7);
42 for (i = 0; i < 4; i++)
43 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TP(i), 0x00111111);
/* Per-instance SP block clock control, hysteresis and delay values */
44 for (i = 0; i < 4; i++)
45 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_SP(i), 0x22222222);
46 for (i = 0; i < 4; i++)
47 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_SP(i), 0x00222222);
48 for (i = 0; i < 4; i++)
49 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_SP(i), 0x00000104);
50 for (i = 0; i < 4; i++)
51 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_SP(i), 0x00000081);
/* UCHE clock gating setup */
52 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_UCHE, 0x22222222);
53 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_UCHE, 0x02222222);
54 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL3_UCHE, 0x00000000);
55 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL4_UCHE, 0x00000000);
56 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_UCHE, 0x00004444);
57 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_UCHE, 0x00001112);
/* Per-instance RB clock control */
58 for (i = 0; i < 4; i++)
59 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_RB(i), 0x22222222);
61 /* Disable L1 clocking in A420 due to CCU issues with it */
62 for (i = 0; i < 4; i++) {
63 if (adreno_is_a420(adreno_gpu)) {
64 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
/* NOTE(review): the non-a420 else branch appears truncated here — confirm
 * against the complete source before modifying */
67 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
/* MARB/CCU clock control, hysteresis and delay (values truncated here) */
72 for (i = 0; i < 4; i++) {
73 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i),
77 for (i = 0; i < 4; i++) {
78 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i),
82 for (i = 0; i < 4; i++) {
83 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i),
/* Remaining fixed-function blocks: GPC, COM/DCOM, TSE/RAS/RBBM, HLSQ */
87 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_MODE_GPC, 0x02222222);
88 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_GPC, 0x04100104);
89 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_GPC, 0x00022222);
90 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM, 0x00000022);
91 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM, 0x0000010F);
92 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM, 0x00000022);
93 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM, 0x00222222);
94 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00004104);
95 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222);
96 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_HLSQ , 0x00000000);
97 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000);
98 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00220000);
99 /* Early A430's have a timing issue with SP/TP power collapse;
100 disabling HW clock gating prevents it. */
101 if (adreno_is_a430(adreno_gpu) && adreno_gpu->rev.patchid < 2)
102 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0);
/* NOTE(review): upstream has an else here; as shown this write would
 * clobber the early-a430 workaround above — verify against full source */
104 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
105 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2, 0);
/* Submit the CP_ME_INIT packet on ring 0 and wait for the GPU to idle.
 * Returns true if the CP micro engine initialized successfully. */
109 static bool a4xx_me_init(struct msm_gpu *gpu)
111 struct msm_ringbuffer *ring = gpu->rb[0];
/* CP_ME_INIT: 17 dwords of static initialization state */
113 OUT_PKT3(ring, CP_ME_INIT, 17);
114 OUT_RING(ring, 0x000003f7);
115 OUT_RING(ring, 0x00000000);
116 OUT_RING(ring, 0x00000000);
117 OUT_RING(ring, 0x00000000);
118 OUT_RING(ring, 0x00000080);
119 OUT_RING(ring, 0x00000100);
120 OUT_RING(ring, 0x00000180);
121 OUT_RING(ring, 0x00006600);
122 OUT_RING(ring, 0x00000150);
123 OUT_RING(ring, 0x0000014e);
124 OUT_RING(ring, 0x00000154);
125 OUT_RING(ring, 0x00000001);
126 OUT_RING(ring, 0x00000000);
127 OUT_RING(ring, 0x00000000);
128 OUT_RING(ring, 0x00000000);
129 OUT_RING(ring, 0x00000000);
130 OUT_RING(ring, 0x00000000);
/* kick the ring, then block until the packet has been consumed */
132 gpu->funcs->flush(gpu, ring);
133 return a4xx_idle(gpu);
/* One-time hardware bring-up: program VBIF/QoS, error reporting, perf
 * counters, clock gating, CP register protection, and load the PM4/PFP
 * microcode before releasing the CP micro engine.
 * Returns 0 on success or a negative errno. */
136 static int a4xx_hw_init(struct msm_gpu *gpu)
138 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
139 struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
/* per-variant VBIF arbitration / read-write limit tuning */
143 if (adreno_is_a420(adreno_gpu)) {
144 gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
145 gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
146 gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
147 gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
148 gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
149 gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
150 gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
151 gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
152 } else if (adreno_is_a430(adreno_gpu)) {
153 gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
154 gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
155 gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
156 gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
157 gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
158 gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
163 /* Make all blocks contribute to the GPU BUSY perf counter */
164 gpu_write(gpu, REG_A4XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);
166 /* Tune the hysteresis counters for SP and CP idle detection */
167 gpu_write(gpu, REG_A4XX_RBBM_SP_HYST_CNT, 0x10);
168 gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
170 if (adreno_is_a430(adreno_gpu)) {
171 gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2, 0x30);
174 /* Enable the RBBM error reporting bits */
175 gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL0, 0x00000001);
177 /* Enable AHB error reporting */
178 gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL1, 0xa6ffffff);
180 /* Enable power counters */
181 gpu_write(gpu, REG_A4XX_RBBM_RBBM_CTL, 0x00000030);
/*
184 * Turn on hang detection - this spews a lot of useful information
185 * into the RBBM registers on a hang:
 */
187 gpu_write(gpu, REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL,
/* GMEM base is programmed in 16KB units, hence the >> 14 */
190 gpu_write(gpu, REG_A4XX_RB_GMEM_BASE_ADDR,
191 (unsigned int)(a4xx_gpu->ocmem_base >> 14));
193 /* Turn on performance counters: */
194 gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);
196 /* use the first CP counter for timestamp queries.. userspace may set
197 * this as well but it selects the same counter/countable:
 */
199 gpu_write(gpu, REG_A4XX_CP_PERFCTR_CP_SEL_0, CP_ALWAYS_COUNT);
201 if (adreno_is_a430(adreno_gpu))
202 gpu_write(gpu, REG_A4XX_UCHE_CACHE_WAYS_VFD, 0x07);
204 /* Disable L2 bypass to avoid UCHE out of bounds errors */
205 gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
206 gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);
208 gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
209 (adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));
211 /* On A430 enable SP regfile sleep for power savings */
212 /* TODO downstream does this for !420, so maybe applies for 405 too? */
213 if (!adreno_is_a420(adreno_gpu)) {
214 gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0,
216 gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1,
220 a4xx_enable_hwcg(gpu);
/*
223 * For A420 set RBBM_CLOCK_DELAY_HLSQ.CGC_HLSQ_TP_EARLY_CYC >= 2
224 * due to timing issue with HLSQ_TP_CLK_EN
 */
226 if (adreno_is_a420(adreno_gpu)) {
228 val = gpu_read(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ);
229 val &= ~A4XX_CGC_HLSQ_EARLY_CYC__MASK;
230 val |= 2 << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT;
231 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, val);
234 /* setup access protection: */
235 gpu_write(gpu, REG_A4XX_CP_PROTECT_CTRL, 0x00000007);
/* protected register ranges; each CP_PROTECT entry appears to encode a
 * base offset plus a range size — confirm against register docs */
238 gpu_write(gpu, REG_A4XX_CP_PROTECT(0), 0x62000010);
239 gpu_write(gpu, REG_A4XX_CP_PROTECT(1), 0x63000020);
240 gpu_write(gpu, REG_A4XX_CP_PROTECT(2), 0x64000040);
241 gpu_write(gpu, REG_A4XX_CP_PROTECT(3), 0x65000080);
242 gpu_write(gpu, REG_A4XX_CP_PROTECT(4), 0x66000100);
243 gpu_write(gpu, REG_A4XX_CP_PROTECT(5), 0x64000200);
246 gpu_write(gpu, REG_A4XX_CP_PROTECT(6), 0x67000800);
247 gpu_write(gpu, REG_A4XX_CP_PROTECT(7), 0x64001600);
251 gpu_write(gpu, REG_A4XX_CP_PROTECT(8), 0x60003300);
254 gpu_write(gpu, REG_A4XX_CP_PROTECT(9), 0x60003800);
257 gpu_write(gpu, REG_A4XX_CP_PROTECT(10), 0x61003980);
260 gpu_write(gpu, REG_A4XX_CP_PROTECT(11), 0x6e010000);
/* unmask the interrupts we handle in a4xx_irq() */
262 gpu_write(gpu, REG_A4XX_RBBM_INT_0_MASK, A4XX_INT0_MASK);
264 ret = adreno_hw_init(gpu);
/* Load PM4 microcode: word 0 is the version header, not uploaded */
269 ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
270 len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
271 DBG("loading PM4 ucode version: %u", ptr[0]);
272 gpu_write(gpu, REG_A4XX_CP_ME_RAM_WADDR, 0);
273 for (i = 1; i < len; i++)
274 gpu_write(gpu, REG_A4XX_CP_ME_RAM_DATA, ptr[i]);
/* Load PFP microcode, same layout as PM4 */
277 ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
278 len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
279 DBG("loading PFP ucode version: %u", ptr[0]);
281 gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_ADDR, 0);
282 for (i = 1; i < len; i++)
283 gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_DATA, ptr[i]);
285 /* clear ME_HALT to start micro engine */
286 gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);
288 return a4xx_me_init(gpu) ? 0 : -EINVAL;
/* Recover from a GPU hang: log diagnostic state, then pulse the RBBM
 * software reset. */
291 static void a4xx_recover(struct msm_gpu *gpu)
295 adreno_dump_info(gpu);
/* CP scratch registers often hold breadcrumbs useful for hang triage */
297 for (i = 0; i < 8; i++) {
298 printk("CP_SCRATCH_REG%d: %u\n", i,
299 gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
302 /* dump registers before resetting gpu, if enabled: */
/* assert then deassert SW reset; the intermediate read presumably
 * posts the write before deasserting — TODO confirm */
306 gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 1);
307 gpu_read(gpu, REG_A4XX_RBBM_SW_RESET_CMD);
308 gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 0);
/* Tear down the GPU instance: release adreno base state and return the
 * OCMEM-backed GMEM carveout (when CONFIG_MSM_OCMEM is enabled). */
312 static void a4xx_destroy(struct msm_gpu *gpu)
314 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
315 struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
317 DBG("%s", gpu->name);
319 adreno_gpu_cleanup(adreno_gpu);
321 #ifdef CONFIG_MSM_OCMEM
/* free the carveout allocated in a4xx_gpu_init() */
322 if (a4xx_gpu->ocmem_base)
323 ocmem_free(OCMEM_GRAPHICS, a4xx_gpu->ocmem_hdl);
/* Returns true once the ringbuffer has drained and RBBM_STATUS no longer
 * reports the GPU busy; logs an error on timeout. */
329 static bool a4xx_idle(struct msm_gpu *gpu)
331 /* wait for ringbuffer to drain: */
332 if (!adreno_idle(gpu, gpu->rb[0]))
335 /* then wait for GPU to finish: */
336 if (spin_until(!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
337 A4XX_RBBM_STATUS_GPU_BUSY))) {
338 DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
339 /* TODO maybe we need to reset GPU here to recover from hang? */
/* Interrupt handler: read RBBM_INT_0_STATUS, decode CP protected-mode
 * faults into a readable message, then ack the handled bits. */
346 static irqreturn_t a4xx_irq(struct msm_gpu *gpu)
350 status = gpu_read(gpu, REG_A4XX_RBBM_INT_0_STATUS);
351 DBG("%s: Int status %08x", gpu->name, status);
353 if (status & A4XX_INT0_CP_REG_PROTECT_FAULT) {
354 uint32_t reg = gpu_read(gpu, REG_A4XX_CP_PROTECT_STATUS);
/* bit 24 distinguishes write vs read faults; the masked low bits,
 * shifted down by 2, give the faulting register offset */
355 printk("CP | Protected mode error| %s | addr=%x\n",
356 reg & (1 << 24) ? "WRITE" : "READ",
357 (reg & 0xFFFFF) >> 2);
/* ack every status bit we observed */
360 gpu_write(gpu, REG_A4XX_RBBM_INT_CLEAR_CMD, status);
/* Register offsets captured for debug dumps and GPU coredump (installed
 * as adreno_gpu->registers in a4xx_gpu_init()); appears to be inclusive
 * (start, end) pairs per the adreno register-dump convention. */
367 static const unsigned int a4xx_registers[] = {
369 0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026,
370 0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066,
371 0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF,
373 0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B,
376 0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51,
378 0x0C80, 0x0C81, 0x0C88, 0x0C8F,
380 0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2,
382 0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
386 0x0E60, 0x0E61, 0x0E63, 0x0E68,
388 0x0E80, 0x0E84, 0x0E88, 0x0E95,
390 0x1000, 0x1000, 0x1002, 0x1002, 0x1004, 0x1004, 0x1008, 0x100A,
391 0x100C, 0x100D, 0x100F, 0x1010, 0x1012, 0x1016, 0x1024, 0x1024,
392 0x1027, 0x1027, 0x1100, 0x1100, 0x1102, 0x1102, 0x1104, 0x1104,
393 0x1110, 0x1110, 0x1112, 0x1116, 0x1124, 0x1124, 0x1300, 0x1300,
396 0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E,
398 0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7,
400 0x2200, 0x2204, 0x2208, 0x22A9,
402 0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E,
404 0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7,
406 0x2600, 0x2604, 0x2608, 0x26A9,
408 0x2C00, 0x2C01, 0x2C10, 0x2C10, 0x2C12, 0x2C16, 0x2C1D, 0x2C20,
409 0x2C28, 0x2C28, 0x2C30, 0x2C30, 0x2C32, 0x2C36, 0x2C40, 0x2C40,
410 0x2C50, 0x2C50, 0x2C52, 0x2C56, 0x2C80, 0x2C80, 0x2C94, 0x2C95,
412 0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x301D, 0x3020, 0x3022,
413 0x3024, 0x3026, 0x3028, 0x302A, 0x302C, 0x302D, 0x3030, 0x3031,
414 0x3034, 0x3036, 0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040,
415 0x3049, 0x3049, 0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068,
416 0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
417 0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
418 0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
419 0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
420 0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x330C, 0x330C,
421 0x3310, 0x3310, 0x3400, 0x3401, 0x3410, 0x3410, 0x3412, 0x3416,
422 0x341D, 0x3420, 0x3428, 0x3428, 0x3430, 0x3430, 0x3432, 0x3436,
423 0x3440, 0x3440, 0x3450, 0x3450, 0x3452, 0x3456, 0x3480, 0x3480,
424 0x3494, 0x3495, 0x4000, 0x4000, 0x4002, 0x4002, 0x4004, 0x4004,
425 0x4008, 0x400A, 0x400C, 0x400D, 0x400F, 0x4012, 0x4014, 0x4016,
426 0x401D, 0x401D, 0x4020, 0x4027, 0x4060, 0x4062, 0x4200, 0x4200,
427 0x4300, 0x4300, 0x4400, 0x4400, 0x4500, 0x4500, 0x4800, 0x4802,
428 0x480F, 0x480F, 0x4811, 0x4811, 0x4813, 0x4813, 0x4815, 0x4816,
429 0x482B, 0x482B, 0x4857, 0x4857, 0x4883, 0x4883, 0x48AF, 0x48AF,
430 0x48C5, 0x48C5, 0x48E5, 0x48E5, 0x4905, 0x4905, 0x4925, 0x4925,
431 0x4945, 0x4945, 0x4950, 0x4950, 0x495B, 0x495B, 0x4980, 0x498E,
432 0x4B00, 0x4B00, 0x4C00, 0x4C00, 0x4D00, 0x4D00, 0x4E00, 0x4E00,
433 0x4E80, 0x4E80, 0x4F00, 0x4F00, 0x4F08, 0x4F08, 0x4F10, 0x4F10,
434 0x4F18, 0x4F18, 0x4F20, 0x4F20, 0x4F30, 0x4F30, 0x4F60, 0x4F60,
435 0x4F80, 0x4F81, 0x4F88, 0x4F89, 0x4FEE, 0x4FEE, 0x4FF3, 0x4FF3,
436 0x6000, 0x6001, 0x6008, 0x600F, 0x6014, 0x6016, 0x6018, 0x601B,
437 0x61FD, 0x61FD, 0x623C, 0x623C, 0x6380, 0x6380, 0x63A0, 0x63A0,
438 0x63C0, 0x63C1, 0x63C8, 0x63C9, 0x63D0, 0x63D4, 0x63D6, 0x63D6,
439 0x63EE, 0x63EE, 0x6400, 0x6401, 0x6408, 0x640F, 0x6414, 0x6416,
440 0x6418, 0x641B, 0x65FD, 0x65FD, 0x663C, 0x663C, 0x6780, 0x6780,
441 0x67A0, 0x67A0, 0x67C0, 0x67C1, 0x67C8, 0x67C9, 0x67D0, 0x67D4,
442 0x67D6, 0x67D6, 0x67EE, 0x67EE, 0x6800, 0x6801, 0x6808, 0x680F,
443 0x6814, 0x6816, 0x6818, 0x681B, 0x69FD, 0x69FD, 0x6A3C, 0x6A3C,
444 0x6B80, 0x6B80, 0x6BA0, 0x6BA0, 0x6BC0, 0x6BC1, 0x6BC8, 0x6BC9,
445 0x6BD0, 0x6BD4, 0x6BD6, 0x6BD6, 0x6BEE, 0x6BEE,
/* Capture a GPU state snapshot for debugfs/coredump. Caller owns the
 * returned state (released via the funcs table's gpu_state_put).
 * Returns ERR_PTR(-ENOMEM) if allocation fails. */
449 static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu)
451 struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
454 return ERR_PTR(-ENOMEM);
456 adreno_gpu_state_get(gpu, state);
/* add the a4xx-specific RBBM status on top of the generic capture */
458 state->rbbm_status = gpu_read(gpu, REG_A4XX_RBBM_STATUS);
463 /* Register offset defines for A4XX, in order of enum adreno_regs */
464 static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
465 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
/* the *_HI halves are skipped — presumably a4xx uses 32-bit addresses;
 * confirm against the register database */
466 REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
467 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR),
468 REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
469 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR),
470 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
471 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
/* Print the RBBM status register for hang debugging. */
474 static void a4xx_dump(struct msm_gpu *gpu)
476 printk("status: %08x\n",
477 gpu_read(gpu, REG_A4XX_RBBM_STATUS));
/* Resume power: generic msm resume, then on a430 un-collapse the SP/TP
 * rails and poll until the hardware reports them powered on. */
481 static int a4xx_pm_resume(struct msm_gpu *gpu) {
482 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
485 ret = msm_gpu_pm_resume(gpu);
489 if (adreno_is_a430(adreno_gpu)) {
491 /* Set the default register values; set SW_COLLAPSE to 0 */
492 gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778000);
/* busy-poll the power status until SP/TP report on */
495 reg = gpu_read(gpu, REG_A4XX_RBBM_POWER_STATUS);
496 } while (!(reg & A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON));
/* Suspend power: generic msm suspend, then on a430 request SW collapse
 * of the SP/TP rails. */
501 static int a4xx_pm_suspend(struct msm_gpu *gpu) {
502 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
505 ret = msm_gpu_pm_suspend(gpu);
509 if (adreno_is_a430(adreno_gpu)) {
510 /* Set the default register values; set SW_COLLAPSE to 1 */
511 gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778001);
/* Read the 64-bit CP_0 perf counter into *value; hw_init selects
 * CP_ALWAYS_COUNT on this counter so it serves as a timestamp. */
516 static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
518 *value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO,
519 REG_A4XX_RBBM_PERFCTR_CP_0_HI);
/* a4xx GPU function table: a4xx-specific hooks where needed, generic
 * adreno helpers for the rest. */
524 static const struct adreno_gpu_funcs funcs = {
526 .get_param = adreno_get_param,
527 .hw_init = a4xx_hw_init,
528 .pm_suspend = a4xx_pm_suspend,
529 .pm_resume = a4xx_pm_resume,
530 .recover = a4xx_recover,
531 .submit = adreno_submit,
532 .flush = adreno_flush,
533 .active_ring = adreno_active_ring,
535 .destroy = a4xx_destroy,
536 #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
539 .gpu_state_get = a4xx_gpu_state_get,
540 .gpu_state_put = adreno_gpu_state_put,
542 .get_timestamp = a4xx_get_timestamp,
/* Probe-time constructor: allocate the a4xx_gpu wrapper, initialize the
 * adreno base with the a4xx funcs/register tables, and carve GMEM out of
 * OCMEM when available. Returns the new msm_gpu (error handling lines
 * are truncated in this view). */
545 struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
547 struct a4xx_gpu *a4xx_gpu = NULL;
548 struct adreno_gpu *adreno_gpu;
550 struct msm_drm_private *priv = dev->dev_private;
551 struct platform_device *pdev = priv->gpu_pdev;
555 DRM_DEV_ERROR(dev->dev, "no a4xx device\n");
560 a4xx_gpu = kzalloc(sizeof(*a4xx_gpu), GFP_KERNEL);
566 adreno_gpu = &a4xx_gpu->base;
567 gpu = &adreno_gpu->base;
/* no generic perf counters exposed for a4xx */
569 gpu->perfcntrs = NULL;
570 gpu->num_perfcntrs = 0;
572 adreno_gpu->registers = a4xx_registers;
573 adreno_gpu->reg_offsets = a4xx_register_offsets;
575 ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
579 /* if needed, allocate gmem: */
580 if (adreno_is_a4xx(adreno_gpu)) {
581 #ifdef CONFIG_MSM_OCMEM
582 /* TODO this is different/missing upstream: */
583 struct ocmem_buf *ocmem_hdl =
584 ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);
/* remember handle/base for hw_init programming and a4xx_destroy() */
586 a4xx_gpu->ocmem_hdl = ocmem_hdl;
587 a4xx_gpu->ocmem_base = ocmem_hdl->addr;
588 adreno_gpu->gmem = ocmem_hdl->len;
589 DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
590 a4xx_gpu->ocmem_base);
595 /* TODO we think it is possible to configure the GPU to
596 * restrict access to VRAM carveout. But the required
597 * registers are unknown. For now just bail out and
598 * limp along with just modesetting. If it turns out
599 * to not be possible to restrict access, then we must
600 * implement a cmdstream validator.
 */
602 DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
/* error path: tear down the partially-initialized gpu */
611 a4xx_destroy(&a4xx_gpu->base.base);