Commit | Line | Data |
---|---|---|
43b3cd99 AD |
1 | /* |
2 | * Copyright 2011 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | * Authors: Alex Deucher | |
23 | */ | |
0f0de06c AD |
24 | #include <linux/firmware.h> |
25 | #include <linux/platform_device.h> | |
26 | #include <linux/slab.h> | |
27 | #include <linux/module.h> | |
760285e7 | 28 | #include <drm/drmP.h> |
43b3cd99 AD |
29 | #include "radeon.h" |
30 | #include "radeon_asic.h" | |
760285e7 | 31 | #include <drm/radeon_drm.h> |
43b3cd99 AD |
32 | #include "sid.h" |
33 | #include "atom.h" | |
48c0c902 | 34 | #include "si_blit_shaders.h" |
43b3cd99 | 35 | |
0f0de06c AD |
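/* Firmware image sizes below are in 32-bit dwords; si_init_microcode()
 * multiplies each by 4 to validate the byte length of the loaded .bin.
 */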
36 | #define SI_PFP_UCODE_SIZE 2144 |
37 | #define SI_PM4_UCODE_SIZE 2144 | |
38 | #define SI_CE_UCODE_SIZE 2144 | |
39 | #define SI_RLC_UCODE_SIZE 2048 | |
40 | #define SI_MC_UCODE_SIZE 7769 | |
41 | ||
42 | MODULE_FIRMWARE("radeon/TAHITI_pfp.bin"); | |
43 | MODULE_FIRMWARE("radeon/TAHITI_me.bin"); | |
44 | MODULE_FIRMWARE("radeon/TAHITI_ce.bin"); | |
45 | MODULE_FIRMWARE("radeon/TAHITI_mc.bin"); | |
46 | MODULE_FIRMWARE("radeon/TAHITI_rlc.bin"); | |
47 | MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin"); | |
48 | MODULE_FIRMWARE("radeon/PITCAIRN_me.bin"); | |
49 | MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin"); | |
50 | MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin"); | |
51 | MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin"); | |
52 | MODULE_FIRMWARE("radeon/VERDE_pfp.bin"); | |
53 | MODULE_FIRMWARE("radeon/VERDE_me.bin"); | |
54 | MODULE_FIRMWARE("radeon/VERDE_ce.bin"); | |
55 | MODULE_FIRMWARE("radeon/VERDE_mc.bin"); | |
56 | MODULE_FIRMWARE("radeon/VERDE_rlc.bin"); | |
57 | ||
25a857fb AD |
58 | extern int r600_ih_ring_alloc(struct radeon_device *rdev); |
59 | extern void r600_ih_ring_fini(struct radeon_device *rdev); | |
0a96d72b | 60 | extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev); |
c476dde2 AD |
61 | extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save); |
62 | extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save); | |
ca7db22b | 63 | extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev); |
1c534671 | 64 | extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev); |
0a96d72b | 65 | |
1bd47d2e AD |
66 | /* get temperature in millidegrees */ |
67 | int si_get_temp(struct radeon_device *rdev) | |
68 | { | |
69 | u32 temp; | |
70 | int actual_temp = 0; | |
71 | ||
72 | temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >> | |
73 | CTF_TEMP_SHIFT; | |
74 | ||
75 | if (temp & 0x200) | |
76 | actual_temp = 255; | |
77 | else | |
78 | actual_temp = temp & 0x1ff; | |
79 | ||
80 | actual_temp = (actual_temp * 1000); | |
81 | ||
82 | return actual_temp; | |
83 | } | |
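/* Illustration of the conversion above: CTF_TEMP is treated as a 9-bit
 * value; if bit 9 (0x200) is set the reading saturates to 255 C, otherwise
 * the low 9 bits are taken directly, e.g. a raw 0x40 (64 C) is returned
 * as 64000 millidegrees.
 */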
84 | ||
8b074dd6 AD |
85 | #define TAHITI_IO_MC_REGS_SIZE 36 |
86 | ||
87 | static const u32 tahiti_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = { | |
88 | {0x0000006f, 0x03044000}, | |
89 | {0x00000070, 0x0480c018}, | |
90 | {0x00000071, 0x00000040}, | |
91 | {0x00000072, 0x01000000}, | |
92 | {0x00000074, 0x000000ff}, | |
93 | {0x00000075, 0x00143400}, | |
94 | {0x00000076, 0x08ec0800}, | |
95 | {0x00000077, 0x040000cc}, | |
96 | {0x00000079, 0x00000000}, | |
97 | {0x0000007a, 0x21000409}, | |
98 | {0x0000007c, 0x00000000}, | |
99 | {0x0000007d, 0xe8000000}, | |
100 | {0x0000007e, 0x044408a8}, | |
101 | {0x0000007f, 0x00000003}, | |
102 | {0x00000080, 0x00000000}, | |
103 | {0x00000081, 0x01000000}, | |
104 | {0x00000082, 0x02000000}, | |
105 | {0x00000083, 0x00000000}, | |
106 | {0x00000084, 0xe3f3e4f4}, | |
107 | {0x00000085, 0x00052024}, | |
108 | {0x00000087, 0x00000000}, | |
109 | {0x00000088, 0x66036603}, | |
110 | {0x00000089, 0x01000000}, | |
111 | {0x0000008b, 0x1c0a0000}, | |
112 | {0x0000008c, 0xff010000}, | |
113 | {0x0000008e, 0xffffefff}, | |
114 | {0x0000008f, 0xfff3efff}, | |
115 | {0x00000090, 0xfff3efbf}, | |
116 | {0x00000094, 0x00101101}, | |
117 | {0x00000095, 0x00000fff}, | |
118 | {0x00000096, 0x00116fff}, | |
119 | {0x00000097, 0x60010000}, | |
120 | {0x00000098, 0x10010000}, | |
121 | {0x00000099, 0x00006000}, | |
122 | {0x0000009a, 0x00001000}, | |
123 | {0x0000009f, 0x00a77400} | |
124 | }; | |
125 | ||
126 | static const u32 pitcairn_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = { | |
127 | {0x0000006f, 0x03044000}, | |
128 | {0x00000070, 0x0480c018}, | |
129 | {0x00000071, 0x00000040}, | |
130 | {0x00000072, 0x01000000}, | |
131 | {0x00000074, 0x000000ff}, | |
132 | {0x00000075, 0x00143400}, | |
133 | {0x00000076, 0x08ec0800}, | |
134 | {0x00000077, 0x040000cc}, | |
135 | {0x00000079, 0x00000000}, | |
136 | {0x0000007a, 0x21000409}, | |
137 | {0x0000007c, 0x00000000}, | |
138 | {0x0000007d, 0xe8000000}, | |
139 | {0x0000007e, 0x044408a8}, | |
140 | {0x0000007f, 0x00000003}, | |
141 | {0x00000080, 0x00000000}, | |
142 | {0x00000081, 0x01000000}, | |
143 | {0x00000082, 0x02000000}, | |
144 | {0x00000083, 0x00000000}, | |
145 | {0x00000084, 0xe3f3e4f4}, | |
146 | {0x00000085, 0x00052024}, | |
147 | {0x00000087, 0x00000000}, | |
148 | {0x00000088, 0x66036603}, | |
149 | {0x00000089, 0x01000000}, | |
150 | {0x0000008b, 0x1c0a0000}, | |
151 | {0x0000008c, 0xff010000}, | |
152 | {0x0000008e, 0xffffefff}, | |
153 | {0x0000008f, 0xfff3efff}, | |
154 | {0x00000090, 0xfff3efbf}, | |
155 | {0x00000094, 0x00101101}, | |
156 | {0x00000095, 0x00000fff}, | |
157 | {0x00000096, 0x00116fff}, | |
158 | {0x00000097, 0x60010000}, | |
159 | {0x00000098, 0x10010000}, | |
160 | {0x00000099, 0x00006000}, | |
161 | {0x0000009a, 0x00001000}, | |
162 | {0x0000009f, 0x00a47400} | |
163 | }; | |
164 | ||
165 | static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = { | |
166 | {0x0000006f, 0x03044000}, | |
167 | {0x00000070, 0x0480c018}, | |
168 | {0x00000071, 0x00000040}, | |
169 | {0x00000072, 0x01000000}, | |
170 | {0x00000074, 0x000000ff}, | |
171 | {0x00000075, 0x00143400}, | |
172 | {0x00000076, 0x08ec0800}, | |
173 | {0x00000077, 0x040000cc}, | |
174 | {0x00000079, 0x00000000}, | |
175 | {0x0000007a, 0x21000409}, | |
176 | {0x0000007c, 0x00000000}, | |
177 | {0x0000007d, 0xe8000000}, | |
178 | {0x0000007e, 0x044408a8}, | |
179 | {0x0000007f, 0x00000003}, | |
180 | {0x00000080, 0x00000000}, | |
181 | {0x00000081, 0x01000000}, | |
182 | {0x00000082, 0x02000000}, | |
183 | {0x00000083, 0x00000000}, | |
184 | {0x00000084, 0xe3f3e4f4}, | |
185 | {0x00000085, 0x00052024}, | |
186 | {0x00000087, 0x00000000}, | |
187 | {0x00000088, 0x66036603}, | |
188 | {0x00000089, 0x01000000}, | |
189 | {0x0000008b, 0x1c0a0000}, | |
190 | {0x0000008c, 0xff010000}, | |
191 | {0x0000008e, 0xffffefff}, | |
192 | {0x0000008f, 0xfff3efff}, | |
193 | {0x00000090, 0xfff3efbf}, | |
194 | {0x00000094, 0x00101101}, | |
195 | {0x00000095, 0x00000fff}, | |
196 | {0x00000096, 0x00116fff}, | |
197 | {0x00000097, 0x60010000}, | |
198 | {0x00000098, 0x10010000}, | |
199 | {0x00000099, 0x00006000}, | |
200 | {0x0000009a, 0x00001000}, | |
201 | {0x0000009f, 0x00a37400} | |
202 | }; | |
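/* The three tables above are identical except for the final entry
 * (index 0x9f): 0x00a77400 on Tahiti, 0x00a47400 on Pitcairn and
 * 0x00a37400 on Verde. Each row is an (index, value) pair written via
 * MC_SEQ_IO_DEBUG_INDEX/MC_SEQ_IO_DEBUG_DATA in si_mc_load_microcode()
 * below.
 */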
203 | ||
204 | /* ucode loading */ | |
205 | static int si_mc_load_microcode(struct radeon_device *rdev) | |
206 | { | |
207 | const __be32 *fw_data; | |
208 | u32 running, blackout = 0; | |
209 | u32 *io_mc_regs; | |
210 | int i, ucode_size, regs_size; | |
211 | ||
212 | if (!rdev->mc_fw) | |
213 | return -EINVAL; | |
214 | ||
215 | switch (rdev->family) { | |
216 | case CHIP_TAHITI: | |
217 | io_mc_regs = (u32 *)&tahiti_io_mc_regs; | |
218 | ucode_size = SI_MC_UCODE_SIZE; | |
219 | regs_size = TAHITI_IO_MC_REGS_SIZE; | |
220 | break; | |
221 | case CHIP_PITCAIRN: | |
222 | io_mc_regs = (u32 *)&pitcairn_io_mc_regs; | |
223 | ucode_size = SI_MC_UCODE_SIZE; | |
224 | regs_size = TAHITI_IO_MC_REGS_SIZE; | |
225 | break; | |
226 | case CHIP_VERDE: | |
227 | default: | |
228 | io_mc_regs = (u32 *)&verde_io_mc_regs; | |
229 | ucode_size = SI_MC_UCODE_SIZE; | |
230 | regs_size = TAHITI_IO_MC_REGS_SIZE; | |
231 | break; | |
232 | } | |
233 | ||
234 | running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; | |
235 | ||
236 | if (running == 0) { | |
237 | if (running) { | |
238 | blackout = RREG32(MC_SHARED_BLACKOUT_CNTL); | |
239 | WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1); | |
240 | } | |
241 | ||
242 | /* reset the engine and set to writable */ | |
243 | WREG32(MC_SEQ_SUP_CNTL, 0x00000008); | |
244 | WREG32(MC_SEQ_SUP_CNTL, 0x00000010); | |
245 | ||
246 | /* load mc io regs */ | |
247 | for (i = 0; i < regs_size; i++) { | |
248 | WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]); | |
249 | WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]); | |
250 | } | |
251 | /* load the MC ucode */ | |
252 | fw_data = (const __be32 *)rdev->mc_fw->data; | |
253 | for (i = 0; i < ucode_size; i++) | |
254 | WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++)); | |
255 | ||
256 | /* put the engine back into the active state */ | |
257 | WREG32(MC_SEQ_SUP_CNTL, 0x00000008); | |
258 | WREG32(MC_SEQ_SUP_CNTL, 0x00000004); | |
259 | WREG32(MC_SEQ_SUP_CNTL, 0x00000001); | |
260 | ||
261 | /* wait for training to complete */ | |
262 | for (i = 0; i < rdev->usec_timeout; i++) { | |
263 | if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0) | |
264 | break; | |
265 | udelay(1); | |
266 | } | |
267 | for (i = 0; i < rdev->usec_timeout; i++) { | |
268 | if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1) | |
269 | break; | |
270 | udelay(1); | |
271 | } | |
272 | ||
273 | if (running) | |
274 | WREG32(MC_SHARED_BLACKOUT_CNTL, blackout); | |
275 | } | |
276 | ||
277 | return 0; | |
278 | } | |
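/* Note on the sequence above: MC_SEQ_SUP_CNTL is toggled through 0x8/0x10
 * to reset the sequencer and open it for writes, then 0x8/0x4/0x1 to
 * restart it on the new microcode. The blackout save/restore is dead code
 * as written: the inner if (running) can never be taken inside the
 * enclosing if (running == 0) branch.
 */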
279 | ||
0f0de06c AD |
280 | static int si_init_microcode(struct radeon_device *rdev) |
281 | { | |
282 | struct platform_device *pdev; | |
283 | const char *chip_name; | |
284 | const char *rlc_chip_name; | |
285 | size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size; | |
286 | char fw_name[30]; | |
287 | int err; | |
288 | ||
289 | DRM_DEBUG("\n"); | |
290 | ||
291 | pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); | |
292 | err = IS_ERR(pdev); | |
293 | if (err) { | |
294 | printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); | |
295 | return -EINVAL; | |
296 | } | |
297 | ||
298 | switch (rdev->family) { | |
299 | case CHIP_TAHITI: | |
300 | chip_name = "TAHITI"; | |
301 | rlc_chip_name = "TAHITI"; | |
302 | pfp_req_size = SI_PFP_UCODE_SIZE * 4; | |
303 | me_req_size = SI_PM4_UCODE_SIZE * 4; | |
304 | ce_req_size = SI_CE_UCODE_SIZE * 4; | |
305 | rlc_req_size = SI_RLC_UCODE_SIZE * 4; | |
306 | mc_req_size = SI_MC_UCODE_SIZE * 4; | |
307 | break; | |
308 | case CHIP_PITCAIRN: | |
309 | chip_name = "PITCAIRN"; | |
310 | rlc_chip_name = "PITCAIRN"; | |
311 | pfp_req_size = SI_PFP_UCODE_SIZE * 4; | |
312 | me_req_size = SI_PM4_UCODE_SIZE * 4; | |
313 | ce_req_size = SI_CE_UCODE_SIZE * 4; | |
314 | rlc_req_size = SI_RLC_UCODE_SIZE * 4; | |
315 | mc_req_size = SI_MC_UCODE_SIZE * 4; | |
316 | break; | |
317 | case CHIP_VERDE: | |
318 | chip_name = "VERDE"; | |
319 | rlc_chip_name = "VERDE"; | |
320 | pfp_req_size = SI_PFP_UCODE_SIZE * 4; | |
321 | me_req_size = SI_PM4_UCODE_SIZE * 4; | |
322 | ce_req_size = SI_CE_UCODE_SIZE * 4; | |
323 | rlc_req_size = SI_RLC_UCODE_SIZE * 4; | |
324 | mc_req_size = SI_MC_UCODE_SIZE * 4; | |
325 | break; | |
326 | default: BUG(); | |
327 | } | |
328 | ||
329 | DRM_INFO("Loading %s Microcode\n", chip_name); | |
330 | ||
331 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); | |
332 | err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); | |
333 | if (err) | |
334 | goto out; | |
335 | if (rdev->pfp_fw->size != pfp_req_size) { | |
336 | printk(KERN_ERR | |
337 | "si_cp: Bogus length %zu in firmware \"%s\"\n", | |
338 | rdev->pfp_fw->size, fw_name); | |
339 | err = -EINVAL; | |
340 | goto out; | |
341 | } | |
342 | ||
343 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); | |
344 | err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); | |
345 | if (err) | |
346 | goto out; | |
347 | if (rdev->me_fw->size != me_req_size) { | |
348 | printk(KERN_ERR | |
349 | "si_cp: Bogus length %zu in firmware \"%s\"\n", | |
350 | rdev->me_fw->size, fw_name); | |
351 | err = -EINVAL; | |
352 | } | |
353 | ||
354 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); | |
355 | err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev); | |
356 | if (err) | |
357 | goto out; | |
358 | if (rdev->ce_fw->size != ce_req_size) { | |
359 | printk(KERN_ERR | |
360 | "si_cp: Bogus length %zu in firmware \"%s\"\n", | |
361 | rdev->ce_fw->size, fw_name); | |
362 | err = -EINVAL; | |
363 | } | |
364 | ||
365 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); | |
366 | err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); | |
367 | if (err) | |
368 | goto out; | |
369 | if (rdev->rlc_fw->size != rlc_req_size) { | |
370 | printk(KERN_ERR | |
371 | "si_rlc: Bogus length %zu in firmware \"%s\"\n", | |
372 | rdev->rlc_fw->size, fw_name); | |
373 | err = -EINVAL; | |
374 | } | |
375 | ||
376 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); | |
377 | err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev); | |
378 | if (err) | |
379 | goto out; | |
380 | if (rdev->mc_fw->size != mc_req_size) { | |
381 | printk(KERN_ERR | |
382 | "si_mc: Bogus length %zu in firmware \"%s\"\n", | |
383 | rdev->mc_fw->size, fw_name); | |
384 | err = -EINVAL; | |
385 | } | |
386 | ||
387 | out: | |
388 | platform_device_unregister(pdev); | |
389 | ||
390 | if (err) { | |
391 | if (err != -EINVAL) | |
392 | printk(KERN_ERR | |
393 | "si_cp: Failed to load firmware \"%s\"\n", | |
394 | fw_name); | |
395 | release_firmware(rdev->pfp_fw); | |
396 | rdev->pfp_fw = NULL; | |
397 | release_firmware(rdev->me_fw); | |
398 | rdev->me_fw = NULL; | |
399 | release_firmware(rdev->ce_fw); | |
400 | rdev->ce_fw = NULL; | |
401 | release_firmware(rdev->rlc_fw); | |
402 | rdev->rlc_fw = NULL; | |
403 | release_firmware(rdev->mc_fw); | |
404 | rdev->mc_fw = NULL; | |
405 | } | |
406 | return err; | |
407 | } | |
408 | ||
43b3cd99 AD |
409 | /* watermark setup */ |
410 | static u32 dce6_line_buffer_adjust(struct radeon_device *rdev, | |
411 | struct radeon_crtc *radeon_crtc, | |
412 | struct drm_display_mode *mode, | |
413 | struct drm_display_mode *other_mode) | |
414 | { | |
415 | u32 tmp; | |
416 | /* | |
417 | * Line Buffer Setup | |
418 | * There are 3 line buffers, each one shared by 2 display controllers. | |
419 | * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between | |
420 | * the display controllers. The partitioning is done via one of four | |
421 | * preset allocations specified in bits 21:20: | |
422 | * 0 - half lb | |
423 | * 2 - whole lb, other crtc must be disabled | |
424 | */ | |
425 | /* this can get tricky if we have two large displays on a paired group | |
426 | * of crtcs. Ideally for multiple large displays we'd assign them to | |
427 | * non-linked crtcs for maximum line buffer allocation. | |
428 | */ | |
429 | if (radeon_crtc->base.enabled && mode) { | |
430 | if (other_mode) | |
431 | tmp = 0; /* 1/2 */ | |
432 | else | |
433 | tmp = 2; /* whole */ | |
434 | } else | |
435 | tmp = 0; | |
436 | ||
437 | WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, | |
438 | DC_LB_MEMORY_CONFIG(tmp)); | |
439 | ||
440 | if (radeon_crtc->base.enabled && mode) { | |
441 | switch (tmp) { | |
442 | case 0: | |
443 | default: | |
444 | return 4096 * 2; | |
445 | case 2: | |
446 | return 8192 * 2; | |
447 | } | |
448 | } | |
449 | ||
450 | /* controller not enabled, so no lb used */ | |
451 | return 0; | |
452 | } | |
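/* The return value is the line buffer allocation granted to this crtc
 * (4096 * 2 for a half buffer, 8192 * 2 for a whole one, 0 if disabled);
 * it is passed to dce6_program_watermarks() as lb_size for the
 * latency-hiding checks further down.
 */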
453 | ||
ca7db22b | 454 | static u32 si_get_number_of_dram_channels(struct radeon_device *rdev) |
43b3cd99 AD |
455 | { |
456 | u32 tmp = RREG32(MC_SHARED_CHMAP); | |
457 | ||
458 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | |
459 | case 0: | |
460 | default: | |
461 | return 1; | |
462 | case 1: | |
463 | return 2; | |
464 | case 2: | |
465 | return 4; | |
466 | case 3: | |
467 | return 8; | |
468 | case 4: | |
469 | return 3; | |
470 | case 5: | |
471 | return 6; | |
472 | case 6: | |
473 | return 10; | |
474 | case 7: | |
475 | return 12; | |
476 | case 8: | |
477 | return 16; | |
478 | } | |
479 | } | |
480 | ||
481 | struct dce6_wm_params { | |
482 | u32 dram_channels; /* number of dram channels */ | |
483 | u32 yclk; /* bandwidth per dram data pin in kHz */ | |
484 | u32 sclk; /* engine clock in kHz */ | |
485 | u32 disp_clk; /* display clock in kHz */ | |
486 | u32 src_width; /* viewport width */ | |
487 | u32 active_time; /* active display time in ns */ | |
488 | u32 blank_time; /* blank time in ns */ | |
489 | bool interlaced; /* mode is interlaced */ | |
490 | fixed20_12 vsc; /* vertical scale ratio */ | |
491 | u32 num_heads; /* number of active crtcs */ | |
492 | u32 bytes_per_pixel; /* bytes per pixel display + overlay */ | |
493 | u32 lb_size; /* line buffer allocated to pipe */ | |
494 | u32 vtaps; /* vertical scaler taps */ | |
495 | }; | |
496 | ||
497 | static u32 dce6_dram_bandwidth(struct dce6_wm_params *wm) | |
498 | { | |
499 | /* Calculate raw DRAM Bandwidth */ | |
500 | fixed20_12 dram_efficiency; /* 0.7 */ | |
501 | fixed20_12 yclk, dram_channels, bandwidth; | |
502 | fixed20_12 a; | |
503 | ||
504 | a.full = dfixed_const(1000); | |
505 | yclk.full = dfixed_const(wm->yclk); | |
506 | yclk.full = dfixed_div(yclk, a); | |
507 | dram_channels.full = dfixed_const(wm->dram_channels * 4); | |
508 | a.full = dfixed_const(10); | |
509 | dram_efficiency.full = dfixed_const(7); | |
510 | dram_efficiency.full = dfixed_div(dram_efficiency, a); | |
511 | bandwidth.full = dfixed_mul(dram_channels, yclk); | |
512 | bandwidth.full = dfixed_mul(bandwidth, dram_efficiency); | |
513 | ||
514 | return dfixed_trunc(bandwidth); | |
515 | } | |
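/* Worked example of the fixed-point math above: with yclk = 800000 (kHz)
 * and 2 dram channels, bandwidth = (800000 / 1000) * (2 * 4) * 0.7 = 4480,
 * presumably in MB/s assuming 4 bytes per channel per memory clock.
 */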
516 | ||
517 | static u32 dce6_dram_bandwidth_for_display(struct dce6_wm_params *wm) | |
518 | { | |
519 | /* Calculate DRAM Bandwidth and the part allocated to display. */ | |
520 | fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */ | |
521 | fixed20_12 yclk, dram_channels, bandwidth; | |
522 | fixed20_12 a; | |
523 | ||
524 | a.full = dfixed_const(1000); | |
525 | yclk.full = dfixed_const(wm->yclk); | |
526 | yclk.full = dfixed_div(yclk, a); | |
527 | dram_channels.full = dfixed_const(wm->dram_channels * 4); | |
528 | a.full = dfixed_const(10); | |
529 | disp_dram_allocation.full = dfixed_const(3); /* XXX worst-case value 0.3 */ | |
530 | disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a); | |
531 | bandwidth.full = dfixed_mul(dram_channels, yclk); | |
532 | bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation); | |
533 | ||
534 | return dfixed_trunc(bandwidth); | |
535 | } | |
536 | ||
537 | static u32 dce6_data_return_bandwidth(struct dce6_wm_params *wm) | |
538 | { | |
539 | /* Calculate the display Data return Bandwidth */ | |
540 | fixed20_12 return_efficiency; /* 0.8 */ | |
541 | fixed20_12 sclk, bandwidth; | |
542 | fixed20_12 a; | |
543 | ||
544 | a.full = dfixed_const(1000); | |
545 | sclk.full = dfixed_const(wm->sclk); | |
546 | sclk.full = dfixed_div(sclk, a); | |
547 | a.full = dfixed_const(10); | |
548 | return_efficiency.full = dfixed_const(8); | |
549 | return_efficiency.full = dfixed_div(return_efficiency, a); | |
550 | a.full = dfixed_const(32); | |
551 | bandwidth.full = dfixed_mul(a, sclk); | |
552 | bandwidth.full = dfixed_mul(bandwidth, return_efficiency); | |
553 | ||
554 | return dfixed_trunc(bandwidth); | |
555 | } | |
556 | ||
557 | static u32 dce6_get_dmif_bytes_per_request(struct dce6_wm_params *wm) | |
558 | { | |
559 | return 32; | |
560 | } | |
561 | ||
562 | static u32 dce6_dmif_request_bandwidth(struct dce6_wm_params *wm) | |
563 | { | |
564 | /* Calculate the DMIF Request Bandwidth */ | |
565 | fixed20_12 disp_clk_request_efficiency; /* 0.8 */ | |
566 | fixed20_12 disp_clk, sclk, bandwidth; | |
567 | fixed20_12 a, b1, b2; | |
568 | u32 min_bandwidth; | |
569 | ||
570 | a.full = dfixed_const(1000); | |
571 | disp_clk.full = dfixed_const(wm->disp_clk); | |
572 | disp_clk.full = dfixed_div(disp_clk, a); | |
573 | a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm) / 2); | |
574 | b1.full = dfixed_mul(a, disp_clk); | |
575 | ||
576 | a.full = dfixed_const(1000); | |
577 | sclk.full = dfixed_const(wm->sclk); | |
578 | sclk.full = dfixed_div(sclk, a); | |
579 | a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm)); | |
580 | b2.full = dfixed_mul(a, sclk); | |
581 | ||
582 | a.full = dfixed_const(10); | |
583 | disp_clk_request_efficiency.full = dfixed_const(8); | |
584 | disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a); | |
585 | ||
586 | min_bandwidth = min(dfixed_trunc(b1), dfixed_trunc(b2)); | |
587 | ||
588 | a.full = dfixed_const(min_bandwidth); | |
589 | bandwidth.full = dfixed_mul(a, disp_clk_request_efficiency); | |
590 | ||
591 | return dfixed_trunc(bandwidth); | |
592 | } | |
593 | ||
594 | static u32 dce6_available_bandwidth(struct dce6_wm_params *wm) | |
595 | { | |
596 | /* Calculate the Available bandwidth. Display can use this temporarily but not on average. */ | |
597 | u32 dram_bandwidth = dce6_dram_bandwidth(wm); | |
598 | u32 data_return_bandwidth = dce6_data_return_bandwidth(wm); | |
599 | u32 dmif_req_bandwidth = dce6_dmif_request_bandwidth(wm); | |
600 | ||
601 | return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth)); | |
602 | } | |
603 | ||
604 | static u32 dce6_average_bandwidth(struct dce6_wm_params *wm) | |
605 | { | |
606 | /* Calculate the display mode Average Bandwidth | |
607 | * DisplayMode should contain the source and destination dimensions, | |
608 | * timing, etc. | |
609 | */ | |
610 | fixed20_12 bpp; | |
611 | fixed20_12 line_time; | |
612 | fixed20_12 src_width; | |
613 | fixed20_12 bandwidth; | |
614 | fixed20_12 a; | |
615 | ||
616 | a.full = dfixed_const(1000); | |
617 | line_time.full = dfixed_const(wm->active_time + wm->blank_time); | |
618 | line_time.full = dfixed_div(line_time, a); | |
619 | bpp.full = dfixed_const(wm->bytes_per_pixel); | |
620 | src_width.full = dfixed_const(wm->src_width); | |
621 | bandwidth.full = dfixed_mul(src_width, bpp); | |
622 | bandwidth.full = dfixed_mul(bandwidth, wm->vsc); | |
623 | bandwidth.full = dfixed_div(bandwidth, line_time); | |
624 | ||
625 | return dfixed_trunc(bandwidth); | |
626 | } | |
627 | ||
628 | static u32 dce6_latency_watermark(struct dce6_wm_params *wm) | |
629 | { | |
630 | /* First calculate the latency in ns */ | |
631 | u32 mc_latency = 2000; /* 2000 ns. */ | |
632 | u32 available_bandwidth = dce6_available_bandwidth(wm); | |
633 | u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth; | |
634 | u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth; | |
635 | u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */ | |
636 | u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) + | |
637 | (wm->num_heads * cursor_line_pair_return_time); | |
638 | u32 latency = mc_latency + other_heads_data_return_time + dc_latency; | |
639 | u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time; | |
640 | u32 tmp, dmif_size = 12288; | |
641 | fixed20_12 a, b, c; | |
642 | ||
643 | if (wm->num_heads == 0) | |
644 | return 0; | |
645 | ||
646 | a.full = dfixed_const(2); | |
647 | b.full = dfixed_const(1); | |
648 | if ((wm->vsc.full > a.full) || | |
649 | ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) || | |
650 | (wm->vtaps >= 5) || | |
651 | ((wm->vsc.full >= a.full) && wm->interlaced)) | |
652 | max_src_lines_per_dst_line = 4; | |
653 | else | |
654 | max_src_lines_per_dst_line = 2; | |
655 | ||
656 | a.full = dfixed_const(available_bandwidth); | |
657 | b.full = dfixed_const(wm->num_heads); | |
658 | a.full = dfixed_div(a, b); | |
659 | ||
660 | b.full = dfixed_const(mc_latency + 512); | |
661 | c.full = dfixed_const(wm->disp_clk); | |
662 | b.full = dfixed_div(b, c); | |
663 | ||
664 | c.full = dfixed_const(dmif_size); | |
665 | b.full = dfixed_div(c, b); | |
666 | ||
667 | tmp = min(dfixed_trunc(a), dfixed_trunc(b)); | |
668 | ||
669 | b.full = dfixed_const(1000); | |
670 | c.full = dfixed_const(wm->disp_clk); | |
671 | b.full = dfixed_div(c, b); | |
672 | c.full = dfixed_const(wm->bytes_per_pixel); | |
673 | b.full = dfixed_mul(b, c); | |
674 | ||
675 | lb_fill_bw = min(tmp, dfixed_trunc(b)); | |
676 | ||
677 | a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); | |
678 | b.full = dfixed_const(1000); | |
679 | c.full = dfixed_const(lb_fill_bw); | |
680 | b.full = dfixed_div(c, b); | |
681 | a.full = dfixed_div(a, b); | |
682 | line_fill_time = dfixed_trunc(a); | |
683 | ||
684 | if (line_fill_time < wm->active_time) | |
685 | return latency; | |
686 | else | |
687 | return latency + (line_fill_time - wm->active_time); | |
688 | ||
689 | } | |
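/* In summary, the watermark computed above is
 *   latency = mc_latency + dc_latency + other_heads_data_return_time,
 * extended by (line_fill_time - active_time) whenever the line buffer
 * cannot be refilled within one active display period.
 */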
690 | ||
691 | static bool dce6_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm) | |
692 | { | |
693 | if (dce6_average_bandwidth(wm) <= | |
694 | (dce6_dram_bandwidth_for_display(wm) / wm->num_heads)) | |
695 | return true; | |
696 | else | |
697 | return false; | |
698 | } | |
699 | ||
700 | static bool dce6_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm) | |
701 | { | |
702 | if (dce6_average_bandwidth(wm) <= | |
703 | (dce6_available_bandwidth(wm) / wm->num_heads)) | |
704 | return true; | |
705 | else | |
706 | return false; | |
707 | } | |
708 | ||
709 | static bool dce6_check_latency_hiding(struct dce6_wm_params *wm) | |
710 | { | |
711 | u32 lb_partitions = wm->lb_size / wm->src_width; | |
712 | u32 line_time = wm->active_time + wm->blank_time; | |
713 | u32 latency_tolerant_lines; | |
714 | u32 latency_hiding; | |
715 | fixed20_12 a; | |
716 | ||
717 | a.full = dfixed_const(1); | |
718 | if (wm->vsc.full > a.full) | |
719 | latency_tolerant_lines = 1; | |
720 | else { | |
721 | if (lb_partitions <= (wm->vtaps + 1)) | |
722 | latency_tolerant_lines = 1; | |
723 | else | |
724 | latency_tolerant_lines = 2; | |
725 | } | |
726 | ||
727 | latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time); | |
728 | ||
729 | if (dce6_latency_watermark(wm) <= latency_hiding) | |
730 | return true; | |
731 | else | |
732 | return false; | |
733 | } | |
734 | ||
735 | static void dce6_program_watermarks(struct radeon_device *rdev, | |
736 | struct radeon_crtc *radeon_crtc, | |
737 | u32 lb_size, u32 num_heads) | |
738 | { | |
739 | struct drm_display_mode *mode = &radeon_crtc->base.mode; | |
740 | struct dce6_wm_params wm; | |
741 | u32 pixel_period; | |
742 | u32 line_time = 0; | |
743 | u32 latency_watermark_a = 0, latency_watermark_b = 0; | |
744 | u32 priority_a_mark = 0, priority_b_mark = 0; | |
745 | u32 priority_a_cnt = PRIORITY_OFF; | |
746 | u32 priority_b_cnt = PRIORITY_OFF; | |
747 | u32 tmp, arb_control3; | |
748 | fixed20_12 a, b, c; | |
749 | ||
750 | if (radeon_crtc->base.enabled && num_heads && mode) { | |
751 | pixel_period = 1000000 / (u32)mode->clock; | |
752 | line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); | |
753 | priority_a_cnt = 0; | |
754 | priority_b_cnt = 0; | |
755 | ||
756 | wm.yclk = rdev->pm.current_mclk * 10; | |
757 | wm.sclk = rdev->pm.current_sclk * 10; | |
758 | wm.disp_clk = mode->clock; | |
759 | wm.src_width = mode->crtc_hdisplay; | |
760 | wm.active_time = mode->crtc_hdisplay * pixel_period; | |
761 | wm.blank_time = line_time - wm.active_time; | |
762 | wm.interlaced = false; | |
763 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | |
764 | wm.interlaced = true; | |
765 | wm.vsc = radeon_crtc->vsc; | |
766 | wm.vtaps = 1; | |
767 | if (radeon_crtc->rmx_type != RMX_OFF) | |
768 | wm.vtaps = 2; | |
769 | wm.bytes_per_pixel = 4; /* XXX: get this from fb config */ | |
770 | wm.lb_size = lb_size; | |
ca7db22b AD |
771 | if (rdev->family == CHIP_ARUBA) |
772 | wm.dram_channels = evergreen_get_number_of_dram_channels(rdev); | |
773 | else | |
774 | wm.dram_channels = si_get_number_of_dram_channels(rdev); | |
43b3cd99 AD |
775 | wm.num_heads = num_heads; |
776 | ||
777 | /* set for high clocks */ | |
778 | latency_watermark_a = min(dce6_latency_watermark(&wm), (u32)65535); | |
779 | /* set for low clocks */ | |
780 | /* wm.yclk = low clk; wm.sclk = low clk */ | |
781 | latency_watermark_b = min(dce6_latency_watermark(&wm), (u32)65535); | |
782 | ||
783 | /* possibly force display priority to high */ | |
784 | /* should really do this at mode validation time... */ | |
785 | if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm) || | |
786 | !dce6_average_bandwidth_vs_available_bandwidth(&wm) || | |
787 | !dce6_check_latency_hiding(&wm) || | |
788 | (rdev->disp_priority == 2)) { | |
789 | DRM_DEBUG_KMS("force priority to high\n"); | |
790 | priority_a_cnt |= PRIORITY_ALWAYS_ON; | |
791 | priority_b_cnt |= PRIORITY_ALWAYS_ON; | |
792 | } | |
793 | ||
794 | a.full = dfixed_const(1000); | |
795 | b.full = dfixed_const(mode->clock); | |
796 | b.full = dfixed_div(b, a); | |
797 | c.full = dfixed_const(latency_watermark_a); | |
798 | c.full = dfixed_mul(c, b); | |
799 | c.full = dfixed_mul(c, radeon_crtc->hsc); | |
800 | c.full = dfixed_div(c, a); | |
801 | a.full = dfixed_const(16); | |
802 | c.full = dfixed_div(c, a); | |
803 | priority_a_mark = dfixed_trunc(c); | |
804 | priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK; | |
805 | ||
806 | a.full = dfixed_const(1000); | |
807 | b.full = dfixed_const(mode->clock); | |
808 | b.full = dfixed_div(b, a); | |
809 | c.full = dfixed_const(latency_watermark_b); | |
810 | c.full = dfixed_mul(c, b); | |
811 | c.full = dfixed_mul(c, radeon_crtc->hsc); | |
812 | c.full = dfixed_div(c, a); | |
813 | a.full = dfixed_const(16); | |
814 | c.full = dfixed_div(c, a); | |
815 | priority_b_mark = dfixed_trunc(c); | |
816 | priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; | |
817 | } | |
818 | ||
819 | /* select wm A */ | |
820 | arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset); | |
821 | tmp = arb_control3; | |
822 | tmp &= ~LATENCY_WATERMARK_MASK(3); | |
823 | tmp |= LATENCY_WATERMARK_MASK(1); | |
824 | WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp); | |
825 | WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset, | |
826 | (LATENCY_LOW_WATERMARK(latency_watermark_a) | | |
827 | LATENCY_HIGH_WATERMARK(line_time))); | |
828 | /* select wm B */ | |
829 | tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset); | |
830 | tmp &= ~LATENCY_WATERMARK_MASK(3); | |
831 | tmp |= LATENCY_WATERMARK_MASK(2); | |
832 | WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp); | |
833 | WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset, | |
834 | (LATENCY_LOW_WATERMARK(latency_watermark_b) | | |
835 | LATENCY_HIGH_WATERMARK(line_time))); | |
836 | /* restore original selection */ | |
837 | WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3); | |
838 | ||
839 | /* write the priority marks */ | |
840 | WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt); | |
841 | WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt); | |
842 | ||
843 | } | |
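/* The priority marks computed above scale the latency watermark (ns) by
 * the pixel clock (MHz) and the horizontal scale ratio, divide by 1000
 * and then by 16 - roughly the number of 16-pixel chunks fetched during
 * the latency period.
 */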
844 | ||
845 | void dce6_bandwidth_update(struct radeon_device *rdev) | |
846 | { | |
847 | struct drm_display_mode *mode0 = NULL; | |
848 | struct drm_display_mode *mode1 = NULL; | |
849 | u32 num_heads = 0, lb_size; | |
850 | int i; | |
851 | ||
852 | radeon_update_display_priority(rdev); | |
853 | ||
854 | for (i = 0; i < rdev->num_crtc; i++) { | |
855 | if (rdev->mode_info.crtcs[i]->base.enabled) | |
856 | num_heads++; | |
857 | } | |
858 | for (i = 0; i < rdev->num_crtc; i += 2) { | |
859 | mode0 = &rdev->mode_info.crtcs[i]->base.mode; | |
860 | mode1 = &rdev->mode_info.crtcs[i+1]->base.mode; | |
861 | lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1); | |
862 | dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads); | |
863 | lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0); | |
864 | dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads); | |
865 | } | |
866 | } | |
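/* Crtcs are walked in pairs (i, i+1) because each line buffer is shared
 * by two display controllers, as described in dce6_line_buffer_adjust();
 * each crtc in a pair passes the other's mode so the split can be chosen.
 */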
867 | ||
0a96d72b AD |
868 | /* |
869 | * Core functions | |
870 | */ | |
0a96d72b AD |
871 | static void si_tiling_mode_table_init(struct radeon_device *rdev) |
872 | { | |
873 | const u32 num_tile_mode_states = 32; | |
874 | u32 reg_offset, gb_tile_moden, split_equal_to_row_size; | |
875 | ||
876 | switch (rdev->config.si.mem_row_size_in_kb) { | |
877 | case 1: | |
878 | split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB; | |
879 | break; | |
880 | case 2: | |
881 | default: | |
882 | split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB; | |
883 | break; | |
884 | case 4: | |
885 | split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB; | |
886 | break; | |
887 | } | |
888 | ||
889 | if ((rdev->family == CHIP_TAHITI) || | |
890 | (rdev->family == CHIP_PITCAIRN)) { | |
891 | for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { | |
892 | switch (reg_offset) { | |
893 | case 0: /* non-AA compressed depth or any compressed stencil */ | |
894 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
895 | MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | |
896 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
897 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | | |
898 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
899 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
900 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | |
901 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
902 | break; | |
903 | case 1: /* 2xAA/4xAA compressed depth only */ | |
904 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
905 | MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | |
906 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
907 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | | |
908 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
909 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
910 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | |
911 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
912 | break; | |
913 | case 2: /* 8xAA compressed depth only */ | |
914 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
915 | MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | |
916 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
917 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | |
918 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
919 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
920 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | |
921 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
922 | break; | |
923 | case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */ | |
924 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
925 | MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | |
926 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
927 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | | |
928 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
929 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
930 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | |
931 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
932 | break; | |
933 | case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */ | |
934 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | |
935 | MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | |
936 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
937 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | | |
938 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
939 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
940 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | |
941 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
942 | break; | |
943 | case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */ | |
944 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
945 | MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | |
946 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
947 | TILE_SPLIT(split_equal_to_row_size) | | |
948 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
949 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
950 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | |
951 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
952 | break; | |
953 | case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */ | |
954 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
955 | MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | |
956 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
957 | TILE_SPLIT(split_equal_to_row_size) | | |
958 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
959 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
960 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | |
961 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); | |
962 | break; | |
963 | case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */ | |
964 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
965 | MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | |
966 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
967 | TILE_SPLIT(split_equal_to_row_size) | | |
968 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
969 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
970 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | |
971 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
972 | break; | |
973 | case 8: /* 1D and 1D Array Surfaces */ | |
974 | gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | | |
975 | MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | | |
976 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
977 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | | |
978 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
979 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
980 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | |
981 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
982 | break; | |
983 | case 9: /* Displayable maps. */ | |
984 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | |
985 | MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | | |
986 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
987 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | | |
988 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
989 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
990 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | |
991 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
992 | break; | |
993 | case 10: /* Display 8bpp. */ | |
994 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
995 | MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | | |
996 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
997 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | |
998 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
999 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1000 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | |
1001 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
1002 | break; | |
1003 | case 11: /* Display 16bpp. */ | |
1004 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1005 | MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | | |
1006 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
1007 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | |
1008 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1009 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1010 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | |
1011 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
1012 | break; | |
1013 | case 12: /* Display 32bpp. */ | |
1014 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1015 | MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | | |
1016 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
1017 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | | |
1018 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1019 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1020 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | |
1021 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); | |
1022 | break; | |
1023 | case 13: /* Thin. */ | |
1024 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | |
1025 | MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | |
1026 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
1027 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | | |
1028 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1029 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1030 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | |
1031 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
1032 | break; | |
1033 | case 14: /* Thin 8 bpp. */ | |
1034 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1035 | MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | |
1036 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
1037 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | |
1038 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1039 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1040 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | |
1041 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); | |
1042 | break; | |
1043 | case 15: /* Thin 16 bpp. */ | |
1044 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1045 | MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | |
1046 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
1047 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | |
1048 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1049 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1050 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | |
1051 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); | |
1052 | break; | |
1053 | case 16: /* Thin 32 bpp. */ | |
1054 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1055 | MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | |
1056 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
1057 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | | |
1058 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1059 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1060 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | |
1061 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); | |
1062 | break; | |
1063 | case 17: /* Thin 64 bpp. */ | |
1064 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1065 | MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | |
1066 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
1067 | TILE_SPLIT(split_equal_to_row_size) | | |
1068 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1069 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1070 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | |
1071 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); | |
1072 | break; | |
1073 | case 21: /* 8 bpp PRT. */ | |
1074 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1075 | MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | |
1076 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
1077 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | |
1078 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1079 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | | |
1080 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | |
1081 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
1082 | break; | |
1083 | case 22: /* 16 bpp PRT */ | |
1084 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1085 | MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | |
1086 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
1087 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | |
1088 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1089 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1090 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | |
1091 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); | |
1092 | break; | |
1093 | case 23: /* 32 bpp PRT */ | |
1094 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1095 | MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | |
1096 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
1097 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | |
1098 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1099 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1100 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | |
1101 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
1102 | break; | |
1103 | case 24: /* 64 bpp PRT */ | |
1104 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1105 | MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | |
1106 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
1107 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | | |
1108 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1109 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1110 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | |
1111 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
1112 | break; | |
1113 | case 25: /* 128 bpp PRT */ | |
1114 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1115 | MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | |
1116 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
1117 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | | |
1118 | NUM_BANKS(ADDR_SURF_8_BANK) | | |
1119 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1120 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | |
1121 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); | |
1122 | break; | |
1123 | default: | |
1124 | gb_tile_moden = 0; | |
1125 | break; | |
1126 | } | |
1127 | WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden); | |
1128 | } | |
1129 | } else if (rdev->family == CHIP_VERDE) { | |
1130 | for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { | |
1131 | switch (reg_offset) { | |
1132 | case 0: /* non-AA compressed depth or any compressed stencil */ | |
1133 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1134 | MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | |
1135 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | |
1136 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | | |
1137 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1138 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1139 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | |
1140 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); | |
1141 | break; | |
1142 | case 1: /* 2xAA/4xAA compressed depth only */ | |
1143 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1144 | MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | |
1145 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | |
1146 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | | |
1147 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1148 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1149 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | |
1150 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); | |
1151 | break; | |
1152 | case 2: /* 8xAA compressed depth only */ | |
1153 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1154 | MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | |
1155 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | |
1156 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | |
1157 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1158 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1159 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | |
1160 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); | |
1161 | break; | |
1162 | case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */ | |
1163 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1164 | MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | |
1165 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | |
1166 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | | |
1167 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1168 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1169 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | |
1170 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); | |
1171 | break; | |
1172 | case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */ | |
1173 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | |
1174 | MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | |
1175 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | |
1176 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | | |
1177 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1178 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1179 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | |
1180 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
1181 | break; | |
1182 | case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */ | |
1183 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1184 | MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | |
1185 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | |
1186 | TILE_SPLIT(split_equal_to_row_size) | | |
1187 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1188 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1189 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | |
1190 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
1191 | break; | |
1192 | case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */ | |
1193 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1194 | MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | |
1195 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | |
1196 | TILE_SPLIT(split_equal_to_row_size) | | |
1197 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1198 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1199 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | |
1200 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
1201 | break; | |
1202 | case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */ | |
1203 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1204 | MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | |
1205 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | |
1206 | TILE_SPLIT(split_equal_to_row_size) | | |
1207 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1208 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1209 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | |
1210 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); | |
1211 | break; | |
1212 | case 8: /* 1D and 1D Array Surfaces */ | |
1213 | gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | | |
1214 | MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | | |
1215 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | |
1216 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | | |
1217 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1218 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1219 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | |
1220 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
1221 | break; | |
1222 | case 9: /* Displayable maps. */ | |
1223 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | |
1224 | MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | | |
1225 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | |
1226 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | | |
1227 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1228 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1229 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | |
1230 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
1231 | break; | |
1232 | case 10: /* Display 8bpp. */ | |
1233 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1234 | MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | | |
1235 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | |
1236 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | |
1237 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1238 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1239 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | |
1240 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); | |
1241 | break; | |
1242 | case 11: /* Display 16bpp. */ | |
1243 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1244 | MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | | |
1245 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | |
1246 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | |
1247 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1248 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1249 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | |
1250 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
1251 | break; | |
1252 | case 12: /* Display 32bpp. */ | |
1253 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1254 | MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | | |
1255 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | |
1256 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | | |
1257 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1258 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1259 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | |
1260 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
1261 | break; | |
1262 | case 13: /* Thin. */ | |
1263 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | |
1264 | MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | |
1265 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | |
1266 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | | |
1267 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1268 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1269 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | |
1270 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
1271 | break; | |
1272 | case 14: /* Thin 8 bpp. */ | |
1273 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1274 | MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | |
1275 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | |
1276 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | |
1277 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1278 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1279 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | |
1280 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
1281 | break; | |
1282 | case 15: /* Thin 16 bpp. */ | |
1283 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1284 | MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | |
1285 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | |
1286 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | |
1287 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1288 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1289 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | |
1290 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
1291 | break; | |
1292 | case 16: /* Thin 32 bpp. */ | |
1293 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1294 | MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | |
1295 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | |
1296 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | | |
1297 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1298 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1299 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | |
1300 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
1301 | break; | |
1302 | case 17: /* Thin 64 bpp. */ | |
1303 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1304 | MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | |
1305 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | |
1306 | TILE_SPLIT(split_equal_to_row_size) | | |
1307 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1308 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1309 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | |
1310 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
1311 | break; | |
1312 | case 21: /* 8 bpp PRT. */ | |
1313 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1314 | MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | |
1315 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
1316 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | |
1317 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1318 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | | |
1319 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | |
1320 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
1321 | break; | |
1322 | case 22: /* 16 bpp PRT */ | |
1323 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1324 | MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | |
1325 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
1326 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | |
1327 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1328 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1329 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | |
1330 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); | |
1331 | break; | |
1332 | case 23: /* 32 bpp PRT */ | |
1333 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1334 | MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | |
1335 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
1336 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | |
1337 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1338 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1339 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | |
1340 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
1341 | break; | |
1342 | case 24: /* 64 bpp PRT */ | |
1343 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1344 | MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | |
1345 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
1346 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | | |
1347 | NUM_BANKS(ADDR_SURF_16_BANK) | | |
1348 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1349 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | |
1350 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); | |
1351 | break; | |
1352 | case 25: /* 128 bpp PRT */ | |
1353 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | |
1354 | MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | |
1355 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | |
1356 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | | |
1357 | NUM_BANKS(ADDR_SURF_8_BANK) | | |
1358 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | |
1359 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | |
1360 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); | |
1361 | break; | |
1362 | default: | |
1363 | gb_tile_moden = 0; | |
1364 | break; | |
1365 | } | |
1366 | WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden); | |
1367 | } | |
1368 | } else | |
1369 | DRM_ERROR("unknown asic: 0x%x\n", rdev->family); | |
1370 | } | |
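/* Note: "PRT" in the case labels above refers to partially resident
 * textures. Indices without an explicit case fall through to the
 * default and leave the corresponding GB_TILE_MODE entry zeroed.
 */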
1371 | ||
1a8ca750 AD |
1372 | static void si_select_se_sh(struct radeon_device *rdev, |
1373 | u32 se_num, u32 sh_num) | |
1374 | { | |
1375 | u32 data = INSTANCE_BROADCAST_WRITES; | |
1376 | ||
1377 | if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) | |
1378 | data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES; | |
1379 | else if (se_num == 0xffffffff) | |
1380 | data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num); | |
1381 | else if (sh_num == 0xffffffff) | |
1382 | data |= SH_BROADCAST_WRITES | SE_INDEX(se_num); | |
1383 | else | |
1384 | data |= SH_INDEX(sh_num) | SE_INDEX(se_num); | |
1385 | WREG32(GRBM_GFX_INDEX, data); | |
1386 | } | |
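/* The usual pattern (a sketch of how this helper is used later in this
 * file): select one SE/SH, program its registers, then restore
 * broadcast so later writes reach every instance again:
 *
 * si_select_se_sh(rdev, se, sh);
 * ... per-SH register writes ...
 * si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
 */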
1387 | ||
1388 | static u32 si_create_bitmask(u32 bit_width) | |
1389 | { | |
1390 | u32 i, mask = 0; | |
1391 | ||
1392 | for (i = 0; i < bit_width; i++) { | |
1393 | mask <<= 1; | |
1394 | mask |= 1; | |
1395 | } | |
1396 | return mask; | |
1397 | } | |
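/* For bit_width < 32 this is simply (1 << bit_width) - 1, e.g.
 * si_create_bitmask(3) == 0x7. The loop form also yields 0xffffffff
 * for bit_width == 32 without an undefined 32-bit shift.
 */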
1398 | ||
1399 | static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh) | |
1400 | { | |
1401 | u32 data, mask; | |
1402 | ||
1403 | data = RREG32(CC_GC_SHADER_ARRAY_CONFIG); | |
1404 | if (data & 1) | |
1405 | data &= INACTIVE_CUS_MASK; | |
1406 | else | |
1407 | data = 0; | |
1408 | data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG); | |
1409 | ||
1410 | data >>= INACTIVE_CUS_SHIFT; | |
1411 | ||
1412 | mask = si_create_bitmask(cu_per_sh); | |
1413 | ||
1414 | return ~data & mask; | |
1415 | } | |
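/* Worked example (hypothetical register values): if the combined
 * INACTIVE_CUS field reads 0x3 and cu_per_sh is 5, the result is
 * ~0x3 & 0x1f == 0x1c, i.e. CUs 2-4 are the active ones in this SH.
 */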
1416 | ||
1417 | static void si_setup_spi(struct radeon_device *rdev, | |
1418 | u32 se_num, u32 sh_per_se, | |
1419 | u32 cu_per_sh) | |
1420 | { | |
1421 | int i, j, k; | |
1422 | u32 data, mask, active_cu; | |
1423 | ||
1424 | for (i = 0; i < se_num; i++) { | |
1425 | for (j = 0; j < sh_per_se; j++) { | |
1426 | si_select_se_sh(rdev, i, j); | |
1427 | data = RREG32(SPI_STATIC_THREAD_MGMT_3); | |
1428 | active_cu = si_get_cu_enabled(rdev, cu_per_sh); | |
1429 | ||
1430 | mask = 1; | |
1431 | for (k = 0; k < 16; k++) { | |
1432 | mask = 1 << k; /* test each of the 16 CU bits in turn */ | |
1433 | if (active_cu & mask) { | |
1434 | data &= ~mask; | |
1435 | WREG32(SPI_STATIC_THREAD_MGMT_3, data); | |
1436 | break; | |
1437 | } | |
1438 | } | |
1439 | } | |
1440 | } | |
1441 | si_select_se_sh(rdev, 0xffffffff, 0xffffffff); | |
1442 | } | |
1443 | ||
1444 | static u32 si_get_rb_disabled(struct radeon_device *rdev, | |
1445 | u32 max_rb_num, u32 se_num, | |
1446 | u32 sh_per_se) | |
1447 | { | |
1448 | u32 data, mask; | |
1449 | ||
1450 | data = RREG32(CC_RB_BACKEND_DISABLE); | |
1451 | if (data & 1) | |
1452 | data &= BACKEND_DISABLE_MASK; | |
1453 | else | |
1454 | data = 0; | |
1455 | data |= RREG32(GC_USER_RB_BACKEND_DISABLE); | |
1456 | ||
1457 | data >>= BACKEND_DISABLE_SHIFT; | |
1458 | ||
1459 | mask = si_create_bitmask(max_rb_num / se_num / sh_per_se); | |
1460 | ||
1461 | return data & mask; | |
1462 | } | |
1463 | ||
1464 | static void si_setup_rb(struct radeon_device *rdev, | |
1465 | u32 se_num, u32 sh_per_se, | |
1466 | u32 max_rb_num) | |
1467 | { | |
1468 | int i, j; | |
1469 | u32 data, mask; | |
1470 | u32 disabled_rbs = 0; | |
1471 | u32 enabled_rbs = 0; | |
1472 | ||
1473 | for (i = 0; i < se_num; i++) { | |
1474 | for (j = 0; j < sh_per_se; j++) { | |
1475 | si_select_se_sh(rdev, i, j); | |
1476 | data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); | |
1477 | disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH); | |
1478 | } | |
1479 | } | |
1480 | si_select_se_sh(rdev, 0xffffffff, 0xffffffff); | |
1481 | ||
1482 | mask = 1; | |
1483 | for (i = 0; i < max_rb_num; i++) { | |
1484 | if (!(disabled_rbs & mask)) | |
1485 | enabled_rbs |= mask; | |
1486 | mask <<= 1; | |
1487 | } | |
1488 | ||
1489 | for (i = 0; i < se_num; i++) { | |
1490 | si_select_se_sh(rdev, i, 0xffffffff); | |
1491 | data = 0; | |
1492 | for (j = 0; j < sh_per_se; j++) { | |
1493 | switch (enabled_rbs & 3) { | |
1494 | case 1: | |
1495 | data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2); | |
1496 | break; | |
1497 | case 2: | |
1498 | data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2); | |
1499 | break; | |
1500 | case 3: | |
1501 | default: | |
1502 | data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2); | |
1503 | break; | |
1504 | } | |
1505 | enabled_rbs >>= 2; | |
1506 | } | |
1507 | WREG32(PA_SC_RASTER_CONFIG, data); | |
1508 | } | |
1509 | si_select_se_sh(rdev, 0xffffffff, 0xffffffff); | |
1510 | } | |
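/* Each SH consumes two bits of enabled_rbs; the switch above picks a
 * raster config mapping for whichever RB(s) in that pair survived.
 * With no RBs disabled every 2-bit group is 3, so the default
 * RASTER_CONFIG_RB_MAP_2 mapping is used throughout.
 */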
1511 | ||
0a96d72b AD |
1512 | static void si_gpu_init(struct radeon_device *rdev) |
1513 | { | |
0a96d72b AD |
1514 | u32 gb_addr_config = 0; |
1515 | u32 mc_shared_chmap, mc_arb_ramcfg; | |
0a96d72b | 1516 | u32 sx_debug_1; |
0a96d72b AD |
1517 | u32 hdp_host_path_cntl; |
1518 | u32 tmp; | |
1519 | int i, j; | |
1520 | ||
1521 | switch (rdev->family) { | |
1522 | case CHIP_TAHITI: | |
1523 | rdev->config.si.max_shader_engines = 2; | |
0a96d72b | 1524 | rdev->config.si.max_tile_pipes = 12; |
1a8ca750 AD |
1525 | rdev->config.si.max_cu_per_sh = 8; |
1526 | rdev->config.si.max_sh_per_se = 2; | |
0a96d72b AD |
1527 | rdev->config.si.max_backends_per_se = 4; |
1528 | rdev->config.si.max_texture_channel_caches = 12; | |
1529 | rdev->config.si.max_gprs = 256; | |
1530 | rdev->config.si.max_gs_threads = 32; | |
1531 | rdev->config.si.max_hw_contexts = 8; | |
1532 | ||
1533 | rdev->config.si.sc_prim_fifo_size_frontend = 0x20; | |
1534 | rdev->config.si.sc_prim_fifo_size_backend = 0x100; | |
1535 | rdev->config.si.sc_hiz_tile_fifo_size = 0x30; | |
1536 | rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; | |
1a8ca750 | 1537 | gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN; |
0a96d72b AD |
1538 | break; |
1539 | case CHIP_PITCAIRN: | |
1540 | rdev->config.si.max_shader_engines = 2; | |
0a96d72b | 1541 | rdev->config.si.max_tile_pipes = 8; |
1a8ca750 AD |
1542 | rdev->config.si.max_cu_per_sh = 5; |
1543 | rdev->config.si.max_sh_per_se = 2; | |
0a96d72b AD |
1544 | rdev->config.si.max_backends_per_se = 4; |
1545 | rdev->config.si.max_texture_channel_caches = 8; | |
1546 | rdev->config.si.max_gprs = 256; | |
1547 | rdev->config.si.max_gs_threads = 32; | |
1548 | rdev->config.si.max_hw_contexts = 8; | |
1549 | ||
1550 | rdev->config.si.sc_prim_fifo_size_frontend = 0x20; | |
1551 | rdev->config.si.sc_prim_fifo_size_backend = 0x100; | |
1552 | rdev->config.si.sc_hiz_tile_fifo_size = 0x30; | |
1553 | rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; | |
1a8ca750 | 1554 | gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN; |
0a96d72b AD |
1555 | break; |
1556 | case CHIP_VERDE: | |
1557 | default: | |
1558 | rdev->config.si.max_shader_engines = 1; | |
0a96d72b | 1559 | rdev->config.si.max_tile_pipes = 4; |
1a8ca750 AD |
1560 | rdev->config.si.max_cu_per_sh = 2; |
1561 | rdev->config.si.max_sh_per_se = 2; | |
0a96d72b AD |
1562 | rdev->config.si.max_backends_per_se = 4; |
1563 | rdev->config.si.max_texture_channel_caches = 4; | |
1564 | rdev->config.si.max_gprs = 256; | |
1565 | rdev->config.si.max_gs_threads = 32; | |
1566 | rdev->config.si.max_hw_contexts = 8; | |
1567 | ||
1568 | rdev->config.si.sc_prim_fifo_size_frontend = 0x20; | |
1569 | rdev->config.si.sc_prim_fifo_size_backend = 0x40; | |
1570 | rdev->config.si.sc_hiz_tile_fifo_size = 0x30; | |
1571 | rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; | |
1a8ca750 | 1572 | gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN; |
0a96d72b AD |
1573 | break; |
1574 | } | |
1575 | ||
1576 | /* Initialize HDP */ | |
1577 | for (i = 0, j = 0; i < 32; i++, j += 0x18) { | |
1578 | WREG32((0x2c14 + j), 0x00000000); | |
1579 | WREG32((0x2c18 + j), 0x00000000); | |
1580 | WREG32((0x2c1c + j), 0x00000000); | |
1581 | WREG32((0x2c20 + j), 0x00000000); | |
1582 | WREG32((0x2c24 + j), 0x00000000); | |
1583 | } | |
1584 | ||
1585 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); | |
1586 | ||
1587 | evergreen_fix_pci_max_read_req_size(rdev); | |
1588 | ||
1589 | WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN); | |
1590 | ||
1591 | mc_shared_chmap = RREG32(MC_SHARED_CHMAP); | |
1592 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); | |
1593 | ||
0a96d72b | 1594 | rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes; |
0a96d72b AD |
1595 | rdev->config.si.mem_max_burst_length_bytes = 256; |
1596 | tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT; | |
1597 | rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; | |
1598 | if (rdev->config.si.mem_row_size_in_kb > 4) | |
1599 | rdev->config.si.mem_row_size_in_kb = 4; | |
1600 | /* XXX use MC settings? */ | |
1601 | rdev->config.si.shader_engine_tile_size = 32; | |
1602 | rdev->config.si.num_gpus = 1; | |
1603 | rdev->config.si.multi_gpu_tile_size = 64; | |
1604 | ||
1a8ca750 AD |
1605 | /* fix up row size */ |
1606 | gb_addr_config &= ~ROW_SIZE_MASK; | |
0a96d72b AD |
1607 | switch (rdev->config.si.mem_row_size_in_kb) { |
1608 | case 1: | |
1609 | default: | |
1610 | gb_addr_config |= ROW_SIZE(0); | |
1611 | break; | |
1612 | case 2: | |
1613 | gb_addr_config |= ROW_SIZE(1); | |
1614 | break; | |
1615 | case 4: | |
1616 | gb_addr_config |= ROW_SIZE(2); | |
1617 | break; | |
1618 | } | |
1619 | ||
0a96d72b AD |
1620 | /* set up the tiling info dword. gb_addr_config is not adequate since it does | |
1621 | * not have bank info, so create a custom tiling dword. | |
1622 | * bits 3:0 num_pipes | |
1623 | * bits 7:4 num_banks | |
1624 | * bits 11:8 group_size | |
1625 | * bits 15:12 row_size | |
1626 | */ | |
1627 | rdev->config.si.tile_config = 0; | |
1628 | switch (rdev->config.si.num_tile_pipes) { | |
1629 | case 1: | |
1630 | rdev->config.si.tile_config |= (0 << 0); | |
1631 | break; | |
1632 | case 2: | |
1633 | rdev->config.si.tile_config |= (1 << 0); | |
1634 | break; | |
1635 | case 4: | |
1636 | rdev->config.si.tile_config |= (2 << 0); | |
1637 | break; | |
1638 | case 8: | |
1639 | default: | |
1640 | /* XXX what about 12? */ | |
1641 | rdev->config.si.tile_config |= (3 << 0); | |
1642 | break; | |
dca571a6 CK |
1643 | } |
1644 | switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) { | |
1645 | case 0: /* four banks */ | |
1a8ca750 | 1646 | rdev->config.si.tile_config |= 0 << 4; |
dca571a6 CK |
1647 | break; |
1648 | case 1: /* eight banks */ | |
1649 | rdev->config.si.tile_config |= 1 << 4; | |
1650 | break; | |
1651 | case 2: /* sixteen banks */ | |
1652 | default: | |
1653 | rdev->config.si.tile_config |= 2 << 4; | |
1654 | break; | |
1655 | } | |
0a96d72b AD |
1656 | rdev->config.si.tile_config |= |
1657 | ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; | |
1658 | rdev->config.si.tile_config |= | |
1659 | ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; | |
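/* Worked example (hypothetical field values): 8 pipes (0x3), eight
 * banks (0x1), a pipe interleave field of 1 and a row size field of 2
 * pack to:
 *
 * tile_config = (3 << 0) | (1 << 4) | (1 << 8) | (2 << 12) = 0x2113
 */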
1660 | ||
0a96d72b AD |
1661 | WREG32(GB_ADDR_CONFIG, gb_addr_config); |
1662 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); | |
1663 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); | |
8c5fd7ef AD |
1664 | WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); |
1665 | WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); | |
0a96d72b | 1666 | |
1a8ca750 | 1667 | si_tiling_mode_table_init(rdev); |
0a96d72b | 1668 | |
1a8ca750 AD |
1669 | si_setup_rb(rdev, rdev->config.si.max_shader_engines, |
1670 | rdev->config.si.max_sh_per_se, | |
1671 | rdev->config.si.max_backends_per_se); | |
0a96d72b | 1672 | |
1a8ca750 AD |
1673 | si_setup_spi(rdev, rdev->config.si.max_shader_engines, |
1674 | rdev->config.si.max_sh_per_se, | |
1675 | rdev->config.si.max_cu_per_sh); | |
0a96d72b | 1676 | |
0a96d72b AD |
1677 | |
1678 | /* set HW defaults for 3D engine */ | |
1679 | WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | | |
1680 | ROQ_IB2_START(0x2b))); | |
1681 | WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60)); | |
1682 | ||
1683 | sx_debug_1 = RREG32(SX_DEBUG_1); | |
1684 | WREG32(SX_DEBUG_1, sx_debug_1); | |
1685 | ||
1686 | WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4)); | |
1687 | ||
1688 | WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_frontend) | | |
1689 | SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_backend) | | |
1690 | SC_HIZ_TILE_FIFO_SIZE(rdev->config.si.sc_hiz_tile_fifo_size) | | |
1691 | SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.si.sc_earlyz_tile_fifo_size))); | |
1692 | ||
1693 | WREG32(VGT_NUM_INSTANCES, 1); | |
1694 | ||
1695 | WREG32(CP_PERFMON_CNTL, 0); | |
1696 | ||
1697 | WREG32(SQ_CONFIG, 0); | |
1698 | ||
1699 | WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | | |
1700 | FORCE_EOV_MAX_REZ_CNT(255))); | |
1701 | ||
1702 | WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) | | |
1703 | AUTO_INVLD_EN(ES_AND_GS_AUTO)); | |
1704 | ||
1705 | WREG32(VGT_GS_VERTEX_REUSE, 16); | |
1706 | WREG32(PA_SC_LINE_STIPPLE_STATE, 0); | |
1707 | ||
1708 | WREG32(CB_PERFCOUNTER0_SELECT0, 0); | |
1709 | WREG32(CB_PERFCOUNTER0_SELECT1, 0); | |
1710 | WREG32(CB_PERFCOUNTER1_SELECT0, 0); | |
1711 | WREG32(CB_PERFCOUNTER1_SELECT1, 0); | |
1712 | WREG32(CB_PERFCOUNTER2_SELECT0, 0); | |
1713 | WREG32(CB_PERFCOUNTER2_SELECT1, 0); | |
1714 | WREG32(CB_PERFCOUNTER3_SELECT0, 0); | |
1715 | WREG32(CB_PERFCOUNTER3_SELECT1, 0); | |
1716 | ||
1717 | tmp = RREG32(HDP_MISC_CNTL); | |
1718 | tmp |= HDP_FLUSH_INVALIDATE_CACHE; | |
1719 | WREG32(HDP_MISC_CNTL, tmp); | |
1720 | ||
1721 | hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); | |
1722 | WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); | |
1723 | ||
1724 | WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3)); | |
1725 | ||
1726 | udelay(50); | |
1727 | } | |
c476dde2 | 1728 | |
2ece2e8b AD |
1729 | /* |
1730 | * GPU scratch register helper functions. | |
1731 | */ | |
1732 | static void si_scratch_init(struct radeon_device *rdev) | |
1733 | { | |
1734 | int i; | |
1735 | ||
1736 | rdev->scratch.num_reg = 7; | |
1737 | rdev->scratch.reg_base = SCRATCH_REG0; | |
1738 | for (i = 0; i < rdev->scratch.num_reg; i++) { | |
1739 | rdev->scratch.free[i] = true; | |
1740 | rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); | |
1741 | } | |
1742 | } | |
1743 | ||
1744 | void si_fence_ring_emit(struct radeon_device *rdev, | |
1745 | struct radeon_fence *fence) | |
1746 | { | |
1747 | struct radeon_ring *ring = &rdev->ring[fence->ring]; | |
1748 | u64 addr = rdev->fence_drv[fence->ring].gpu_addr; | |
1749 | ||
1750 | /* flush read cache over gart */ | |
1751 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | |
1752 | radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); | |
1753 | radeon_ring_write(ring, 0); | |
1754 | radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); | |
1755 | radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA | | |
1756 | PACKET3_TC_ACTION_ENA | | |
1757 | PACKET3_SH_KCACHE_ACTION_ENA | | |
1758 | PACKET3_SH_ICACHE_ACTION_ENA); | |
1759 | radeon_ring_write(ring, 0xFFFFFFFF); | |
1760 | radeon_ring_write(ring, 0); | |
1761 | radeon_ring_write(ring, 10); /* poll interval */ | |
1762 | /* EVENT_WRITE_EOP - flush caches, send int */ | |
1763 | radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); | |
1764 | radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5)); | |
1765 | radeon_ring_write(ring, addr & 0xffffffff); | |
1766 | radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); | |
1767 | radeon_ring_write(ring, fence->seq); | |
1768 | radeon_ring_write(ring, 0); | |
1769 | } | |
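/* Reading the packet fields above against sid.h (assumed meanings):
 * DATA_SEL(1) asks the CP to write the 32-bit fence sequence number to
 * addr, INT_SEL(2) raises an interrupt once that write is confirmed,
 * and the trailing zero dword fills the unused upper half of the
 * 64-bit data field.
 */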
1770 | ||
1771 | /* | |
1772 | * IB stuff | |
1773 | */ | |
1774 | void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) | |
1775 | { | |
876dc9f3 | 1776 | struct radeon_ring *ring = &rdev->ring[ib->ring]; |
2ece2e8b AD |
1777 | u32 header; |
1778 | ||
a85a7da4 AD |
1779 | if (ib->is_const_ib) { |
1780 | /* set switch buffer packet before const IB */ | |
1781 | radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); | |
1782 | radeon_ring_write(ring, 0); | |
45df6803 | 1783 | |
2ece2e8b | 1784 | header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2); |
a85a7da4 | 1785 | } else { |
89d35807 | 1786 | u32 next_rptr; |
a85a7da4 | 1787 | if (ring->rptr_save_reg) { |
89d35807 | 1788 | next_rptr = ring->wptr + 3 + 4 + 8; |
a85a7da4 AD |
1789 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); |
1790 | radeon_ring_write(ring, ((ring->rptr_save_reg - | |
1791 | PACKET3_SET_CONFIG_REG_START) >> 2)); | |
1792 | radeon_ring_write(ring, next_rptr); | |
89d35807 AD |
1793 | } else if (rdev->wb.enabled) { |
1794 | next_rptr = ring->wptr + 5 + 4 + 8; | |
1795 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | |
1796 | radeon_ring_write(ring, (1 << 8)); | |
1797 | radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); | |
1798 | radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); | |
1799 | radeon_ring_write(ring, next_rptr); | |
a85a7da4 AD |
1800 | } |
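/* The dword budgets above are: 3 (SET_CONFIG_REG) or 5 (WRITE_DATA)
 * for the rptr update itself, plus 4 for the INDIRECT_BUFFER packet
 * emitted below, plus 8 for the trailing cache flush (SET_CONFIG_REG +
 * SURFACE_SYNC), so next_rptr points just past this IB's packets.
 */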
1801 | ||
2ece2e8b | 1802 | header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); |
a85a7da4 | 1803 | } |
2ece2e8b AD |
1804 | |
1805 | radeon_ring_write(ring, header); | |
1806 | radeon_ring_write(ring, | |
1807 | #ifdef __BIG_ENDIAN | |
1808 | (2 << 0) | | |
1809 | #endif | |
1810 | (ib->gpu_addr & 0xFFFFFFFC)); | |
1811 | radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); | |
4bf3dd92 CK |
1812 | radeon_ring_write(ring, ib->length_dw | |
1813 | (ib->vm ? (ib->vm->id << 24) : 0)); | |
2ece2e8b | 1814 | |
a85a7da4 AD |
1815 | if (!ib->is_const_ib) { |
1816 | /* flush read cache over gart for this vmid */ | |
1817 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | |
1818 | radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); | |
4bf3dd92 | 1819 | radeon_ring_write(ring, ib->vm ? ib->vm->id : 0); |
a85a7da4 AD |
1820 | radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); |
1821 | radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA | | |
1822 | PACKET3_TC_ACTION_ENA | | |
1823 | PACKET3_SH_KCACHE_ACTION_ENA | | |
1824 | PACKET3_SH_ICACHE_ACTION_ENA); | |
1825 | radeon_ring_write(ring, 0xFFFFFFFF); | |
1826 | radeon_ring_write(ring, 0); | |
1827 | radeon_ring_write(ring, 10); /* poll interval */ | |
1828 | } | |
2ece2e8b AD |
1829 | } |
1830 | ||
48c0c902 AD |
1831 | /* |
1832 | * CP. | |
1833 | */ | |
1834 | static void si_cp_enable(struct radeon_device *rdev, bool enable) | |
1835 | { | |
1836 | if (enable) | |
1837 | WREG32(CP_ME_CNTL, 0); | |
1838 | else { | |
1839 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); | |
1840 | WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT)); | |
1841 | WREG32(SCRATCH_UMSK, 0); | |
8c5fd7ef AD |
1842 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; |
1843 | rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; | |
1844 | rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; | |
48c0c902 AD |
1845 | } |
1846 | udelay(50); | |
1847 | } | |
1848 | ||
1849 | static int si_cp_load_microcode(struct radeon_device *rdev) | |
1850 | { | |
1851 | const __be32 *fw_data; | |
1852 | int i; | |
1853 | ||
1854 | if (!rdev->me_fw || !rdev->pfp_fw) | |
1855 | return -EINVAL; | |
1856 | ||
1857 | si_cp_enable(rdev, false); | |
1858 | ||
1859 | /* PFP */ | |
1860 | fw_data = (const __be32 *)rdev->pfp_fw->data; | |
1861 | WREG32(CP_PFP_UCODE_ADDR, 0); | |
1862 | for (i = 0; i < SI_PFP_UCODE_SIZE; i++) | |
1863 | WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++)); | |
1864 | WREG32(CP_PFP_UCODE_ADDR, 0); | |
1865 | ||
1866 | /* CE */ | |
1867 | fw_data = (const __be32 *)rdev->ce_fw->data; | |
1868 | WREG32(CP_CE_UCODE_ADDR, 0); | |
1869 | for (i = 0; i < SI_CE_UCODE_SIZE; i++) | |
1870 | WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++)); | |
1871 | WREG32(CP_CE_UCODE_ADDR, 0); | |
1872 | ||
1873 | /* ME */ | |
1874 | fw_data = (const __be32 *)rdev->me_fw->data; | |
1875 | WREG32(CP_ME_RAM_WADDR, 0); | |
1876 | for (i = 0; i < SI_PM4_UCODE_SIZE; i++) | |
1877 | WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++)); | |
1878 | WREG32(CP_ME_RAM_WADDR, 0); | |
1879 | ||
1880 | WREG32(CP_PFP_UCODE_ADDR, 0); | |
1881 | WREG32(CP_CE_UCODE_ADDR, 0); | |
1882 | WREG32(CP_ME_RAM_WADDR, 0); | |
1883 | WREG32(CP_ME_RAM_RADDR, 0); | |
1884 | return 0; | |
1885 | } | |
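/* The three loads above share one ADDR/DATA register pattern. A minimal
 * sketch of a factored helper (hypothetical, not part of the driver;
 * the WREG32 macro picks up rdev from the local scope):
 */
static void si_load_ucode_sketch(struct radeon_device *rdev,
u32 addr_reg, u32 data_reg,
const __be32 *fw, int dwords)
{
int i;

WREG32(addr_reg, 0); /* rewind the ucode write pointer */
for (i = 0; i < dwords; i++)
WREG32(data_reg, be32_to_cpup(fw++));
WREG32(addr_reg, 0); /* rewind so the CP fetches from word 0 */
}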
1886 | ||
1887 | static int si_cp_start(struct radeon_device *rdev) | |
1888 | { | |
1889 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | |
1890 | int r, i; | |
1891 | ||
1892 | r = radeon_ring_lock(rdev, ring, 7 + 4); | |
1893 | if (r) { | |
1894 | DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); | |
1895 | return r; | |
1896 | } | |
1897 | /* init the CP */ | |
1898 | radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5)); | |
1899 | radeon_ring_write(ring, 0x1); | |
1900 | radeon_ring_write(ring, 0x0); | |
1901 | radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1); | |
1902 | radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); | |
1903 | radeon_ring_write(ring, 0); | |
1904 | radeon_ring_write(ring, 0); | |
1905 | ||
1906 | /* init the CE partitions */ | |
1907 | radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2)); | |
1908 | radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); | |
1909 | radeon_ring_write(ring, 0xc000); | |
1910 | radeon_ring_write(ring, 0xe000); | |
1911 | radeon_ring_unlock_commit(rdev, ring); | |
1912 | ||
1913 | si_cp_enable(rdev, true); | |
1914 | ||
1915 | r = radeon_ring_lock(rdev, ring, si_default_size + 10); | |
1916 | if (r) { | |
1917 | DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); | |
1918 | return r; | |
1919 | } | |
1920 | ||
1921 | /* setup clear context state */ | |
1922 | radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); | |
1923 | radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); | |
1924 | ||
1925 | for (i = 0; i < si_default_size; i++) | |
1926 | radeon_ring_write(ring, si_default_state[i]); | |
1927 | ||
1928 | radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); | |
1929 | radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE); | |
1930 | ||
1931 | /* set clear context state */ | |
1932 | radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); | |
1933 | radeon_ring_write(ring, 0); | |
1934 | ||
1935 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); | |
1936 | radeon_ring_write(ring, 0x00000316); | |
1937 | radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ | |
1938 | radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ | |
1939 | ||
1940 | radeon_ring_unlock_commit(rdev, ring); | |
1941 | ||
1942 | for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) { | |
1943 | ring = &rdev->ring[i]; | |
1944 | r = radeon_ring_lock(rdev, ring, 2); | |
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
1945 | ||
1946 | /* clear the compute context state */ | |
1947 | radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0)); | |
1948 | radeon_ring_write(ring, 0); | |
1949 | ||
1950 | radeon_ring_unlock_commit(rdev, ring); | |
1951 | } | |
1952 | ||
1953 | return 0; | |
1954 | } | |
1955 | ||
1956 | static void si_cp_fini(struct radeon_device *rdev) | |
1957 | { | |
45df6803 | 1958 | struct radeon_ring *ring; |
48c0c902 | 1959 | si_cp_enable(rdev, false); |
45df6803 CK |
1960 | |
1961 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | |
1962 | radeon_ring_fini(rdev, ring); | |
1963 | radeon_scratch_free(rdev, ring->rptr_save_reg); | |
1964 | ||
1965 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; | |
1966 | radeon_ring_fini(rdev, ring); | |
1967 | radeon_scratch_free(rdev, ring->rptr_save_reg); | |
1968 | ||
1969 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; | |
1970 | radeon_ring_fini(rdev, ring); | |
1971 | radeon_scratch_free(rdev, ring->rptr_save_reg); | |
48c0c902 AD |
1972 | } |
1973 | ||
1974 | static int si_cp_resume(struct radeon_device *rdev) | |
1975 | { | |
1976 | struct radeon_ring *ring; | |
1977 | u32 tmp; | |
1978 | u32 rb_bufsz; | |
1979 | int r; | |
1980 | ||
1981 | /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */ | |
1982 | WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP | | |
1983 | SOFT_RESET_PA | | |
1984 | SOFT_RESET_VGT | | |
1985 | SOFT_RESET_SPI | | |
1986 | SOFT_RESET_SX)); | |
1987 | RREG32(GRBM_SOFT_RESET); | |
1988 | mdelay(15); | |
1989 | WREG32(GRBM_SOFT_RESET, 0); | |
1990 | RREG32(GRBM_SOFT_RESET); | |
1991 | ||
1992 | WREG32(CP_SEM_WAIT_TIMER, 0x0); | |
1993 | WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); | |
1994 | ||
1995 | /* Set the write pointer delay */ | |
1996 | WREG32(CP_RB_WPTR_DELAY, 0); | |
1997 | ||
1998 | WREG32(CP_DEBUG, 0); | |
1999 | WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); | |
2000 | ||
2001 | /* ring 0 - compute and gfx */ | |
2002 | /* Set ring buffer size */ | |
2003 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | |
2004 | rb_bufsz = drm_order(ring->ring_size / 8); | |
2005 | tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; | |
2006 | #ifdef __BIG_ENDIAN | |
2007 | tmp |= BUF_SWAP_32BIT; | |
2008 | #endif | |
2009 | WREG32(CP_RB0_CNTL, tmp); | |
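/* rb_bufsz is log2 of the ring size in 8-byte units: a 1 MB ring gives
 * drm_order(1048576 / 8) == 17. The block-size field shifted in above
 * is encoded the same way from the GPU page size.
 */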
2010 | ||
2011 | /* Initialize the ring buffer's read and write pointers */ | |
2012 | WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA); | |
2013 | ring->wptr = 0; | |
2014 | WREG32(CP_RB0_WPTR, ring->wptr); | |
2015 | ||
48fc7f7e | 2016 | /* set the wb address whether it's enabled or not */ |
48c0c902 AD |
2017 | WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); |
2018 | WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); | |
2019 | ||
2020 | if (rdev->wb.enabled) | |
2021 | WREG32(SCRATCH_UMSK, 0xff); | |
2022 | else { | |
2023 | tmp |= RB_NO_UPDATE; | |
2024 | WREG32(SCRATCH_UMSK, 0); | |
2025 | } | |
2026 | ||
2027 | mdelay(1); | |
2028 | WREG32(CP_RB0_CNTL, tmp); | |
2029 | ||
2030 | WREG32(CP_RB0_BASE, ring->gpu_addr >> 8); | |
2031 | ||
2032 | ring->rptr = RREG32(CP_RB0_RPTR); | |
2033 | ||
2034 | /* ring1 - compute only */ | |
2035 | /* Set ring buffer size */ | |
2036 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; | |
2037 | rb_bufsz = drm_order(ring->ring_size / 8); | |
2038 | tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; | |
2039 | #ifdef __BIG_ENDIAN | |
2040 | tmp |= BUF_SWAP_32BIT; | |
2041 | #endif | |
2042 | WREG32(CP_RB1_CNTL, tmp); | |
2043 | ||
2044 | /* Initialize the ring buffer's read and write pointers */ | |
2045 | WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA); | |
2046 | ring->wptr = 0; | |
2047 | WREG32(CP_RB1_WPTR, ring->wptr); | |
2048 | ||
48fc7f7e | 2049 | /* set the wb address whether it's enabled or not */ |
48c0c902 AD |
2050 | WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC); |
2051 | WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF); | |
2052 | ||
2053 | mdelay(1); | |
2054 | WREG32(CP_RB1_CNTL, tmp); | |
2055 | ||
2056 | WREG32(CP_RB1_BASE, ring->gpu_addr >> 8); | |
2057 | ||
2058 | ring->rptr = RREG32(CP_RB1_RPTR); | |
2059 | ||
2060 | /* ring2 - compute only */ | |
2061 | /* Set ring buffer size */ | |
2062 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; | |
2063 | rb_bufsz = drm_order(ring->ring_size / 8); | |
2064 | tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; | |
2065 | #ifdef __BIG_ENDIAN | |
2066 | tmp |= BUF_SWAP_32BIT; | |
2067 | #endif | |
2068 | WREG32(CP_RB2_CNTL, tmp); | |
2069 | ||
2070 | /* Initialize the ring buffer's read and write pointers */ | |
2071 | WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA); | |
2072 | ring->wptr = 0; | |
2073 | WREG32(CP_RB2_WPTR, ring->wptr); | |
2074 | ||
48fc7f7e | 2075 | /* set the wb address whether it's enabled or not */ |
48c0c902 AD |
2076 | WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC); |
2077 | WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF); | |
2078 | ||
2079 | mdelay(1); | |
2080 | WREG32(CP_RB2_CNTL, tmp); | |
2081 | ||
2082 | WREG32(CP_RB2_BASE, ring->gpu_addr >> 8); | |
2083 | ||
2084 | ring->rptr = RREG32(CP_RB2_RPTR); | |
2085 | ||
2086 | /* start the rings */ | |
2087 | si_cp_start(rdev); | |
2088 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true; | |
2089 | rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = true; | |
2090 | rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = true; | |
2091 | r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); | |
2092 | if (r) { | |
2093 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; | |
2094 | rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; | |
2095 | rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; | |
2096 | return r; | |
2097 | } | |
2098 | r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]); | |
2099 | if (r) { | |
2100 | rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; | |
2101 | } | |
2102 | r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]); | |
2103 | if (r) { | |
2104 | rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; | |
2105 | } | |
2106 | ||
2107 | return 0; | |
2108 | } | |
2109 | ||
c476dde2 AD |
2110 | bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) |
2111 | { | |
2112 | u32 srbm_status; | |
2113 | u32 grbm_status, grbm_status2; | |
2114 | u32 grbm_status_se0, grbm_status_se1; | |
c476dde2 AD |
2115 | |
2116 | srbm_status = RREG32(SRBM_STATUS); | |
2117 | grbm_status = RREG32(GRBM_STATUS); | |
2118 | grbm_status2 = RREG32(GRBM_STATUS2); | |
2119 | grbm_status_se0 = RREG32(GRBM_STATUS_SE0); | |
2120 | grbm_status_se1 = RREG32(GRBM_STATUS_SE1); | |
2121 | if (!(grbm_status & GUI_ACTIVE)) { | |
069211e5 | 2122 | radeon_ring_lockup_update(ring); |
c476dde2 AD |
2123 | return false; |
2124 | } | |
2125 | /* force CP activities */ | |
7b9ef16b | 2126 | radeon_ring_force_activity(rdev, ring); |
069211e5 | 2127 | return radeon_ring_test_lockup(rdev, ring); |
c476dde2 AD |
2128 | } |
2129 | ||
06bc6df0 AD |
2130 | static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) |
2131 | { | |
2132 | struct evergreen_mc_save save; | |
1c534671 AD |
2133 | u32 grbm_soft_reset = 0, srbm_soft_reset = 0; |
2134 | u32 tmp; | |
2135 | int ret = 0; | |
06bc6df0 | 2136 | |
19fc42ed | 2137 | if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) |
1c534671 | 2138 | reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP); |
19fc42ed AD |
2139 | |
2140 | if (RREG32(DMA_STATUS_REG) & DMA_IDLE) | |
2141 | reset_mask &= ~RADEON_RESET_DMA; | |
2142 | ||
06bc6df0 AD |
2143 | if (reset_mask == 0) |
2144 | return 0; | |
2145 | ||
2146 | dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask); | |
2147 | ||
1c534671 | 2148 | evergreen_print_gpu_status_regs(rdev); |
06bc6df0 AD |
2149 | dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", |
2150 | RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)); | |
2151 | dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", | |
2152 | RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); | |
2153 | ||
410a3418 AD |
2154 | r600_set_bios_scratch_engine_hung(rdev, true); |
2155 | ||
06bc6df0 | 2156 | evergreen_mc_stop(rdev, &save); |
1c534671 | 2157 | if (evergreen_mc_wait_for_idle(rdev)) { |
06bc6df0 AD |
2158 | dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); | |
2159 | } | |
2160 | ||
1c534671 AD |
2161 | /* Disable CP parsing/prefetching */ |
2162 | WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT); | |
2163 | ||
2164 | if (reset_mask & RADEON_RESET_DMA) { | |
2165 | /* dma0 */ | |
2166 | tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET); | |
2167 | tmp &= ~DMA_RB_ENABLE; | |
2168 | WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp); | |
2169 | ||
2170 | /* dma1 */ | |
2171 | tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET); | |
2172 | tmp &= ~DMA_RB_ENABLE; | |
2173 | WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp); | |
2174 | } | |
2175 | ||
2176 | if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) { | |
2177 | grbm_soft_reset = SOFT_RESET_CB | | |
2178 | SOFT_RESET_DB | | |
2179 | SOFT_RESET_GDS | | |
2180 | SOFT_RESET_PA | | |
2181 | SOFT_RESET_SC | | |
2182 | SOFT_RESET_BCI | | |
2183 | SOFT_RESET_SPI | | |
2184 | SOFT_RESET_SX | | |
2185 | SOFT_RESET_TC | | |
2186 | SOFT_RESET_TA | | |
2187 | SOFT_RESET_VGT | | |
2188 | SOFT_RESET_IA; | |
2189 | } | |
2190 | ||
2191 | if (reset_mask & RADEON_RESET_CP) { | |
2192 | grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT; | |
2193 | ||
2194 | srbm_soft_reset |= SOFT_RESET_GRBM; | |
2195 | } | |
06bc6df0 AD |
2196 | |
2197 | if (reset_mask & RADEON_RESET_DMA) | |
1c534671 AD |
2198 | srbm_soft_reset |= SOFT_RESET_DMA | SOFT_RESET_DMA1; |
2199 | ||
2200 | if (grbm_soft_reset) { | |
2201 | tmp = RREG32(GRBM_SOFT_RESET); | |
2202 | tmp |= grbm_soft_reset; | |
2203 | dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); | |
2204 | WREG32(GRBM_SOFT_RESET, tmp); | |
2205 | tmp = RREG32(GRBM_SOFT_RESET); | |
2206 | ||
2207 | udelay(50); | |
2208 | ||
2209 | tmp &= ~grbm_soft_reset; | |
2210 | WREG32(GRBM_SOFT_RESET, tmp); | |
2211 | tmp = RREG32(GRBM_SOFT_RESET); | |
2212 | } | |
2213 | ||
2214 | if (srbm_soft_reset) { | |
2215 | tmp = RREG32(SRBM_SOFT_RESET); | |
2216 | tmp |= srbm_soft_reset; | |
2217 | dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); | |
2218 | WREG32(SRBM_SOFT_RESET, tmp); | |
2219 | tmp = RREG32(SRBM_SOFT_RESET); | |
2220 | ||
2221 | udelay(50); | |
2222 | ||
2223 | tmp &= ~srbm_soft_reset; | |
2224 | WREG32(SRBM_SOFT_RESET, tmp); | |
2225 | tmp = RREG32(SRBM_SOFT_RESET); | |
2226 | } | |
06bc6df0 AD |
2227 | |
2228 | /* Wait a little for things to settle down */ | |
2229 | udelay(50); | |
2230 | ||
c476dde2 | 2231 | evergreen_mc_resume(rdev, &save); |
1c534671 AD |
2232 | udelay(50); |
2233 | ||
2234 | #if 0 | |
2235 | if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) { | |
2236 | if (RREG32(GRBM_STATUS) & GUI_ACTIVE) | |
2237 | ret = -EAGAIN; | |
2238 | } | |
2239 | ||
2240 | if (reset_mask & RADEON_RESET_DMA) { | |
2241 | if (!(RREG32(DMA_STATUS_REG) & DMA_IDLE)) | |
2242 | ret = -EAGAIN; | |
2243 | } | |
2244 | #endif | |
2245 | ||
2246 | if (!ret) | |
2247 | r600_set_bios_scratch_engine_hung(rdev, false); | |
410a3418 | 2248 | |
1c534671 | 2249 | evergreen_print_gpu_status_regs(rdev); |
410a3418 | 2250 | |
c476dde2 AD |
2251 | return 0; |
2252 | } | |
2253 | ||
2254 | int si_asic_reset(struct radeon_device *rdev) | |
2255 | { | |
06bc6df0 AD |
2256 | return si_gpu_soft_reset(rdev, (RADEON_RESET_GFX | |
2257 | RADEON_RESET_COMPUTE | | |
1c534671 AD |
2258 | RADEON_RESET_DMA | |
2259 | RADEON_RESET_CP)); | |
c476dde2 AD |
2260 | } |
2261 | ||
d2800ee5 AD |
2262 | /* MC */ |
2263 | static void si_mc_program(struct radeon_device *rdev) | |
2264 | { | |
2265 | struct evergreen_mc_save save; | |
2266 | u32 tmp; | |
2267 | int i, j; | |
2268 | ||
2269 | /* Initialize HDP */ | |
2270 | for (i = 0, j = 0; i < 32; i++, j += 0x18) { | |
2271 | WREG32((0x2c14 + j), 0x00000000); | |
2272 | WREG32((0x2c18 + j), 0x00000000); | |
2273 | WREG32((0x2c1c + j), 0x00000000); | |
2274 | WREG32((0x2c20 + j), 0x00000000); | |
2275 | WREG32((0x2c24 + j), 0x00000000); | |
2276 | } | |
2277 | WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); | |
2278 | ||
2279 | evergreen_mc_stop(rdev, &save); | |
2280 | if (radeon_mc_wait_for_idle(rdev)) { | |
2281 | dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); | |
2282 | } | |
2283 | /* Lock out access through the VGA aperture */ | |
2284 | WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); | |
2285 | /* Update configuration */ | |
2286 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, | |
2287 | rdev->mc.vram_start >> 12); | |
2288 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, | |
2289 | rdev->mc.vram_end >> 12); | |
2290 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, | |
2291 | rdev->vram_scratch.gpu_addr >> 12); | |
2292 | tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; | |
2293 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); | |
2294 | WREG32(MC_VM_FB_LOCATION, tmp); | |
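/* MC_VM_FB_LOCATION holds the FB base and top in 16 MB units: e.g.
 * VRAM at 0x0 - 0x7FFFFFFF (2 GB) packs to (0x7F << 16) | 0x00
 * == 0x007F0000.
 */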
2295 | /* XXX double check these! */ | |
2296 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); | |
2297 | WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30)); | |
2298 | WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); | |
2299 | WREG32(MC_VM_AGP_BASE, 0); | |
2300 | WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); | |
2301 | WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); | |
2302 | if (radeon_mc_wait_for_idle(rdev)) { | |
2303 | dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); | |
2304 | } | |
2305 | evergreen_mc_resume(rdev, &save); | |
2306 | /* we need to own VRAM, so turn off the VGA renderer here | |
2307 | * to stop it from overwriting our objects */ | |
2308 | rv515_vga_render_disable(rdev); | |
2309 | } | |
2310 | ||
2311 | /* SI MC address space is 40 bits */ | |
2312 | static void si_vram_location(struct radeon_device *rdev, | |
2313 | struct radeon_mc *mc, u64 base) | |
2314 | { | |
2315 | mc->vram_start = base; | |
2316 | if (mc->mc_vram_size > (0xFFFFFFFFFFULL - base + 1)) { | |
2317 | dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); | |
2318 | mc->real_vram_size = mc->aper_size; | |
2319 | mc->mc_vram_size = mc->aper_size; | |
2320 | } | |
2321 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; | |
2322 | dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", | |
2323 | mc->mc_vram_size >> 20, mc->vram_start, | |
2324 | mc->vram_end, mc->real_vram_size >> 20); | |
2325 | } | |
2326 | ||
2327 | static void si_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) | |
2328 | { | |
2329 | u64 size_af, size_bf; | |
2330 | ||
2331 | size_af = ((0xFFFFFFFFFFULL - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align; | |
2332 | size_bf = mc->vram_start & ~mc->gtt_base_align; | |
2333 | if (size_bf > size_af) { | |
2334 | if (mc->gtt_size > size_bf) { | |
2335 | dev_warn(rdev->dev, "limiting GTT\n"); | |
2336 | mc->gtt_size = size_bf; | |
2337 | } | |
2338 | mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size; | |
2339 | } else { | |
2340 | if (mc->gtt_size > size_af) { | |
2341 | dev_warn(rdev->dev, "limiting GTT\n"); | |
2342 | mc->gtt_size = size_af; | |
2343 | } | |
2344 | mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align; | |
2345 | } | |
2346 | mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; | |
2347 | dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n", | |
2348 | mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end); | |
2349 | } | |
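/* Worked example: VRAM at 0x0 - 0x7FFFFFFF with a 1 GB GTT request
 * gives size_bf == 0, so the GTT lands just above VRAM at
 * 0x80000000 - 0xBFFFFFFF (gtt_base_align is 0 here, see below).
 */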
2350 | ||
2351 | static void si_vram_gtt_location(struct radeon_device *rdev, | |
2352 | struct radeon_mc *mc) | |
2353 | { | |
2354 | if (mc->mc_vram_size > 0xFFC0000000ULL) { | |
2355 | /* leave room for at least 1024M GTT */ | |
2356 | dev_warn(rdev->dev, "limiting VRAM\n"); | |
2357 | mc->real_vram_size = 0xFFC0000000ULL; | |
2358 | mc->mc_vram_size = 0xFFC0000000ULL; | |
2359 | } | |
2360 | si_vram_location(rdev, &rdev->mc, 0); | |
2361 | rdev->mc.gtt_base_align = 0; | |
2362 | si_gtt_location(rdev, mc); | |
2363 | } | |
2364 | ||
2365 | static int si_mc_init(struct radeon_device *rdev) | |
2366 | { | |
2367 | u32 tmp; | |
2368 | int chansize, numchan; | |
2369 | ||
2370 | /* Get VRAM information */ | |
2371 | rdev->mc.vram_is_ddr = true; | |
2372 | tmp = RREG32(MC_ARB_RAMCFG); | |
2373 | if (tmp & CHANSIZE_OVERRIDE) { | |
2374 | chansize = 16; | |
2375 | } else if (tmp & CHANSIZE_MASK) { | |
2376 | chansize = 64; | |
2377 | } else { | |
2378 | chansize = 32; | |
2379 | } | |
2380 | tmp = RREG32(MC_SHARED_CHMAP); | |
2381 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | |
2382 | case 0: | |
2383 | default: | |
2384 | numchan = 1; | |
2385 | break; | |
2386 | case 1: | |
2387 | numchan = 2; | |
2388 | break; | |
2389 | case 2: | |
2390 | numchan = 4; | |
2391 | break; | |
2392 | case 3: | |
2393 | numchan = 8; | |
2394 | break; | |
2395 | case 4: | |
2396 | numchan = 3; | |
2397 | break; | |
2398 | case 5: | |
2399 | numchan = 6; | |
2400 | break; | |
2401 | case 6: | |
2402 | numchan = 10; | |
2403 | break; | |
2404 | case 7: | |
2405 | numchan = 12; | |
2406 | break; | |
2407 | case 8: | |
2408 | numchan = 16; | |
2409 | break; | |
2410 | } | |
2411 | rdev->mc.vram_width = numchan * chansize; | |
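/* e.g. a Tahiti board reporting 12 channels of 32 bits each ends up
 * with a 384-bit effective memory interface. */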
2412 | /* Could the aperture size report 0? */ | |
2413 | rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); | |
2414 | rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); | |
2415 | /* size in MB on si */ | |
2416 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; | |
2417 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; | |
2418 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | |
2419 | si_vram_gtt_location(rdev, &rdev->mc); | |
2420 | radeon_update_bandwidth_info(rdev); | |
2421 | ||
2422 | return 0; | |
2423 | } | |
2424 | ||
2425 | /* | |
2426 | * GART | |
2427 | */ | |
2428 | void si_pcie_gart_tlb_flush(struct radeon_device *rdev) | |
2429 | { | |
2430 | /* flush hdp cache */ | |
2431 | WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); | |
2432 | ||
2433 | /* bits 0-15 are the VM contexts0-15 */ | |
2434 | WREG32(VM_INVALIDATE_REQUEST, 1); | |
2435 | } | |
2436 | ||
1109ca09 | 2437 | static int si_pcie_gart_enable(struct radeon_device *rdev) |
d2800ee5 AD |
2438 | { |
2439 | int r, i; | |
2440 | ||
2441 | if (rdev->gart.robj == NULL) { | |
2442 | dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); | |
2443 | return -EINVAL; | |
2444 | } | |
2445 | r = radeon_gart_table_vram_pin(rdev); | |
2446 | if (r) | |
2447 | return r; | |
2448 | radeon_gart_restore(rdev); | |
2449 | /* Setup TLB control */ | |
2450 | WREG32(MC_VM_MX_L1_TLB_CNTL, | |
2451 | (0xA << 7) | | |
2452 | ENABLE_L1_TLB | | |
2453 | SYSTEM_ACCESS_MODE_NOT_IN_SYS | | |
2454 | ENABLE_ADVANCED_DRIVER_MODEL | | |
2455 | SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); | |
2456 | /* Setup L2 cache */ | |
2457 | WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | | |
2458 | ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | | |
2459 | ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE | | |
2460 | EFFECTIVE_L2_QUEUE_SIZE(7) | | |
2461 | CONTEXT1_IDENTITY_ACCESS_MODE(1)); | |
2462 | WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE); | |
2463 | WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | | |
2464 | L2_CACHE_BIGK_FRAGMENT_SIZE(0)); | |
2465 | /* setup context0 */ | |
2466 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); | |
2467 | WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); | |
2468 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); | |
2469 | WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, | |
2470 | (u32)(rdev->dummy_page.addr >> 12)); | |
2471 | WREG32(VM_CONTEXT0_CNTL2, 0); | |
2472 | WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | | |
2473 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT)); | |
2474 | ||
2475 | WREG32(0x15D4, 0); | |
2476 | WREG32(0x15D8, 0); | |
2477 | WREG32(0x15DC, 0); | |
2478 | ||
2479 | /* empty context1-15 */ | |
d2800ee5 AD |
2480 | /* set vm size, must be a multiple of 4 */ |
2481 | WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); | |
c21b328e | 2482 | WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn); |
23d4f1f2 AD |
2483 | /* Assign the pt base to something valid for now; the pts used for |
2484 | * the VMs are determined by the application and set up and assigned | |
2485 | * on the fly in the vm part of radeon_gart.c | |
2486 | */ | |
d2800ee5 AD |
2487 | for (i = 1; i < 16; i++) { |
2488 | if (i < 8) | |
2489 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), | |
2490 | rdev->gart.table_addr >> 12); | |
2491 | else | |
2492 | WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2), | |
2493 | rdev->gart.table_addr >> 12); | |
2494 | } | |
2495 | ||
2496 | /* enable context1-15 */ | |
2497 | WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, | |
2498 | (u32)(rdev->dummy_page.addr >> 12)); | |
ae133a11 | 2499 | WREG32(VM_CONTEXT1_CNTL2, 4); |
fa87e62d | 2500 | WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | |
ae133a11 CK |
2501 | RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT | |
2502 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT | | |
2503 | DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT | | |
2504 | DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT | | |
2505 | PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT | | |
2506 | PDE0_PROTECTION_FAULT_ENABLE_DEFAULT | | |
2507 | VALID_PROTECTION_FAULT_ENABLE_INTERRUPT | | |
2508 | VALID_PROTECTION_FAULT_ENABLE_DEFAULT | | |
2509 | READ_PROTECTION_FAULT_ENABLE_INTERRUPT | | |
2510 | READ_PROTECTION_FAULT_ENABLE_DEFAULT | | |
2511 | WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT | | |
2512 | WRITE_PROTECTION_FAULT_ENABLE_DEFAULT); | |
d2800ee5 AD |
2513 | |
2514 | si_pcie_gart_tlb_flush(rdev); | |
2515 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", | |
2516 | (unsigned)(rdev->mc.gtt_size >> 20), | |
2517 | (unsigned long long)rdev->gart.table_addr); | |
2518 | rdev->gart.ready = true; | |
2519 | return 0; | |
2520 | } | |
2521 | ||
1109ca09 | 2522 | static void si_pcie_gart_disable(struct radeon_device *rdev) |
d2800ee5 AD |
2523 | { |
2524 | /* Disable all tables */ | |
2525 | WREG32(VM_CONTEXT0_CNTL, 0); | |
2526 | WREG32(VM_CONTEXT1_CNTL, 0); | |
2527 | /* Setup TLB control */ | |
2528 | WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS | | |
2529 | SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); | |
2530 | /* Setup L2 cache */ | |
2531 | WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | | |
2532 | ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE | | |
2533 | EFFECTIVE_L2_QUEUE_SIZE(7) | | |
2534 | CONTEXT1_IDENTITY_ACCESS_MODE(1)); | |
2535 | WREG32(VM_L2_CNTL2, 0); | |
2536 | WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | | |
2537 | L2_CACHE_BIGK_FRAGMENT_SIZE(0)); | |
2538 | radeon_gart_table_vram_unpin(rdev); | |
2539 | } | |
2540 | ||
1109ca09 | 2541 | static void si_pcie_gart_fini(struct radeon_device *rdev) |
d2800ee5 AD |
2542 | { |
2543 | si_pcie_gart_disable(rdev); | |
2544 | radeon_gart_table_vram_free(rdev); | |
2545 | radeon_gart_fini(rdev); | |
2546 | } | |
2547 | ||
498dd8b3 AD |
2548 | /* vm parser */ |
2549 | static bool si_vm_reg_valid(u32 reg) | |
2550 | { | |
2551 | /* context regs are fine */ | |
2552 | if (reg >= 0x28000) | |
2553 | return true; | |
2554 | ||
2555 | /* check config regs */ | |
2556 | switch (reg) { | |
2557 | case GRBM_GFX_INDEX: | |
f418b88a | 2558 | case CP_STRMOUT_CNTL: |
498dd8b3 AD |
2559 | case VGT_VTX_VECT_EJECT_REG: |
2560 | case VGT_CACHE_INVALIDATION: | |
2561 | case VGT_ESGS_RING_SIZE: | |
2562 | case VGT_GSVS_RING_SIZE: | |
2563 | case VGT_GS_VERTEX_REUSE: | |
2564 | case VGT_PRIMITIVE_TYPE: | |
2565 | case VGT_INDEX_TYPE: | |
2566 | case VGT_NUM_INDICES: | |
2567 | case VGT_NUM_INSTANCES: | |
2568 | case VGT_TF_RING_SIZE: | |
2569 | case VGT_HS_OFFCHIP_PARAM: | |
2570 | case VGT_TF_MEMORY_BASE: | |
2571 | case PA_CL_ENHANCE: | |
2572 | case PA_SU_LINE_STIPPLE_VALUE: | |
2573 | case PA_SC_LINE_STIPPLE_STATE: | |
2574 | case PA_SC_ENHANCE: | |
2575 | case SQC_CACHES: | |
2576 | case SPI_STATIC_THREAD_MGMT_1: | |
2577 | case SPI_STATIC_THREAD_MGMT_2: | |
2578 | case SPI_STATIC_THREAD_MGMT_3: | |
2579 | case SPI_PS_MAX_WAVE_ID: | |
2580 | case SPI_CONFIG_CNTL: | |
2581 | case SPI_CONFIG_CNTL_1: | |
2582 | case TA_CNTL_AUX: | |
2583 | return true; | |
2584 | default: | |
2585 | DRM_ERROR("Invalid register 0x%x in CS\n", reg); | |
2586 | return false; | |
2587 | } | |
2588 | } | |
2589 | ||
2590 | static int si_vm_packet3_ce_check(struct radeon_device *rdev, | |
2591 | u32 *ib, struct radeon_cs_packet *pkt) | |
2592 | { | |
2593 | switch (pkt->opcode) { | |
2594 | case PACKET3_NOP: | |
2595 | case PACKET3_SET_BASE: | |
2596 | case PACKET3_SET_CE_DE_COUNTERS: | |
2597 | case PACKET3_LOAD_CONST_RAM: | |
2598 | case PACKET3_WRITE_CONST_RAM: | |
2599 | case PACKET3_WRITE_CONST_RAM_OFFSET: | |
2600 | case PACKET3_DUMP_CONST_RAM: | |
2601 | case PACKET3_INCREMENT_CE_COUNTER: | |
2602 | case PACKET3_WAIT_ON_DE_COUNTER: | |
2603 | case PACKET3_CE_WRITE: | |
2604 | break; | |
2605 | default: | |
2606 | DRM_ERROR("Invalid CE packet3: 0x%x\n", pkt->opcode); | |
2607 | return -EINVAL; | |
2608 | } | |
2609 | return 0; | |
2610 | } | |
2611 | ||
2612 | static int si_vm_packet3_gfx_check(struct radeon_device *rdev, | |
2613 | u32 *ib, struct radeon_cs_packet *pkt) | |
2614 | { | |
2615 | u32 idx = pkt->idx + 1; | |
2616 | u32 idx_value = ib[idx]; | |
2617 | u32 start_reg, end_reg, reg, i; | |
5aa709be | 2618 | u32 command, info; |
498dd8b3 AD |
2619 | |
2620 | switch (pkt->opcode) { | |
2621 | case PACKET3_NOP: | |
2622 | case PACKET3_SET_BASE: | |
2623 | case PACKET3_CLEAR_STATE: | |
2624 | case PACKET3_INDEX_BUFFER_SIZE: | |
2625 | case PACKET3_DISPATCH_DIRECT: | |
2626 | case PACKET3_DISPATCH_INDIRECT: | |
2627 | case PACKET3_ALLOC_GDS: | |
2628 | case PACKET3_WRITE_GDS_RAM: | |
2629 | case PACKET3_ATOMIC_GDS: | |
2630 | case PACKET3_ATOMIC: | |
2631 | case PACKET3_OCCLUSION_QUERY: | |
2632 | case PACKET3_SET_PREDICATION: | |
2633 | case PACKET3_COND_EXEC: | |
2634 | case PACKET3_PRED_EXEC: | |
2635 | case PACKET3_DRAW_INDIRECT: | |
2636 | case PACKET3_DRAW_INDEX_INDIRECT: | |
2637 | case PACKET3_INDEX_BASE: | |
2638 | case PACKET3_DRAW_INDEX_2: | |
2639 | case PACKET3_CONTEXT_CONTROL: | |
2640 | case PACKET3_INDEX_TYPE: | |
2641 | case PACKET3_DRAW_INDIRECT_MULTI: | |
2642 | case PACKET3_DRAW_INDEX_AUTO: | |
2643 | case PACKET3_DRAW_INDEX_IMMD: | |
2644 | case PACKET3_NUM_INSTANCES: | |
2645 | case PACKET3_DRAW_INDEX_MULTI_AUTO: | |
2646 | case PACKET3_STRMOUT_BUFFER_UPDATE: | |
2647 | case PACKET3_DRAW_INDEX_OFFSET_2: | |
2648 | case PACKET3_DRAW_INDEX_MULTI_ELEMENT: | |
2649 | case PACKET3_DRAW_INDEX_INDIRECT_MULTI: | |
2650 | case PACKET3_MPEG_INDEX: | |
2651 | case PACKET3_WAIT_REG_MEM: | |
2652 | case PACKET3_MEM_WRITE: | |
2653 | case PACKET3_PFP_SYNC_ME: | |
2654 | case PACKET3_SURFACE_SYNC: | |
2655 | case PACKET3_EVENT_WRITE: | |
2656 | case PACKET3_EVENT_WRITE_EOP: | |
2657 | case PACKET3_EVENT_WRITE_EOS: | |
2658 | case PACKET3_SET_CONTEXT_REG: | |
2659 | case PACKET3_SET_CONTEXT_REG_INDIRECT: | |
2660 | case PACKET3_SET_SH_REG: | |
2661 | case PACKET3_SET_SH_REG_OFFSET: | |
2662 | case PACKET3_INCREMENT_DE_COUNTER: | |
2663 | case PACKET3_WAIT_ON_CE_COUNTER: | |
2664 | case PACKET3_WAIT_ON_AVAIL_BUFFER: | |
2665 | case PACKET3_ME_WRITE: | |
2666 | break; | |
2667 | case PACKET3_COPY_DATA: | |
2668 | if ((idx_value & 0xf00) == 0) { | |
2669 | reg = ib[idx + 3] * 4; | |
2670 | if (!si_vm_reg_valid(reg)) | |
2671 | return -EINVAL; | |
2672 | } | |
2673 | break; | |
2674 | case PACKET3_WRITE_DATA: | |
2675 | if ((idx_value & 0xf00) == 0) { | |
2676 | start_reg = ib[idx + 1] * 4; | |
2677 | if (idx_value & 0x10000) { | |
2678 | if (!si_vm_reg_valid(start_reg)) | |
2679 | return -EINVAL; | |
2680 | } else { | |
2681 | for (i = 0; i < (pkt->count - 2); i++) { | |
2682 | reg = start_reg + (4 * i); | |
2683 | if (!si_vm_reg_valid(reg)) | |
2684 | return -EINVAL; | |
2685 | } | |
2686 | } | |
2687 | } | |
2688 | break; | |
2689 | case PACKET3_COND_WRITE: | |
2690 | if (idx_value & 0x100) { | |
2691 | reg = ib[idx + 5] * 4; | |
2692 | if (!si_vm_reg_valid(reg)) | |
2693 | return -EINVAL; | |
2694 | } | |
2695 | break; | |
2696 | case PACKET3_COPY_DW: | |
2697 | if (idx_value & 0x2) { | |
2698 | reg = ib[idx + 3] * 4; | |
2699 | if (!si_vm_reg_valid(reg)) | |
2700 | return -EINVAL; | |
2701 | } | |
2702 | break; | |
2703 | case PACKET3_SET_CONFIG_REG: | |
2704 | start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START; | |
2705 | end_reg = 4 * pkt->count + start_reg - 4; | |
2706 | if ((start_reg < PACKET3_SET_CONFIG_REG_START) || | |
2707 | (start_reg >= PACKET3_SET_CONFIG_REG_END) || | |
2708 | (end_reg >= PACKET3_SET_CONFIG_REG_END)) { | |
2709 | DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); | |
2710 | return -EINVAL; | |
2711 | } | |
2712 | for (i = 0; i < pkt->count; i++) { | |
2713 | reg = start_reg + (4 * i); | |
2714 | if (!si_vm_reg_valid(reg)) | |
2715 | return -EINVAL; | |
2716 | } | |
2717 | break; | |
5aa709be AD |
2718 | case PACKET3_CP_DMA: |
2719 | command = ib[idx + 4]; | |
2720 | info = ib[idx + 1]; | |
2721 | if (command & PACKET3_CP_DMA_CMD_SAS) { | |
2722 | /* src address space is register */ | |
2723 | if (((info & 0x60000000) >> 29) == 0) { | |
2724 | start_reg = idx_value << 2; | |
2725 | if (command & PACKET3_CP_DMA_CMD_SAIC) { | |
2726 | reg = start_reg; | |
2727 | if (!si_vm_reg_valid(reg)) { | |
2728 | DRM_ERROR("CP DMA Bad SRC register\n"); | |
2729 | return -EINVAL; | |
2730 | } | |
2731 | } else { | |
2732 | for (i = 0; i < (command & 0x1fffff); i++) { | |
2733 | reg = start_reg + (4 * i); | |
2734 | if (!si_vm_reg_valid(reg)) { | |
2735 | DRM_ERROR("CP DMA Bad SRC register\n"); | |
2736 | return -EINVAL; | |
2737 | } | |
2738 | } | |
2739 | } | |
2740 | } | |
2741 | } | |
2742 | if (command & PACKET3_CP_DMA_CMD_DAS) { | |
2743 | /* dst address space is register */ | |
2744 | if (((info & 0x00300000) >> 20) == 0) { | |
2745 | start_reg = ib[idx + 2]; | |
2746 | if (command & PACKET3_CP_DMA_CMD_DAIC) { | |
2747 | reg = start_reg; | |
2748 | if (!si_vm_reg_valid(reg)) { | |
2749 | DRM_ERROR("CP DMA Bad DST register\n"); | |
2750 | return -EINVAL; | |
2751 | } | |
2752 | } else { | |
2753 | for (i = 0; i < (command & 0x1fffff); i++) { | |
2754 | reg = start_reg + (4 * i); | |
2755 | if (!si_vm_reg_valid(reg)) { | |
2756 | DRM_ERROR("CP DMA Bad DST register\n"); | |
2757 | return -EINVAL; | |
2758 | } | |
2759 | } | |
2760 | } | |
2761 | } | |
2762 | } | |
2763 | break; | |
498dd8b3 AD |
2764 | default: |
2765 | DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode); | |
2766 | return -EINVAL; | |
2767 | } | |
2768 | return 0; | |
2769 | } | |
2770 | ||
2771 | static int si_vm_packet3_compute_check(struct radeon_device *rdev, | |
2772 | u32 *ib, struct radeon_cs_packet *pkt) | |
2773 | { | |
2774 | u32 idx = pkt->idx + 1; | |
2775 | u32 idx_value = ib[idx]; | |
2776 | u32 start_reg, reg, i; | |
2777 | ||
2778 | switch (pkt->opcode) { | |
2779 | case PACKET3_NOP: | |
2780 | case PACKET3_SET_BASE: | |
2781 | case PACKET3_CLEAR_STATE: | |
2782 | case PACKET3_DISPATCH_DIRECT: | |
2783 | case PACKET3_DISPATCH_INDIRECT: | |
2784 | case PACKET3_ALLOC_GDS: | |
2785 | case PACKET3_WRITE_GDS_RAM: | |
2786 | case PACKET3_ATOMIC_GDS: | |
2787 | case PACKET3_ATOMIC: | |
2788 | case PACKET3_OCCLUSION_QUERY: | |
2789 | case PACKET3_SET_PREDICATION: | |
2790 | case PACKET3_COND_EXEC: | |
2791 | case PACKET3_PRED_EXEC: | |
2792 | case PACKET3_CONTEXT_CONTROL: | |
2793 | case PACKET3_STRMOUT_BUFFER_UPDATE: | |
2794 | case PACKET3_WAIT_REG_MEM: | |
2795 | case PACKET3_MEM_WRITE: | |
2796 | case PACKET3_PFP_SYNC_ME: | |
2797 | case PACKET3_SURFACE_SYNC: | |
2798 | case PACKET3_EVENT_WRITE: | |
2799 | case PACKET3_EVENT_WRITE_EOP: | |
2800 | case PACKET3_EVENT_WRITE_EOS: | |
2801 | case PACKET3_SET_CONTEXT_REG: | |
2802 | case PACKET3_SET_CONTEXT_REG_INDIRECT: | |
2803 | case PACKET3_SET_SH_REG: | |
2804 | case PACKET3_SET_SH_REG_OFFSET: | |
2805 | case PACKET3_INCREMENT_DE_COUNTER: | |
2806 | case PACKET3_WAIT_ON_CE_COUNTER: | |
2807 | case PACKET3_WAIT_ON_AVAIL_BUFFER: | |
2808 | case PACKET3_ME_WRITE: | |
2809 | break; | |
2810 | case PACKET3_COPY_DATA: | |
2811 | if ((idx_value & 0xf00) == 0) { | |
2812 | reg = ib[idx + 3] * 4; | |
2813 | if (!si_vm_reg_valid(reg)) | |
2814 | return -EINVAL; | |
2815 | } | |
2816 | break; | |
2817 | case PACKET3_WRITE_DATA: | |
2818 | if ((idx_value & 0xf00) == 0) { | |
2819 | start_reg = ib[idx + 1] * 4; | |
2820 | if (idx_value & 0x10000) { | |
2821 | if (!si_vm_reg_valid(start_reg)) | |
2822 | return -EINVAL; | |
2823 | } else { | |
2824 | for (i = 0; i < (pkt->count - 2); i++) { | |
2825 | reg = start_reg + (4 * i); | |
2826 | if (!si_vm_reg_valid(reg)) | |
2827 | return -EINVAL; | |
2828 | } | |
2829 | } | |
2830 | } | |
2831 | break; | |
2832 | case PACKET3_COND_WRITE: | |
2833 | if (idx_value & 0x100) { | |
2834 | reg = ib[idx + 5] * 4; | |
2835 | if (!si_vm_reg_valid(reg)) | |
2836 | return -EINVAL; | |
2837 | } | |
2838 | break; | |
2839 | case PACKET3_COPY_DW: | |
2840 | if (idx_value & 0x2) { | |
2841 | reg = ib[idx + 3] * 4; | |
2842 | if (!si_vm_reg_valid(reg)) | |
2843 | return -EINVAL; | |
2844 | } | |
2845 | break; | |
2846 | default: | |
2847 | DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode); | |
2848 | return -EINVAL; | |
2849 | } | |
2850 | return 0; | |
2851 | } | |
2852 | ||
2853 | int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) | |
2854 | { | |
2855 | int ret = 0; | |
2856 | u32 idx = 0; | |
2857 | struct radeon_cs_packet pkt; | |
2858 | ||
2859 | do { | |
2860 | pkt.idx = idx; | |
4e872ae2 IH |
2861 | pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]); |
2862 | pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]); | |
498dd8b3 AD |
2863 | pkt.one_reg_wr = 0; |
2864 | switch (pkt.type) { | |
4e872ae2 | 2865 | case RADEON_PACKET_TYPE0: |
498dd8b3 AD |
2866 | dev_err(rdev->dev, "Packet0 not allowed!\n"); |
2867 | ret = -EINVAL; | |
2868 | break; | |
4e872ae2 | 2869 | case RADEON_PACKET_TYPE2: |
498dd8b3 AD |
2870 | idx += 1; |
2871 | break; | |
4e872ae2 IH |
2872 | case RADEON_PACKET_TYPE3: |
2873 | pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]); | |
498dd8b3 AD |
2874 | if (ib->is_const_ib) |
2875 | ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt); | |
2876 | else { | |
876dc9f3 | 2877 | switch (ib->ring) { |
498dd8b3 AD |
2878 | case RADEON_RING_TYPE_GFX_INDEX: |
2879 | ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt); | |
2880 | break; | |
2881 | case CAYMAN_RING_TYPE_CP1_INDEX: | |
2882 | case CAYMAN_RING_TYPE_CP2_INDEX: | |
2883 | ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt); | |
2884 | break; | |
2885 | default: | |
876dc9f3 | 2886 | dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring); |
498dd8b3 AD |
2887 | ret = -EINVAL; |
2888 | break; | |
2889 | } | |
2890 | } | |
2891 | idx += pkt.count + 2; | |
2892 | break; | |
2893 | default: | |
2894 | dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type); | |
2895 | ret = -EINVAL; | |
2896 | break; | |
2897 | } | |
2898 | if (ret) | |
2899 | break; | |
2900 | } while (idx < ib->length_dw); | |
2901 | ||
2902 | return ret; | |
2903 | } | |
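/* Illustrative helper (hypothetical, not part of the driver): a PM4
 * type-3 packet occupies its header dword plus count + 1 payload dwords,
 * which is why the parser above advances idx by pkt.count + 2.
 */
static inline u32 pm4_type3_total_dwords(u32 count)
{
	/* e.g. PACKET3(PACKET3_WRITE_DATA, 3): 1 header + 4 payload = 5 */
	return count + 2;
}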
2904 | ||
d2800ee5 AD |
2905 | /* |
2906 | * vm | |
2907 | */ | |
2908 | int si_vm_init(struct radeon_device *rdev) | |
2909 | { | |
2910 | /* number of VMs */ | |
2911 | rdev->vm_manager.nvm = 16; | |
2912 | /* base offset of vram pages */ | |
2913 | rdev->vm_manager.vram_base_offset = 0; | |
2914 | ||
2915 | return 0; | |
2916 | } | |
2917 | ||
2918 | void si_vm_fini(struct radeon_device *rdev) | |
2919 | { | |
2920 | } | |
2921 | ||
82ffd92b AD |
2922 | /** |
2923 | * si_vm_set_page - update the page tables using the CP | |
2924 | * | |
2925 | * @rdev: radeon_device pointer | |
2926 | * @pe: addr of the page entry | |
2927 | * @addr: dst addr to write into pe | |
2928 | * @count: number of page entries to update | |
2929 | * @incr: increase next addr by incr bytes | |
2930 | * @flags: access flags | |
2931 | * | |
2932 | * Update the page tables using the CP (cayman-si). | |
2933 | */ | |
2934 | void si_vm_set_page(struct radeon_device *rdev, uint64_t pe, | |
2935 | uint64_t addr, unsigned count, | |
2936 | uint32_t incr, uint32_t flags) | |
d2800ee5 | 2937 | { |
82ffd92b AD |
2938 | struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index]; |
2939 | uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); | |
deab48f1 AD |
2940 | uint64_t value; |
2941 | unsigned ndw; | |
2942 | ||
2943 | if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) { | |
2944 | while (count) { | |
2945 | ndw = 2 + count * 2; | |
2946 | if (ndw > 0x3FFE) | |
2947 | ndw = 0x3FFE; | |
2948 | ||
2949 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw)); | |
2950 | radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | |
2951 | WRITE_DATA_DST_SEL(1))); | |
2952 | radeon_ring_write(ring, pe); | |
2953 | radeon_ring_write(ring, upper_32_bits(pe)); | |
2954 | for (; ndw > 2; ndw -= 2, --count, pe += 8) { | |
2955 | if (flags & RADEON_VM_PAGE_SYSTEM) { | |
2956 | value = radeon_vm_map_gart(rdev, addr); | |
2957 | value &= 0xFFFFFFFFFFFFF000ULL; | |
2958 | } else if (flags & RADEON_VM_PAGE_VALID) { | |
2959 | value = addr; | |
2960 | } else { | |
2961 | value = 0; | |
2962 | } | |
2963 | addr += incr; | |
2964 | value |= r600_flags; | |
2965 | radeon_ring_write(ring, value); | |
2966 | radeon_ring_write(ring, upper_32_bits(value)); | |
2967 | } | |
2968 | } | |
2969 | } else { | |
2970 | /* DMA */ | |
2971 | if (flags & RADEON_VM_PAGE_SYSTEM) { | |
2972 | while (count) { | |
2973 | ndw = count * 2; | |
2974 | if (ndw > 0xFFFFE) | |
2975 | ndw = 0xFFFFE; | |
2976 | ||
2977 | /* for non-physically contiguous pages (system) */ | |
2978 | radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw)); | |
2979 | radeon_ring_write(ring, pe); | |
2980 | radeon_ring_write(ring, upper_32_bits(pe) & 0xff); | |
2981 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { | |
2982 | if (flags & RADEON_VM_PAGE_SYSTEM) { | |
2983 | value = radeon_vm_map_gart(rdev, addr); | |
2984 | value &= 0xFFFFFFFFFFFFF000ULL; | |
2985 | } else if (flags & RADEON_VM_PAGE_VALID) { | |
2986 | value = addr; | |
2987 | } else { | |
2988 | value = 0; | |
2989 | } | |
2990 | addr += incr; | |
2991 | value |= r600_flags; | |
2992 | radeon_ring_write(ring, value); | |
2993 | radeon_ring_write(ring, upper_32_bits(value)); | |
2994 | } | |
2995 | } | |
2996 | } else { | |
2997 | while (count) { | |
2998 | ndw = count * 2; | |
2999 | if (ndw > 0xFFFFE) | |
3000 | ndw = 0xFFFFE; | |
3001 | ||
3002 | if (flags & RADEON_VM_PAGE_VALID) | |
3003 | value = addr; | |
3004 | else | |
3005 | value = 0; | |
3006 | /* for physically contiguous pages (vram) */ | |
3007 | radeon_ring_write(ring, DMA_PTE_PDE_PACKET(ndw)); | |
3008 | radeon_ring_write(ring, pe); /* dst addr */ | |
3009 | radeon_ring_write(ring, upper_32_bits(pe) & 0xff); | |
3010 | radeon_ring_write(ring, r600_flags); /* mask */ | |
3011 | radeon_ring_write(ring, 0); | |
3012 | radeon_ring_write(ring, value); /* value */ | |
3013 | radeon_ring_write(ring, upper_32_bits(value)); | |
3014 | radeon_ring_write(ring, incr); /* increment size */ | |
3015 | radeon_ring_write(ring, 0); | |
3016 | pe += ndw * 4; | |
3017 | addr += (ndw / 2) * incr; | |
3018 | count -= ndw / 2; | |
3019 | } | |
d7025d89 | 3020 | } |
82ffd92b | 3021 | } |
d2800ee5 AD |
3022 | } |
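/* Illustrative arithmetic (derived from the caps above): the CP path
 * limits one WRITE_DATA to ndw = 0x3FFE dwords, i.e. (0x3FFE - 2) / 2 =
 * 8190 page table entries per packet; the DMA write path caps ndw at
 * 0xFFFFE, i.e. 0xFFFFE / 2 = 524287 entries.  Larger updates therefore
 * loop and emit multiple packets.
 */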
3023 | ||
498522b4 | 3024 | void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) |
d2800ee5 | 3025 | { |
498522b4 | 3026 | struct radeon_ring *ring = &rdev->ring[ridx]; |
d2800ee5 | 3027 | |
ee60e29f | 3028 | if (vm == NULL) |
d2800ee5 AD |
3029 | return; |
3030 | ||
76c44f2c AD |
3031 | /* write new base address */ |
3032 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | |
3033 | radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | |
3034 | WRITE_DATA_DST_SEL(0))); | |
3035 | ||
ee60e29f | 3036 | if (vm->id < 8) { |
76c44f2c AD |
3037 | radeon_ring_write(ring, |
3038 | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2); | |
ee60e29f | 3039 | } else { |
76c44f2c AD |
3040 | radeon_ring_write(ring, |
3041 | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2); | |
ee60e29f | 3042 | } |
76c44f2c | 3043 | radeon_ring_write(ring, 0); |
fa87e62d | 3044 | radeon_ring_write(ring, vm->pd_gpu_addr >> 12); |
ee60e29f | 3045 | |
d2800ee5 | 3046 | /* flush hdp cache */ |
76c44f2c AD |
3047 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
3048 | radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | |
3049 | WRITE_DATA_DST_SEL(0))); | |
3050 | radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2); | |
3051 | radeon_ring_write(ring, 0); | |
ee60e29f CK |
3052 | radeon_ring_write(ring, 0x1); |
3053 | ||
d2800ee5 | 3054 | /* bits 0-15 are the VM contexts0-15 */ |
76c44f2c AD |
3055 | radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
3056 | radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | |
3057 | WRITE_DATA_DST_SEL(0))); | |
3058 | radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); | |
3059 | radeon_ring_write(ring, 0); | |
498522b4 | 3060 | radeon_ring_write(ring, 1 << vm->id); |
58f8cf56 CK |
3061 | |
3062 | /* sync PFP to ME, otherwise we might get invalid PFP reads */ | |
3063 | radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); | |
3064 | radeon_ring_write(ring, 0x0); | |
d2800ee5 AD |
3065 | } |
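/* Illustrative layout (restating the sequence above): each register
 * write emitted by si_vm_flush() is five dwords on the ring:
 *   dw0: PACKET3(PACKET3_WRITE_DATA, 3)
 *   dw1: WRITE_DATA_ENGINE_SEL(0) | WRITE_DATA_DST_SEL(0), i.e. ME
 *        engine, register destination
 *   dw2: destination register byte offset >> 2
 *   dw3: 0 (upper 32 bits of the destination address)
 *   dw4: the value to write
 */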
3066 | ||
8c5fd7ef AD |
3067 | void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) |
3068 | { | |
3069 | struct radeon_ring *ring = &rdev->ring[ridx]; | |
3070 | ||
3071 | if (vm == NULL) | |
3072 | return; | |
3073 | ||
3074 | radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); | |
3075 | if (vm->id < 8) { | |
3076 | radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2)); | |
3077 | } else { | |
3078 | radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2)); | |
3079 | } | |
3080 | radeon_ring_write(ring, vm->pd_gpu_addr >> 12); | |
3081 | ||
3082 | /* flush hdp cache */ | |
3083 | radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); | |
3084 | radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2)); | |
3085 | radeon_ring_write(ring, 1); | |
3086 | ||
3087 | /* bits 0-15 are the VM contexts0-15 */ | |
3088 | radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); | |
3089 | radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); | |
3090 | radeon_ring_write(ring, 1 << vm->id); | |
3091 | } | |
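/* Illustrative helper (hypothetical, not part of the driver): the second
 * dword of each SRBM_WRITE above appears to pack a byte-enable mask in
 * bits 31:16 and the dword-aligned register index in bits 15:0.
 */
static inline u32 si_dma_srbm_write_dw1(u32 reg_byte_offset)
{
	return (0xf << 16) | (reg_byte_offset >> 2); /* all four bytes enabled */
}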
3092 | ||
347e7592 AD |
3093 | /* |
3094 | * RLC | |
3095 | */ | |
c420c745 | 3096 | void si_rlc_fini(struct radeon_device *rdev) |
347e7592 AD |
3097 | { |
3098 | int r; | |
3099 | ||
3100 | /* save restore block */ | |
3101 | if (rdev->rlc.save_restore_obj) { | |
3102 | r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false); | |
3103 | if (unlikely(r != 0)) | |
3104 | dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r); | |
3105 | radeon_bo_unpin(rdev->rlc.save_restore_obj); | |
3106 | radeon_bo_unreserve(rdev->rlc.save_restore_obj); | |
3107 | ||
3108 | radeon_bo_unref(&rdev->rlc.save_restore_obj); | |
3109 | rdev->rlc.save_restore_obj = NULL; | |
3110 | } | |
3111 | ||
3112 | /* clear state block */ | |
3113 | if (rdev->rlc.clear_state_obj) { | |
3114 | r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false); | |
3115 | if (unlikely(r != 0)) | |
3116 | dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r); | |
3117 | radeon_bo_unpin(rdev->rlc.clear_state_obj); | |
3118 | radeon_bo_unreserve(rdev->rlc.clear_state_obj); | |
3119 | ||
3120 | radeon_bo_unref(&rdev->rlc.clear_state_obj); | |
3121 | rdev->rlc.clear_state_obj = NULL; | |
3122 | } | |
3123 | } | |
3124 | ||
c420c745 | 3125 | int si_rlc_init(struct radeon_device *rdev) |
347e7592 AD |
3126 | { |
3127 | int r; | |
3128 | ||
3129 | /* save restore block */ | |
3130 | if (rdev->rlc.save_restore_obj == NULL) { | |
3131 | r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, | |
40f5cf99 AD |
3132 | RADEON_GEM_DOMAIN_VRAM, NULL, |
3133 | &rdev->rlc.save_restore_obj); | |
347e7592 AD |
3134 | if (r) { |
3135 | dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r); | |
3136 | return r; | |
3137 | } | |
3138 | } | |
3139 | ||
3140 | r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false); | |
3141 | if (unlikely(r != 0)) { | |
3142 | si_rlc_fini(rdev); | |
3143 | return r; | |
3144 | } | |
3145 | r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM, | |
3146 | &rdev->rlc.save_restore_gpu_addr); | |
5273db70 | 3147 | radeon_bo_unreserve(rdev->rlc.save_restore_obj); |
347e7592 | 3148 | if (r) { |
347e7592 AD |
3149 | dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r); |
3150 | si_rlc_fini(rdev); | |
3151 | return r; | |
3152 | } | |
3153 | ||
3154 | /* clear state block */ | |
3155 | if (rdev->rlc.clear_state_obj == NULL) { | |
3156 | r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, | |
40f5cf99 AD |
3157 | RADEON_GEM_DOMAIN_VRAM, NULL, |
3158 | &rdev->rlc.clear_state_obj); | |
347e7592 AD |
3159 | if (r) { |
3160 | dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r); | |
3161 | si_rlc_fini(rdev); | |
3162 | return r; | |
3163 | } | |
3164 | } | |
3165 | r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false); | |
3166 | if (unlikely(r != 0)) { | |
3167 | si_rlc_fini(rdev); | |
3168 | return r; | |
3169 | } | |
3170 | r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM, | |
3171 | &rdev->rlc.clear_state_gpu_addr); | |
5273db70 | 3172 | radeon_bo_unreserve(rdev->rlc.clear_state_obj); |
347e7592 | 3173 | if (r) { |
347e7592 AD |
3174 | dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r); |
3175 | si_rlc_fini(rdev); | |
3176 | return r; | |
3177 | } | |
3178 | ||
3179 | return 0; | |
3180 | } | |
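/* Illustrative sketch (hypothetical helper, not part of the driver): the
 * create/reserve/pin/unreserve sequence used twice above, condensed.
 * Real callers must also tear down on failure, as si_rlc_init() does.
 */
static int example_pin_vram_bo(struct radeon_device *rdev,
			       struct radeon_bo **bo, u64 *gpu_addr)
{
	int r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				 RADEON_GEM_DOMAIN_VRAM, NULL, bo);
	if (r)
		return r;
	r = radeon_bo_reserve(*bo, false);
	if (r)
		return r;
	r = radeon_bo_pin(*bo, RADEON_GEM_DOMAIN_VRAM, gpu_addr);
	radeon_bo_unreserve(*bo);
	return r;
}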
3181 | ||
3182 | static void si_rlc_stop(struct radeon_device *rdev) | |
3183 | { | |
3184 | WREG32(RLC_CNTL, 0); | |
3185 | } | |
3186 | ||
3187 | static void si_rlc_start(struct radeon_device *rdev) | |
3188 | { | |
3189 | WREG32(RLC_CNTL, RLC_ENABLE); | |
3190 | } | |
3191 | ||
3192 | static int si_rlc_resume(struct radeon_device *rdev) | |
3193 | { | |
3194 | u32 i; | |
3195 | const __be32 *fw_data; | |
3196 | ||
3197 | if (!rdev->rlc_fw) | |
3198 | return -EINVAL; | |
3199 | ||
3200 | si_rlc_stop(rdev); | |
3201 | ||
3202 | WREG32(RLC_RL_BASE, 0); | |
3203 | WREG32(RLC_RL_SIZE, 0); | |
3204 | WREG32(RLC_LB_CNTL, 0); | |
3205 | WREG32(RLC_LB_CNTR_MAX, 0xffffffff); | |
3206 | WREG32(RLC_LB_CNTR_INIT, 0); | |
3207 | ||
3208 | WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); | |
3209 | WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); | |
3210 | ||
3211 | WREG32(RLC_MC_CNTL, 0); | |
3212 | WREG32(RLC_UCODE_CNTL, 0); | |
3213 | ||
3214 | fw_data = (const __be32 *)rdev->rlc_fw->data; | |
3215 | for (i = 0; i < SI_RLC_UCODE_SIZE; i++) { | |
3216 | WREG32(RLC_UCODE_ADDR, i); | |
3217 | WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); | |
3218 | } | |
3219 | WREG32(RLC_UCODE_ADDR, 0); | |
3220 | ||
3221 | si_rlc_start(rdev); | |
3222 | ||
3223 | return 0; | |
3224 | } | |
3225 | ||
25a857fb AD |
3226 | static void si_enable_interrupts(struct radeon_device *rdev) |
3227 | { | |
3228 | u32 ih_cntl = RREG32(IH_CNTL); | |
3229 | u32 ih_rb_cntl = RREG32(IH_RB_CNTL); | |
3230 | ||
3231 | ih_cntl |= ENABLE_INTR; | |
3232 | ih_rb_cntl |= IH_RB_ENABLE; | |
3233 | WREG32(IH_CNTL, ih_cntl); | |
3234 | WREG32(IH_RB_CNTL, ih_rb_cntl); | |
3235 | rdev->ih.enabled = true; | |
3236 | } | |
3237 | ||
3238 | static void si_disable_interrupts(struct radeon_device *rdev) | |
3239 | { | |
3240 | u32 ih_rb_cntl = RREG32(IH_RB_CNTL); | |
3241 | u32 ih_cntl = RREG32(IH_CNTL); | |
3242 | ||
3243 | ih_rb_cntl &= ~IH_RB_ENABLE; | |
3244 | ih_cntl &= ~ENABLE_INTR; | |
3245 | WREG32(IH_RB_CNTL, ih_rb_cntl); | |
3246 | WREG32(IH_CNTL, ih_cntl); | |
3247 | /* set rptr, wptr to 0 */ | |
3248 | WREG32(IH_RB_RPTR, 0); | |
3249 | WREG32(IH_RB_WPTR, 0); | |
3250 | rdev->ih.enabled = false; | |
25a857fb AD |
3251 | rdev->ih.rptr = 0; |
3252 | } | |
3253 | ||
3254 | static void si_disable_interrupt_state(struct radeon_device *rdev) | |
3255 | { | |
3256 | u32 tmp; | |
3257 | ||
3258 | WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); | |
3259 | WREG32(CP_INT_CNTL_RING1, 0); | |
3260 | WREG32(CP_INT_CNTL_RING2, 0); | |
8c5fd7ef AD |
3261 | tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; |
3262 | WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp); | |
3263 | tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; | |
3264 | WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp); | |
25a857fb AD |
3265 | WREG32(GRBM_INT_CNTL, 0); |
3266 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | |
3267 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | |
3268 | if (rdev->num_crtc >= 4) { | |
3269 | WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); | |
3270 | WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); | |
3271 | } | |
3272 | if (rdev->num_crtc >= 6) { | |
3273 | WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | |
3274 | WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | |
3275 | } | |
3276 | ||
3277 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | |
3278 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | |
3279 | if (rdev->num_crtc >= 4) { | |
3280 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); | |
3281 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); | |
3282 | } | |
3283 | if (rdev->num_crtc >= 6) { | |
3284 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | |
3285 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | |
3286 | } | |
3287 | ||
3288 | WREG32(DACA_AUTODETECT_INT_CONTROL, 0); | |
3289 | ||
3290 | tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; | |
3291 | WREG32(DC_HPD1_INT_CONTROL, tmp); | |
3292 | tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; | |
3293 | WREG32(DC_HPD2_INT_CONTROL, tmp); | |
3294 | tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; | |
3295 | WREG32(DC_HPD3_INT_CONTROL, tmp); | |
3296 | tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; | |
3297 | WREG32(DC_HPD4_INT_CONTROL, tmp); | |
3298 | tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; | |
3299 | WREG32(DC_HPD5_INT_CONTROL, tmp); | |
3300 | tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; | |
3301 | WREG32(DC_HPD6_INT_CONTROL, tmp); | |
3302 | ||
3303 | } | |
3304 | ||
3305 | static int si_irq_init(struct radeon_device *rdev) | |
3306 | { | |
3307 | int ret = 0; | |
3308 | int rb_bufsz; | |
3309 | u32 interrupt_cntl, ih_cntl, ih_rb_cntl; | |
3310 | ||
3311 | /* allocate ring */ | |
3312 | ret = r600_ih_ring_alloc(rdev); | |
3313 | if (ret) | |
3314 | return ret; | |
3315 | ||
3316 | /* disable irqs */ | |
3317 | si_disable_interrupts(rdev); | |
3318 | ||
3319 | /* init rlc */ | |
3320 | ret = si_rlc_resume(rdev); | |
3321 | if (ret) { | |
3322 | r600_ih_ring_fini(rdev); | |
3323 | return ret; | |
3324 | } | |
3325 | ||
3326 | /* setup interrupt control */ | |
3327 | /* set dummy read address to ring address */ | |
3328 | WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8); | |
3329 | interrupt_cntl = RREG32(INTERRUPT_CNTL); | |
3330 | /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi | |
3331 | * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN | |
3332 | */ | |
3333 | interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE; | |
3334 | /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */ | |
3335 | interrupt_cntl &= ~IH_REQ_NONSNOOP_EN; | |
3336 | WREG32(INTERRUPT_CNTL, interrupt_cntl); | |
3337 | ||
3338 | WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); | |
3339 | rb_bufsz = drm_order(rdev->ih.ring_size / 4); | |
3340 | ||
3341 | ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | | |
3342 | IH_WPTR_OVERFLOW_CLEAR | | |
3343 | (rb_bufsz << 1)); | |
3344 | ||
3345 | if (rdev->wb.enabled) | |
3346 | ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE; | |
3347 | ||
3348 | /* set the writeback address whether it's enabled or not */ | |
3349 | WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC); | |
3350 | WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF); | |
3351 | ||
3352 | WREG32(IH_RB_CNTL, ih_rb_cntl); | |
3353 | ||
3354 | /* set rptr, wptr to 0 */ | |
3355 | WREG32(IH_RB_RPTR, 0); | |
3356 | WREG32(IH_RB_WPTR, 0); | |
3357 | ||
3358 | /* Default settings for IH_CNTL (disabled at first) */ | |
3359 | ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0); | |
3360 | /* RPTR_REARM only works if msi's are enabled */ | |
3361 | if (rdev->msi_enabled) | |
3362 | ih_cntl |= RPTR_REARM; | |
3363 | WREG32(IH_CNTL, ih_cntl); | |
3364 | ||
3365 | /* force the active interrupt state to all disabled */ | |
3366 | si_disable_interrupt_state(rdev); | |
3367 | ||
2099810f DA |
3368 | pci_set_master(rdev->pdev); |
3369 | ||
25a857fb AD |
3370 | /* enable irqs */ |
3371 | si_enable_interrupts(rdev); | |
3372 | ||
3373 | return ret; | |
3374 | } | |
3375 | ||
3376 | int si_irq_set(struct radeon_device *rdev) | |
3377 | { | |
3378 | u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; | |
3379 | u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0; | |
3380 | u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; | |
3381 | u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; | |
3382 | u32 grbm_int_cntl = 0; | |
3383 | u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; | |
8c5fd7ef | 3384 | u32 dma_cntl, dma_cntl1; |
25a857fb AD |
3385 | |
3386 | if (!rdev->irq.installed) { | |
3387 | WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); | |
3388 | return -EINVAL; | |
3389 | } | |
3390 | /* don't enable anything if the ih is disabled */ | |
3391 | if (!rdev->ih.enabled) { | |
3392 | si_disable_interrupts(rdev); | |
3393 | /* force the active interrupt state to all disabled */ | |
3394 | si_disable_interrupt_state(rdev); | |
3395 | return 0; | |
3396 | } | |
3397 | ||
3398 | hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; | |
3399 | hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; | |
3400 | hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; | |
3401 | hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; | |
3402 | hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; | |
3403 | hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; | |
3404 | ||
8c5fd7ef AD |
3405 | dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; |
3406 | dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; | |
3407 | ||
25a857fb | 3408 | /* enable CP interrupts on all rings */ |
736fc37f | 3409 | if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { |
25a857fb AD |
3410 | DRM_DEBUG("si_irq_set: sw int gfx\n"); |
3411 | cp_int_cntl |= TIME_STAMP_INT_ENABLE; | |
3412 | } | |
736fc37f | 3413 | if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) { |
25a857fb AD |
3414 | DRM_DEBUG("si_irq_set: sw int cp1\n"); |
3415 | cp_int_cntl1 |= TIME_STAMP_INT_ENABLE; | |
3416 | } | |
736fc37f | 3417 | if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) { |
25a857fb AD |
3418 | DRM_DEBUG("si_irq_set: sw int cp2\n"); |
3419 | cp_int_cntl2 |= TIME_STAMP_INT_ENABLE; | |
3420 | } | |
8c5fd7ef AD |
3421 | if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) { |
3422 | DRM_DEBUG("si_irq_set: sw int dma\n"); | |
3423 | dma_cntl |= TRAP_ENABLE; | |
3424 | } | |
3425 | ||
3426 | if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) { | |
3427 | DRM_DEBUG("si_irq_set: sw int dma1\n"); | |
3428 | dma_cntl1 |= TRAP_ENABLE; | |
3429 | } | |
25a857fb | 3430 | if (rdev->irq.crtc_vblank_int[0] || |
736fc37f | 3431 | atomic_read(&rdev->irq.pflip[0])) { |
25a857fb AD |
3432 | DRM_DEBUG("si_irq_set: vblank 0\n"); |
3433 | crtc1 |= VBLANK_INT_MASK; | |
3434 | } | |
3435 | if (rdev->irq.crtc_vblank_int[1] || | |
736fc37f | 3436 | atomic_read(&rdev->irq.pflip[1])) { |
25a857fb AD |
3437 | DRM_DEBUG("si_irq_set: vblank 1\n"); |
3438 | crtc2 |= VBLANK_INT_MASK; | |
3439 | } | |
3440 | if (rdev->irq.crtc_vblank_int[2] || | |
736fc37f | 3441 | atomic_read(&rdev->irq.pflip[2])) { |
25a857fb AD |
3442 | DRM_DEBUG("si_irq_set: vblank 2\n"); |
3443 | crtc3 |= VBLANK_INT_MASK; | |
3444 | } | |
3445 | if (rdev->irq.crtc_vblank_int[3] || | |
736fc37f | 3446 | atomic_read(&rdev->irq.pflip[3])) { |
25a857fb AD |
3447 | DRM_DEBUG("si_irq_set: vblank 3\n"); |
3448 | crtc4 |= VBLANK_INT_MASK; | |
3449 | } | |
3450 | if (rdev->irq.crtc_vblank_int[4] || | |
736fc37f | 3451 | atomic_read(&rdev->irq.pflip[4])) { |
25a857fb AD |
3452 | DRM_DEBUG("si_irq_set: vblank 4\n"); |
3453 | crtc5 |= VBLANK_INT_MASK; | |
3454 | } | |
3455 | if (rdev->irq.crtc_vblank_int[5] || | |
736fc37f | 3456 | atomic_read(&rdev->irq.pflip[5])) { |
25a857fb AD |
3457 | DRM_DEBUG("si_irq_set: vblank 5\n"); |
3458 | crtc6 |= VBLANK_INT_MASK; | |
3459 | } | |
3460 | if (rdev->irq.hpd[0]) { | |
3461 | DRM_DEBUG("si_irq_set: hpd 1\n"); | |
3462 | hpd1 |= DC_HPDx_INT_EN; | |
3463 | } | |
3464 | if (rdev->irq.hpd[1]) { | |
3465 | DRM_DEBUG("si_irq_set: hpd 2\n"); | |
3466 | hpd2 |= DC_HPDx_INT_EN; | |
3467 | } | |
3468 | if (rdev->irq.hpd[2]) { | |
3469 | DRM_DEBUG("si_irq_set: hpd 3\n"); | |
3470 | hpd3 |= DC_HPDx_INT_EN; | |
3471 | } | |
3472 | if (rdev->irq.hpd[3]) { | |
3473 | DRM_DEBUG("si_irq_set: hpd 4\n"); | |
3474 | hpd4 |= DC_HPDx_INT_EN; | |
3475 | } | |
3476 | if (rdev->irq.hpd[4]) { | |
3477 | DRM_DEBUG("si_irq_set: hpd 5\n"); | |
3478 | hpd5 |= DC_HPDx_INT_EN; | |
3479 | } | |
3480 | if (rdev->irq.hpd[5]) { | |
3481 | DRM_DEBUG("si_irq_set: hpd 6\n"); | |
3482 | hpd6 |= DC_HPDx_INT_EN; | |
3483 | } | |
25a857fb AD |
3484 | |
3485 | WREG32(CP_INT_CNTL_RING0, cp_int_cntl); | |
3486 | WREG32(CP_INT_CNTL_RING1, cp_int_cntl1); | |
3487 | WREG32(CP_INT_CNTL_RING2, cp_int_cntl2); | |
3488 | ||
8c5fd7ef AD |
3489 | WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl); |
3490 | WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1); | |
3491 | ||
25a857fb AD |
3492 | WREG32(GRBM_INT_CNTL, grbm_int_cntl); |
3493 | ||
3494 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); | |
3495 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); | |
3496 | if (rdev->num_crtc >= 4) { | |
3497 | WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); | |
3498 | WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); | |
3499 | } | |
3500 | if (rdev->num_crtc >= 6) { | |
3501 | WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5); | |
3502 | WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); | |
3503 | } | |
3504 | ||
3505 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); | |
3506 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); | |
3507 | if (rdev->num_crtc >= 4) { | |
3508 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); | |
3509 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); | |
3510 | } | |
3511 | if (rdev->num_crtc >= 6) { | |
3512 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); | |
3513 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); | |
3514 | } | |
3515 | ||
3516 | WREG32(DC_HPD1_INT_CONTROL, hpd1); | |
3517 | WREG32(DC_HPD2_INT_CONTROL, hpd2); | |
3518 | WREG32(DC_HPD3_INT_CONTROL, hpd3); | |
3519 | WREG32(DC_HPD4_INT_CONTROL, hpd4); | |
3520 | WREG32(DC_HPD5_INT_CONTROL, hpd5); | |
3521 | WREG32(DC_HPD6_INT_CONTROL, hpd6); | |
3522 | ||
3523 | return 0; | |
3524 | } | |
3525 | ||
3526 | static inline void si_irq_ack(struct radeon_device *rdev) | |
3527 | { | |
3528 | u32 tmp; | |
3529 | ||
3530 | rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS); | |
3531 | rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); | |
3532 | rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2); | |
3533 | rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3); | |
3534 | rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4); | |
3535 | rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); | |
3536 | rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET); | |
3537 | rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET); | |
3538 | if (rdev->num_crtc >= 4) { | |
3539 | rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET); | |
3540 | rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET); | |
3541 | } | |
3542 | if (rdev->num_crtc >= 6) { | |
3543 | rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET); | |
3544 | rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET); | |
3545 | } | |
3546 | ||
3547 | if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED) | |
3548 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); | |
3549 | if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED) | |
3550 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); | |
3551 | if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) | |
3552 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); | |
3553 | if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) | |
3554 | WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK); | |
3555 | if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) | |
3556 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK); | |
3557 | if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) | |
3558 | WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); | |
3559 | ||
3560 | if (rdev->num_crtc >= 4) { | |
3561 | if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED) | |
3562 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); | |
3563 | if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED) | |
3564 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); | |
3565 | if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) | |
3566 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); | |
3567 | if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) | |
3568 | WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK); | |
3569 | if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) | |
3570 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK); | |
3571 | if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) | |
3572 | WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK); | |
3573 | } | |
3574 | ||
3575 | if (rdev->num_crtc >= 6) { | |
3576 | if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED) | |
3577 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); | |
3578 | if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED) | |
3579 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); | |
3580 | if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) | |
3581 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); | |
3582 | if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) | |
3583 | WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK); | |
3584 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) | |
3585 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK); | |
3586 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) | |
3587 | WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK); | |
3588 | } | |
3589 | ||
3590 | if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { | |
3591 | tmp = RREG32(DC_HPD1_INT_CONTROL); | |
3592 | tmp |= DC_HPDx_INT_ACK; | |
3593 | WREG32(DC_HPD1_INT_CONTROL, tmp); | |
3594 | } | |
3595 | if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { | |
3596 | tmp = RREG32(DC_HPD2_INT_CONTROL); | |
3597 | tmp |= DC_HPDx_INT_ACK; | |
3598 | WREG32(DC_HPD2_INT_CONTROL, tmp); | |
3599 | } | |
3600 | if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { | |
3601 | tmp = RREG32(DC_HPD3_INT_CONTROL); | |
3602 | tmp |= DC_HPDx_INT_ACK; | |
3603 | WREG32(DC_HPD3_INT_CONTROL, tmp); | |
3604 | } | |
3605 | if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { | |
3606 | tmp = RREG32(DC_HPD4_INT_CONTROL); | |
3607 | tmp |= DC_HPDx_INT_ACK; | |
3608 | WREG32(DC_HPD4_INT_CONTROL, tmp); | |
3609 | } | |
3610 | if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { | |
3611 | tmp = RREG32(DC_HPD5_INT_CONTROL); | |
3612 | tmp |= DC_HPDx_INT_ACK; | |
3613 | WREG32(DC_HPD5_INT_CONTROL, tmp); | |
3614 | } | |
3615 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { | |
3616 | tmp = RREG32(DC_HPD6_INT_CONTROL); | |
3617 | tmp |= DC_HPDx_INT_ACK; | |
3618 | WREG32(DC_HPD6_INT_CONTROL, tmp); | |
3619 | } | |
3620 | } | |
3621 | ||
3622 | static void si_irq_disable(struct radeon_device *rdev) | |
3623 | { | |
3624 | si_disable_interrupts(rdev); | |
3625 | /* Wait and acknowledge irq */ | |
3626 | mdelay(1); | |
3627 | si_irq_ack(rdev); | |
3628 | si_disable_interrupt_state(rdev); | |
3629 | } | |
3630 | ||
3631 | static void si_irq_suspend(struct radeon_device *rdev) | |
3632 | { | |
3633 | si_irq_disable(rdev); | |
3634 | si_rlc_stop(rdev); | |
3635 | } | |
3636 | ||
9b136d51 AD |
3637 | static void si_irq_fini(struct radeon_device *rdev) |
3638 | { | |
3639 | si_irq_suspend(rdev); | |
3640 | r600_ih_ring_fini(rdev); | |
3641 | } | |
3642 | ||
25a857fb AD |
3643 | static inline u32 si_get_ih_wptr(struct radeon_device *rdev) |
3644 | { | |
3645 | u32 wptr, tmp; | |
3646 | ||
3647 | if (rdev->wb.enabled) | |
3648 | wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]); | |
3649 | else | |
3650 | wptr = RREG32(IH_RB_WPTR); | |
3651 | ||
3652 | if (wptr & RB_OVERFLOW) { | |
3653 | /* When a ring buffer overflow happens, start parsing interrupts | |
3654 | * from the last not-overwritten vector (wptr + 16). Hopefully | |
3655 | * this should allow us to catch up. | |
3656 | */ | |
3657 | dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", | |
3658 | wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); | |
3659 | rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; | |
3660 | tmp = RREG32(IH_RB_CNTL); | |
3661 | tmp |= IH_WPTR_OVERFLOW_CLEAR; | |
3662 | WREG32(IH_RB_CNTL, tmp); | |
3663 | } | |
3664 | return (wptr & rdev->ih.ptr_mask); | |
3665 | } | |
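/* Illustrative arithmetic (assuming the 64 KB IH ring set up in si_init()
 * below, with ptr_mask = ring_size - 1): each IV entry is 16 bytes, so
 * the ring holds 65536 / 16 = 4096 entries and the read pointer wraps as
 * rptr = (rptr + 16) & 0xffff.
 */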
3666 | ||
3667 | /* SI IV Ring | |
3668 | * Each IV ring entry is 128 bits: | |
3669 | * [7:0] - interrupt source id | |
3670 | * [31:8] - reserved | |
3671 | * [59:32] - interrupt source data | |
3672 | * [63:60] - reserved | |
3673 | * [71:64] - RINGID | |
3674 | * [79:72] - VMID | |
3675 | * [127:80] - reserved | |
3676 | */ | |
3677 | int si_irq_process(struct radeon_device *rdev) | |
3678 | { | |
3679 | u32 wptr; | |
3680 | u32 rptr; | |
3681 | u32 src_id, src_data, ring_id; | |
3682 | u32 ring_index; | |
25a857fb AD |
3683 | bool queue_hotplug = false; |
3684 | ||
3685 | if (!rdev->ih.enabled || rdev->shutdown) | |
3686 | return IRQ_NONE; | |
3687 | ||
3688 | wptr = si_get_ih_wptr(rdev); | |
c20dc369 CK |
3689 | |
3690 | restart_ih: | |
3691 | /* is somebody else already processing irqs? */ | |
3692 | if (atomic_xchg(&rdev->ih.lock, 1)) | |
3693 | return IRQ_NONE; | |
3694 | ||
25a857fb AD |
3695 | rptr = rdev->ih.rptr; |
3696 | DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr); | |
3697 | ||
25a857fb AD |
3698 | /* Order reading of wptr vs. reading of IH ring data */ |
3699 | rmb(); | |
3700 | ||
3701 | /* display interrupts */ | |
3702 | si_irq_ack(rdev); | |
3703 | ||
25a857fb AD |
3704 | while (rptr != wptr) { |
3705 | /* wptr/rptr are in bytes! */ | |
3706 | ring_index = rptr / 4; | |
3707 | src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff; | |
3708 | src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff; | |
3709 | ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff; | |
3710 | ||
3711 | switch (src_id) { | |
3712 | case 1: /* D1 vblank/vline */ | |
3713 | switch (src_data) { | |
3714 | case 0: /* D1 vblank */ | |
3715 | if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) { | |
3716 | if (rdev->irq.crtc_vblank_int[0]) { | |
3717 | drm_handle_vblank(rdev->ddev, 0); | |
3718 | rdev->pm.vblank_sync = true; | |
3719 | wake_up(&rdev->irq.vblank_queue); | |
3720 | } | |
736fc37f | 3721 | if (atomic_read(&rdev->irq.pflip[0])) |
25a857fb AD |
3722 | radeon_crtc_handle_flip(rdev, 0); |
3723 | rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; | |
3724 | DRM_DEBUG("IH: D1 vblank\n"); | |
3725 | } | |
3726 | break; | |
3727 | case 1: /* D1 vline */ | |
3728 | if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) { | |
3729 | rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT; | |
3730 | DRM_DEBUG("IH: D1 vline\n"); | |
3731 | } | |
3732 | break; | |
3733 | default: | |
3734 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | |
3735 | break; | |
3736 | } | |
3737 | break; | |
3738 | case 2: /* D2 vblank/vline */ | |
3739 | switch (src_data) { | |
3740 | case 0: /* D2 vblank */ | |
3741 | if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { | |
3742 | if (rdev->irq.crtc_vblank_int[1]) { | |
3743 | drm_handle_vblank(rdev->ddev, 1); | |
3744 | rdev->pm.vblank_sync = true; | |
3745 | wake_up(&rdev->irq.vblank_queue); | |
3746 | } | |
736fc37f | 3747 | if (atomic_read(&rdev->irq.pflip[1])) |
25a857fb AD |
3748 | radeon_crtc_handle_flip(rdev, 1); |
3749 | rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; | |
3750 | DRM_DEBUG("IH: D2 vblank\n"); | |
3751 | } | |
3752 | break; | |
3753 | case 1: /* D2 vline */ | |
3754 | if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) { | |
3755 | rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; | |
3756 | DRM_DEBUG("IH: D2 vline\n"); | |
3757 | } | |
3758 | break; | |
3759 | default: | |
3760 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | |
3761 | break; | |
3762 | } | |
3763 | break; | |
3764 | case 3: /* D3 vblank/vline */ | |
3765 | switch (src_data) { | |
3766 | case 0: /* D3 vblank */ | |
3767 | if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { | |
3768 | if (rdev->irq.crtc_vblank_int[2]) { | |
3769 | drm_handle_vblank(rdev->ddev, 2); | |
3770 | rdev->pm.vblank_sync = true; | |
3771 | wake_up(&rdev->irq.vblank_queue); | |
3772 | } | |
736fc37f | 3773 | if (atomic_read(&rdev->irq.pflip[2])) |
25a857fb AD |
3774 | radeon_crtc_handle_flip(rdev, 2); |
3775 | rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; | |
3776 | DRM_DEBUG("IH: D3 vblank\n"); | |
3777 | } | |
3778 | break; | |
3779 | case 1: /* D3 vline */ | |
3780 | if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { | |
3781 | rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; | |
3782 | DRM_DEBUG("IH: D3 vline\n"); | |
3783 | } | |
3784 | break; | |
3785 | default: | |
3786 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | |
3787 | break; | |
3788 | } | |
3789 | break; | |
3790 | case 4: /* D4 vblank/vline */ | |
3791 | switch (src_data) { | |
3792 | case 0: /* D4 vblank */ | |
3793 | if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { | |
3794 | if (rdev->irq.crtc_vblank_int[3]) { | |
3795 | drm_handle_vblank(rdev->ddev, 3); | |
3796 | rdev->pm.vblank_sync = true; | |
3797 | wake_up(&rdev->irq.vblank_queue); | |
3798 | } | |
736fc37f | 3799 | if (atomic_read(&rdev->irq.pflip[3])) |
25a857fb AD |
3800 | radeon_crtc_handle_flip(rdev, 3); |
3801 | rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; | |
3802 | DRM_DEBUG("IH: D4 vblank\n"); | |
3803 | } | |
3804 | break; | |
3805 | case 1: /* D4 vline */ | |
3806 | if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { | |
3807 | rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; | |
3808 | DRM_DEBUG("IH: D4 vline\n"); | |
3809 | } | |
3810 | break; | |
3811 | default: | |
3812 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | |
3813 | break; | |
3814 | } | |
3815 | break; | |
3816 | case 5: /* D5 vblank/vline */ | |
3817 | switch (src_data) { | |
3818 | case 0: /* D5 vblank */ | |
3819 | if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { | |
3820 | if (rdev->irq.crtc_vblank_int[4]) { | |
3821 | drm_handle_vblank(rdev->ddev, 4); | |
3822 | rdev->pm.vblank_sync = true; | |
3823 | wake_up(&rdev->irq.vblank_queue); | |
3824 | } | |
736fc37f | 3825 | if (atomic_read(&rdev->irq.pflip[4])) |
25a857fb AD |
3826 | radeon_crtc_handle_flip(rdev, 4); |
3827 | rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; | |
3828 | DRM_DEBUG("IH: D5 vblank\n"); | |
3829 | } | |
3830 | break; | |
3831 | case 1: /* D5 vline */ | |
3832 | if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { | |
3833 | rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; | |
3834 | DRM_DEBUG("IH: D5 vline\n"); | |
3835 | } | |
3836 | break; | |
3837 | default: | |
3838 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | |
3839 | break; | |
3840 | } | |
3841 | break; | |
3842 | case 6: /* D6 vblank/vline */ | |
3843 | switch (src_data) { | |
3844 | case 0: /* D6 vblank */ | |
3845 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { | |
3846 | if (rdev->irq.crtc_vblank_int[5]) { | |
3847 | drm_handle_vblank(rdev->ddev, 5); | |
3848 | rdev->pm.vblank_sync = true; | |
3849 | wake_up(&rdev->irq.vblank_queue); | |
3850 | } | |
736fc37f | 3851 | if (atomic_read(&rdev->irq.pflip[5])) |
25a857fb AD |
3852 | radeon_crtc_handle_flip(rdev, 5); |
3853 | rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; | |
3854 | DRM_DEBUG("IH: D6 vblank\n"); | |
3855 | } | |
3856 | break; | |
3857 | case 1: /* D6 vline */ | |
3858 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { | |
3859 | rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; | |
3860 | DRM_DEBUG("IH: D6 vline\n"); | |
3861 | } | |
3862 | break; | |
3863 | default: | |
3864 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | |
3865 | break; | |
3866 | } | |
3867 | break; | |
3868 | case 42: /* HPD hotplug */ | |
3869 | switch (src_data) { | |
3870 | case 0: | |
3871 | if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { | |
3872 | rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT; | |
3873 | queue_hotplug = true; | |
3874 | DRM_DEBUG("IH: HPD1\n"); | |
3875 | } | |
3876 | break; | |
3877 | case 1: | |
3878 | if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { | |
3879 | rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT; | |
3880 | queue_hotplug = true; | |
3881 | DRM_DEBUG("IH: HPD2\n"); | |
3882 | } | |
3883 | break; | |
3884 | case 2: | |
3885 | if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { | |
3886 | rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; | |
3887 | queue_hotplug = true; | |
3888 | DRM_DEBUG("IH: HPD3\n"); | |
3889 | } | |
3890 | break; | |
3891 | case 3: | |
3892 | if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { | |
3893 | rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; | |
3894 | queue_hotplug = true; | |
3895 | DRM_DEBUG("IH: HPD4\n"); | |
3896 | } | |
3897 | break; | |
3898 | case 4: | |
3899 | if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { | |
3900 | rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; | |
3901 | queue_hotplug = true; | |
3902 | DRM_DEBUG("IH: HPD5\n"); | |
3903 | } | |
3904 | break; | |
3905 | case 5: | |
3906 | if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { | |
3907 | rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; | |
3908 | queue_hotplug = true; | |
3909 | DRM_DEBUG("IH: HPD6\n"); | |
3910 | } | |
3911 | break; | |
3912 | default: | |
3913 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | |
3914 | break; | |
3915 | } | |
3916 | break; | |
ae133a11 CK |
3917 | case 146: |
3918 | case 147: | |
3919 | dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); | |
3920 | dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", | |
3921 | RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)); | |
3922 | dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", | |
3923 | RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); | |
3924 | /* reset addr and status */ | |
3925 | WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); | |
3926 | break; | |
25a857fb AD |
3927 | case 176: /* RINGID0 CP_INT */ |
3928 | radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); | |
3929 | break; | |
3930 | case 177: /* RINGID1 CP_INT */ | |
3931 | radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX); | |
3932 | break; | |
3933 | case 178: /* RINGID2 CP_INT */ | |
3934 | radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX); | |
3935 | break; | |
3936 | case 181: /* CP EOP event */ | |
3937 | DRM_DEBUG("IH: CP EOP\n"); | |
3938 | switch (ring_id) { | |
3939 | case 0: | |
3940 | radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); | |
3941 | break; | |
3942 | case 1: | |
3943 | radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX); | |
3944 | break; | |
3945 | case 2: | |
3946 | radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX); | |
3947 | break; | |
3948 | } | |
3949 | break; | |
8c5fd7ef AD |
3950 | case 224: /* DMA trap event */ |
3951 | DRM_DEBUG("IH: DMA trap\n"); | |
3952 | radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX); | |
3953 | break; | |
25a857fb AD |
3954 | case 233: /* GUI IDLE */ |
3955 | DRM_DEBUG("IH: GUI idle\n"); | |
25a857fb | 3956 | break; |
8c5fd7ef AD |
3957 | case 244: /* DMA1 trap event */ | |
3958 | DRM_DEBUG("IH: DMA1 trap\n"); | |
3959 | radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX); | |
3960 | break; | |
25a857fb AD |
3961 | default: |
3962 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | |
3963 | break; | |
3964 | } | |
3965 | ||
3966 | /* wptr/rptr are in bytes! */ | |
3967 | rptr += 16; | |
3968 | rptr &= rdev->ih.ptr_mask; | |
3969 | } | |
25a857fb AD |
3970 | if (queue_hotplug) |
3971 | schedule_work(&rdev->hotplug_work); | |
3972 | rdev->ih.rptr = rptr; | |
3973 | WREG32(IH_RB_RPTR, rdev->ih.rptr); | |
c20dc369 CK |
3974 | atomic_set(&rdev->ih.lock, 0); |
3975 | ||
3976 | /* make sure wptr hasn't changed while processing */ | |
3977 | wptr = si_get_ih_wptr(rdev); | |
3978 | if (wptr != rptr) | |
3979 | goto restart_ih; | |
3980 | ||
25a857fb AD |
3981 | return IRQ_HANDLED; |
3982 | } | |
3983 | ||
8c5fd7ef AD |
3984 | /** |
3985 | * si_copy_dma - copy pages using the DMA engine | |
3986 | * | |
3987 | * @rdev: radeon_device pointer | |
3988 | * @src_offset: src GPU address | |
3989 | * @dst_offset: dst GPU address | |
3990 | * @num_gpu_pages: number of GPU pages to xfer | |
3991 | * @fence: radeon fence object | |
3992 | * | |
3993 | * Copy GPU pages using the DMA engine (SI). | |
3994 | * Used by the radeon ttm implementation to move pages if | |
3995 | * registered as the asic copy callback. | |
3996 | */ | |
3997 | int si_copy_dma(struct radeon_device *rdev, | |
3998 | uint64_t src_offset, uint64_t dst_offset, | |
3999 | unsigned num_gpu_pages, | |
4000 | struct radeon_fence **fence) | |
4001 | { | |
4002 | struct radeon_semaphore *sem = NULL; | |
4003 | int ring_index = rdev->asic->copy.dma_ring_index; | |
4004 | struct radeon_ring *ring = &rdev->ring[ring_index]; | |
4005 | u32 size_in_bytes, cur_size_in_bytes; | |
4006 | int i, num_loops; | |
4007 | int r = 0; | |
4008 | ||
4009 | r = radeon_semaphore_create(rdev, &sem); | |
4010 | if (r) { | |
4011 | DRM_ERROR("radeon: moving bo (%d).\n", r); | |
4012 | return r; | |
4013 | } | |
4014 | ||
4015 | size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); | |
4016 | num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff); | |
4017 | r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11); | |
4018 | if (r) { | |
4019 | DRM_ERROR("radeon: moving bo (%d).\n", r); | |
4020 | radeon_semaphore_free(rdev, &sem, NULL); | |
4021 | return r; | |
4022 | } | |
4023 | ||
4024 | if (radeon_fence_need_sync(*fence, ring->idx)) { | |
4025 | radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, | |
4026 | ring->idx); | |
4027 | radeon_fence_note_sync(*fence, ring->idx); | |
4028 | } else { | |
4029 | radeon_semaphore_free(rdev, &sem, NULL); | |
4030 | } | |
4031 | ||
4032 | for (i = 0; i < num_loops; i++) { | |
4033 | cur_size_in_bytes = size_in_bytes; | |
4034 | if (cur_size_in_bytes > 0xFFFFF) | |
4035 | cur_size_in_bytes = 0xFFFFF; | |
4036 | size_in_bytes -= cur_size_in_bytes; | |
4037 | radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes)); | |
4038 | radeon_ring_write(ring, dst_offset & 0xffffffff); | |
4039 | radeon_ring_write(ring, src_offset & 0xffffffff); | |
4040 | radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); | |
4041 | radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff); | |
4042 | src_offset += cur_size_in_bytes; | |
4043 | dst_offset += cur_size_in_bytes; | |
4044 | } | |
4045 | ||
4046 | r = radeon_fence_emit(rdev, fence, ring->idx); | |
4047 | if (r) { | |
4048 | radeon_ring_unlock_undo(rdev, ring); | |
4049 | return r; | |
4050 | } | |
4051 | ||
4052 | radeon_ring_unlock_commit(rdev, ring); | |
4053 | radeon_semaphore_free(rdev, &sem, *fence); | |
4054 | ||
4055 | return r; | |
4056 | } | |
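/* Illustrative usage sketch (hypothetical caller, not part of the
 * driver): copy 16 GPU pages and block until the DMA engine signals the
 * fence.  radeon_fence_need_sync() tolerates a NULL fence, so a caller
 * with nothing to synchronize against may pass a pointer to NULL.
 */
static int example_si_dma_copy(struct radeon_device *rdev,
			       uint64_t src_gpu, uint64_t dst_gpu)
{
	struct radeon_fence *fence = NULL;
	int r;

	r = si_copy_dma(rdev, src_gpu, dst_gpu, 16, &fence);
	if (r)
		return r;
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}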
4057 | ||
9b136d51 AD |
4058 | /* |
4059 | * startup/shutdown callbacks | |
4060 | */ | |
4061 | static int si_startup(struct radeon_device *rdev) | |
4062 | { | |
4063 | struct radeon_ring *ring; | |
4064 | int r; | |
4065 | ||
4066 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || | |
4067 | !rdev->rlc_fw || !rdev->mc_fw) { | |
4068 | r = si_init_microcode(rdev); | |
4069 | if (r) { | |
4070 | DRM_ERROR("Failed to load firmware!\n"); | |
4071 | return r; | |
4072 | } | |
4073 | } | |
4074 | ||
4075 | r = si_mc_load_microcode(rdev); | |
4076 | if (r) { | |
4077 | DRM_ERROR("Failed to load MC firmware!\n"); | |
4078 | return r; | |
4079 | } | |
4080 | ||
4081 | r = r600_vram_scratch_init(rdev); | |
4082 | if (r) | |
4083 | return r; | |
4084 | ||
4085 | si_mc_program(rdev); | |
4086 | r = si_pcie_gart_enable(rdev); | |
4087 | if (r) | |
4088 | return r; | |
4089 | si_gpu_init(rdev); | |
4090 | ||
4091 | #if 0 | |
4092 | r = evergreen_blit_init(rdev); | |
4093 | if (r) { | |
4094 | r600_blit_fini(rdev); | |
4095 | rdev->asic->copy = NULL; | |
4096 | dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); | |
4097 | } | |
4098 | #endif | |
4099 | /* allocate rlc buffers */ | |
4100 | r = si_rlc_init(rdev); | |
4101 | if (r) { | |
4102 | DRM_ERROR("Failed to init rlc BOs!\n"); | |
4103 | return r; | |
4104 | } | |
4105 | ||
4106 | /* allocate wb buffer */ | |
4107 | r = radeon_wb_init(rdev); | |
4108 | if (r) | |
4109 | return r; | |
4110 | ||
4111 | r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); | |
4112 | if (r) { | |
4113 | dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); | |
4114 | return r; | |
4115 | } | |
4116 | ||
4117 | r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX); | |
4118 | if (r) { | |
4119 | dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); | |
4120 | return r; | |
4121 | } | |
4122 | ||
4123 | r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX); | |
4124 | if (r) { | |
4125 | dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); | |
4126 | return r; | |
4127 | } | |
4128 | ||
8c5fd7ef AD |
4129 | r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX); |
4130 | if (r) { | |
4131 | dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); | |
4132 | return r; | |
4133 | } | |
4134 | ||
4135 | r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX); | |
4136 | if (r) { | |
4137 | dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); | |
4138 | return r; | |
4139 | } | |
4140 | ||
9b136d51 AD |
4141 | /* Enable IRQ */ |
4142 | r = si_irq_init(rdev); | |
4143 | if (r) { | |
4144 | DRM_ERROR("radeon: IH init failed (%d).\n", r); | |
4145 | radeon_irq_kms_fini(rdev); | |
4146 | return r; | |
4147 | } | |
4148 | si_irq_set(rdev); | |
4149 | ||
4150 | ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | |
4151 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, | |
4152 | CP_RB0_RPTR, CP_RB0_WPTR, | |
4153 | 0, 0xfffff, RADEON_CP_PACKET2); | |
4154 | if (r) | |
4155 | return r; | |
4156 | ||
4157 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; | |
4158 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET, | |
4159 | CP_RB1_RPTR, CP_RB1_WPTR, | |
4160 | 0, 0xfffff, RADEON_CP_PACKET2); | |
4161 | if (r) | |
4162 | return r; | |
4163 | ||
4164 | ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; | |
4165 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET, | |
4166 | CP_RB2_RPTR, CP_RB2_WPTR, | |
4167 | 0, 0xfffff, RADEON_CP_PACKET2); | |
4168 | if (r) | |
4169 | return r; | |
4170 | ||
8c5fd7ef AD |
4171 | ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
4172 | r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, | |
4173 | DMA_RB_RPTR + DMA0_REGISTER_OFFSET, | |
4174 | DMA_RB_WPTR + DMA0_REGISTER_OFFSET, | |
4175 | 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); | |
4176 | if (r) | |
4177 | return r; | |
4178 | ||
4179 | ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; | |
4180 | r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, | |
4181 | DMA_RB_RPTR + DMA1_REGISTER_OFFSET, | |
4182 | DMA_RB_WPTR + DMA1_REGISTER_OFFSET, | |
4183 | 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); | |
4184 | if (r) | |
4185 | return r; | |
4186 | ||
9b136d51 AD |
4187 | r = si_cp_load_microcode(rdev); |
4188 | if (r) | |
4189 | return r; | |
4190 | r = si_cp_resume(rdev); | |
4191 | if (r) | |
4192 | return r; | |
4193 | ||
8c5fd7ef AD |
4194 | r = cayman_dma_resume(rdev); |
4195 | if (r) | |
4196 | return r; | |
4197 | ||
2898c348 CK |
4198 | r = radeon_ib_pool_init(rdev); |
4199 | if (r) { | |
4200 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); | |
9b136d51 | 4201 | return r; |
2898c348 | 4202 | } |
9b136d51 | 4203 | |
c6105f24 CK |
4204 | r = radeon_vm_manager_init(rdev); |
4205 | if (r) { | |
4206 | dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r); | |
9b136d51 | 4207 | return r; |
c6105f24 | 4208 | } |
9b136d51 AD |
4209 | |
4210 | return 0; | |
4211 | } | |
4212 | ||
4213 | int si_resume(struct radeon_device *rdev) | |
4214 | { | |
4215 | int r; | |
4216 | ||
4217 | /* Do not reset the GPU before posting; on rv770 hw, unlike r500 hw, | |
4218 | * posting will perform the tasks necessary to bring the GPU back | |
4219 | * into good shape. | |
4220 | */ | |
4221 | /* post card */ | |
4222 | atom_asic_init(rdev->mode_info.atom_context); | |
4223 | ||
4224 | rdev->accel_working = true; | |
4225 | r = si_startup(rdev); | |
4226 | if (r) { | |
4227 | DRM_ERROR("si startup failed on resume\n"); | |
4228 | rdev->accel_working = false; | |
4229 | return r; | |
4230 | } | |
4231 | ||
4232 | return r; | |
4233 | ||
4234 | } | |
4235 | ||
4236 | int si_suspend(struct radeon_device *rdev) | |
4237 | { | |
9b136d51 | 4238 | si_cp_enable(rdev, false); |
8c5fd7ef | 4239 | cayman_dma_stop(rdev); |
9b136d51 AD |
4240 | si_irq_suspend(rdev); |
4241 | radeon_wb_disable(rdev); | |
4242 | si_pcie_gart_disable(rdev); | |
4243 | return 0; | |
4244 | } | |
4245 | ||
4246 | /* The plan is to move initialization into this function and use | |
4247 | * helper functions so that radeon_device_init does pretty much | |
4248 | * nothing more than call asic-specific functions. This should | |
4249 | * also allow us to remove a bunch of callback functions like | |
4250 | * vram_info. | |
4251 | */ | |
4252 | int si_init(struct radeon_device *rdev) | |
4253 | { | |
4254 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | |
4255 | int r; | |
4256 | ||
9b136d51 AD |
4257 | /* Read BIOS */ |
4258 | if (!radeon_get_bios(rdev)) { | |
4259 | if (ASIC_IS_AVIVO(rdev)) | |
4260 | return -EINVAL; | |
4261 | } | |
4262 | /* Must be an ATOMBIOS */ | |
4263 | if (!rdev->is_atom_bios) { | |
4264 | dev_err(rdev->dev, "Expecting atombios for SI GPU\n"); | |
4265 | return -EINVAL; | |
4266 | } | |
4267 | r = radeon_atombios_init(rdev); | |
4268 | if (r) | |
4269 | return r; | |
4270 | ||
4271 | /* Post card if necessary */ | |
4272 | if (!radeon_card_posted(rdev)) { | |
4273 | if (!rdev->bios) { | |
4274 | dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); | |
4275 | return -EINVAL; | |
4276 | } | |
4277 | DRM_INFO("GPU not posted. posting now...\n"); | |
4278 | atom_asic_init(rdev->mode_info.atom_context); | |
4279 | } | |
4280 | /* Initialize scratch registers */ | |
4281 | si_scratch_init(rdev); | |
4282 | /* Initialize surface registers */ | |
4283 | radeon_surface_init(rdev); | |
4284 | /* Initialize clocks */ | |
4285 | radeon_get_clock_info(rdev->ddev); | |
4286 | ||
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;

	/* initialize memory controller */
	r = si_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

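	/* SI exposes five rings: the gfx ring, two compute rings
	 * (CP1/CP2), and two async DMA engines. The gfx/compute rings get
	 * 1 MB ring buffers and the DMA rings 64 KB; r600_ring_init()
	 * only records the (aligned) size here, the backing buffer object
	 * is allocated later by radeon_ring_init() during startup.
	 */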
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

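	/* r600_pcie_gart_init() only allocates the GART page table in
	 * VRAM; the GART is actually enabled during si_startup() and is
	 * disabled again on suspend via si_pcie_gart_disable().
	 */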
	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = si_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		si_cp_fini(rdev);
		cayman_dma_fini(rdev);
		si_irq_fini(rdev);
		si_rlc_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_vm_manager_fini(rdev);
		radeon_irq_kms_fini(rdev);
		si_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

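	/* A si_startup() failure above is deliberately not fatal: the
	 * error path unwinds the acceleration state in reverse order and
	 * clears accel_working, but si_init() keeps going so modesetting
	 * can still be used without acceleration.
	 */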

	/* Don't start up if the MC ucode is missing.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 */
	if (!rdev->mc_fw) {
		DRM_ERROR("radeon: MC ucode required for NI+.\n");
		return -EINVAL;
	}

	return 0;
}

void si_fini(struct radeon_device *rdev)
{
#if 0
	r600_blit_fini(rdev);
#endif
	si_cp_fini(rdev);
	cayman_dma_fini(rdev);
	si_irq_fini(rdev);
	si_rlc_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_vm_manager_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	si_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

/**
 * si_get_gpu_clock - return GPU clock counter snapshot
 *
 * @rdev: radeon_device pointer
 *
 * Fetches a GPU clock counter snapshot (SI).
 * Returns the 64 bit clock counter snapshot.
 */
uint64_t si_get_gpu_clock(struct radeon_device *rdev)
{
	uint64_t clock;

	mutex_lock(&rdev->gpu_clock_mutex);
	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&rdev->gpu_clock_mutex);
	return clock;
}
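
/* The gpu_clock_mutex serializes the capture-then-read sequence above:
 * writing RLC_CAPTURE_GPU_CLOCK_COUNT latches the 64-bit counter so the
 * LSB and MSB halves read back consistently. Illustrative use by a
 * caller measuring a workload in GPU clocks (sketch only; "start" and
 * "cycles" are hypothetical locals):
 *
 *	uint64_t start, cycles;
 *
 *	start = si_get_gpu_clock(rdev);
 *	... submit work and wait for it to complete ...
 *	cycles = si_get_gpu_clock(rdev) - start;
 */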