/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"
#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define CAYMAN_RLC_UCODE_SIZE 1024
#define ARUBA_RLC_UCODE_SIZE 1536
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}
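
/**
 * r600_pm_get_dynpm_state - look up the requested dynpm state
 * @rdev: radeon_device pointer
 *
 * Fills rdev->pm.requested_power_state_index and
 * rdev->pm.requested_clock_mode_index for the planned dynpm action
 * (minimum, downclock, upclock, default). Single-display-only power
 * states are skipped whenever more than one crtc is active.
 */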
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  pcie_lanes);
}
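
/**
 * rs780_pm_init_profile - build the power profile table (RS780/RS880 IGPs)
 * @rdev: radeon_device pointer
 *
 * Each profile entry records the power state index and clock mode index to
 * use with dpms on and off; the mapping below depends on how many power
 * states the vbios exposes (2, 3, or more).
 */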
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}
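
/**
 * r600_pm_init_profile - build the power profile table (r6xx discrete asics)
 * @rdev: radeon_device pointer
 *
 * On R600 itself every profile maps to the default power state; on the other
 * family members the low/mid/high entries are picked by power state type
 * (battery vs performance) via radeon_pm_get_type_index().
 */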
void r600_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}
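
/*
 * Apply the voltage for the requested power state / clock mode. Only
 * software controlled (VOLTAGE_SW) non-zero voltages are programmed; the
 * special 0xff01 flag value is skipped and redundant writes of the current
 * vddc are avoided.
 */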
void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}
bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}
/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}
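
/*
 * Program the HPD interrupt polarity from the current sense: clear the
 * polarity bit while connected, set it while disconnected, so the next
 * interrupt should fire on a change of connector state.
 */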
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}
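
/*
 * Enable the HPD pins of all connectors (eDP and LVDS are skipped, see the
 * comment in the loop below), program their polarity, and unmask the
 * corresponding hpd interrupts.
 */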
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS to avoid breaking
			 * the aux dp channel on imac; this helps (but does not
			 * completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 */
			continue;
		}
		if (ASIC_IS_DCE3(rdev)) {
			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
			if (ASIC_IS_DCE32(rdev))
				tmp |= DC_HPDx_EN;

			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			default:
				break;
			}
		}
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}
void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		if (ASIC_IS_DCE3(rdev)) {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				break;
			default:
				break;
			}
		}
		disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}
/*
 * R600 PCIE GART
 */
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.ptr;
		u32 tmp;

		/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
		 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * This seems to cause problems on some AGP cards. Just use the old
		 * method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
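
/*
 * Each GART page table entry is 8 bytes on r6xx, which is why
 * r600_pcie_gart_init() sizes the table as num_gpu_pages * 8 below.
 */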
int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}
void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same address as it appears in
 * the CPU (PCI) address space, as some GPUs seem to have issues when it is
 * reprogrammed to a different address space.
 *
 * If there is not enough space to fit the non-visible VRAM after the
 * aperture, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, then we place VRAM adjacent to the AGP aperture, as
 * we need them to be contiguous from the GPU's point of view so that we can
 * program the GPU to catch accesses outside them (weird GPU policy, see ??).
 *
 * This function never fails; the worst case is limiting VRAM or GTT.
 *
 * Note: GTT start, end, and size should be initialized before calling this
 * function on an AGP platform.
 */
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end + 1;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;

		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
			base <<= 24;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}
int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM informations */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	}
	radeon_update_bandwidth_info(rdev);
	return 0;
}
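
/*
 * The VRAM scratch object is a single pinned GPU page in VRAM; among other
 * things its address is used above in r600_mc_program() as the default
 * system aperture address.
 */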
int r600_vram_scratch_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     NULL, &rdev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->vram_scratch.robj,
			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->vram_scratch.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->vram_scratch.robj,
				(void **)&rdev->vram_scratch.ptr);
	if (r)
		radeon_bo_unpin(rdev->vram_scratch.robj);
	radeon_bo_unreserve(rdev->vram_scratch.robj);

	return r;
}
void r600_vram_scratch_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->vram_scratch.robj);
		radeon_bo_unpin(rdev->vram_scratch.robj);
		radeon_bo_unreserve(rdev->vram_scratch.robj);
	}
	radeon_bo_unref(&rdev->vram_scratch.robj);
}
/* We don't check whether the GPU really needs a reset; we simply do the
 * reset. It's up to the caller to determine if the GPU needs one. We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 tmp;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	/* Check if any of the rendering block is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		RREG32(R_008020_GRBM_SOFT_RESET);
		mdelay(15);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	RREG32(R_008020_GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	/* Wait a little for things to settle down */
	mdelay(1);
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_resume(rdev, &save);
	return 0;
}
bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status2;

	srbm_status = RREG32(R_000E50_SRBM_STATUS);
	grbm_status = RREG32(R_008010_GRBM_STATUS);
	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
	if (!G_008010_GUI_ACTIVE(grbm_status)) {
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force CP activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}
int r600_asic_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}
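
/**
 * r6xx_remap_render_backend - build the backend map for the enabled RBs
 *
 * Distributes the rendering pipes over the render backends that are not in
 * @disabled_rb_mask: each enabled backend index is emitted pipe_rb_ratio
 * times (plus one extra slot while a remainder is left), packed into fields
 * that are 2 bits wide on r6xx/r7xx and 4 bits wide on evergreen and newer.
 */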
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
			      u32 tiling_pipe_num,
			      u32 max_rb_num,
			      u32 total_max_rb_num,
			      u32 disabled_rb_mask)
{
	u32 rendering_pipe_num, rb_num_width, req_rb_num;
	u32 pipe_rb_ratio, pipe_rb_remain;
	u32 data = 0, mask = 1 << (max_rb_num - 1);
	unsigned i, j;

	/* mask out the RBs that don't exist on that asic */
	disabled_rb_mask |= (0xff << max_rb_num) & 0xff;

	rendering_pipe_num = 1 << tiling_pipe_num;
	req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
	BUG_ON(rendering_pipe_num < req_rb_num);

	pipe_rb_ratio = rendering_pipe_num / req_rb_num;
	pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;

	if (rdev->family <= CHIP_RV740) {
		/* r6xx/r7xx */
		rb_num_width = 2;
	} else {
		/* eg+ */
		rb_num_width = 4;
	}

	for (i = 0; i < max_rb_num; i++) {
		if (!(mask & disabled_rb_mask)) {
			for (j = 0; j < pipe_rb_ratio; j++) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
			}
			if (pipe_rb_remain) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
				pipe_rb_remain--;
			}
		}
		mask >>= 1;
	}

	return data;
}
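
/*
 * Population count over all 32 bits, e.g. r600_count_pipe_bits(0xb) == 3;
 * equivalent to hweight32().
 */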
int r600_count_pipe_bits(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}
void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 cc_rb_backend_disable;
	u32 cc_gc_shader_pipe_config;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;
	u32 disabled_rb_mask;

	rdev->config.r600.tiling_group_size = 256;
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);

	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	tmp = R6XX_MAX_BACKENDS -
		r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
	if (tmp < rdev->config.r600.max_backends) {
		rdev->config.r600.max_backends = tmp;
	}

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
	tmp = R6XX_MAX_PIPES -
		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
	if (tmp < rdev->config.r600.max_pipes) {
		rdev->config.r600.max_pipes = tmp;
	}
	tmp = R6XX_MAX_SIMDS -
		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
	if (tmp < rdev->config.r600.max_simds) {
		rdev->config.r600.max_simds = tmp;
	}

	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
	tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
					R6XX_MAX_BACKENDS, disabled_rb_mask);
	tiling_config |= tmp << 16;
	rdev->config.r600.backend_map = tmp;

	rdev->config.r600.tile_config = tiling_config;
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if ((rdev->family > CHIP_R600))
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));
	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256)
		tmp = 256;
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);

	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);

	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
	WREG32(VC_ENHANCE, 0);
}
1830 * Indirect registers accessor
1832 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
1836 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1837 (void)RREG32(PCIE_PORT_INDEX);
1838 r = RREG32(PCIE_PORT_DATA);
1842 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1844 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1845 (void)RREG32(PCIE_PORT_INDEX);
1846 WREG32(PCIE_PORT_DATA, (v));
1847 (void)RREG32(PCIE_PORT_DATA);
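/*
 * Illustrative sketch, not driver code: the PCIe port accessors above use
 * the classic index/data register pair -- write the register index, read
 * the index port back to flush the posted write, then touch the data port.
 * A minimal standalone model of that pattern (the volatile pointers below
 * are hypothetical stand-ins for the WREG32/RREG32 MMIO helpers):
 */
#if 0
#include <stdint.h>

static uint32_t indexed_read(volatile uint32_t *index_port,
			     volatile uint32_t *data_port, uint32_t reg)
{
	*index_port = reg & 0xff;	/* select the indirect register */
	(void)*index_port;		/* readback flushes the posted write */
	return *data_port;		/* fetch the selected register */
}

static void indexed_write(volatile uint32_t *index_port,
			  volatile uint32_t *data_port, uint32_t reg, uint32_t v)
{
	*index_port = reg & 0xff;
	(void)*index_port;
	*data_port = v;
	(void)*data_port;		/* make sure the write landed */
}
#endif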
1853 void r600_cp_stop(struct radeon_device *rdev)
1855 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1856 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1857 WREG32(SCRATCH_UMSK, 0);
1860 int r600_init_microcode(struct radeon_device *rdev)
1862 struct platform_device *pdev;
1863 const char *chip_name;
1864 const char *rlc_chip_name;
1865 size_t pfp_req_size, me_req_size, rlc_req_size;
1871 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
1874 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
1878 switch (rdev->family) {
1881 rlc_chip_name = "R600";
1884 chip_name = "RV610";
1885 rlc_chip_name = "R600";
1888 chip_name = "RV630";
1889 rlc_chip_name = "R600";
1892 chip_name = "RV620";
1893 rlc_chip_name = "R600";
1896 chip_name = "RV635";
1897 rlc_chip_name = "R600";
1900 chip_name = "RV670";
1901 rlc_chip_name = "R600";
1905 chip_name = "RS780";
1906 rlc_chip_name = "R600";
1909 chip_name = "RV770";
1910 rlc_chip_name = "R700";
1914 chip_name = "RV730";
1915 rlc_chip_name = "R700";
1918 chip_name = "RV710";
1919 rlc_chip_name = "R700";
1922 chip_name = "CEDAR";
1923 rlc_chip_name = "CEDAR";
1926 chip_name = "REDWOOD";
1927 rlc_chip_name = "REDWOOD";
1930 chip_name = "JUNIPER";
1931 rlc_chip_name = "JUNIPER";
1935 chip_name = "CYPRESS";
1936 rlc_chip_name = "CYPRESS";
1940 rlc_chip_name = "SUMO";
1944 rlc_chip_name = "SUMO";
1947 chip_name = "SUMO2";
1948 rlc_chip_name = "SUMO";
1953 if (rdev->family >= CHIP_CEDAR) {
1954 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
1955 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
1956 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
1957 } else if (rdev->family >= CHIP_RV770) {
1958 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
1959 me_req_size = R700_PM4_UCODE_SIZE * 4;
1960 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
1962 pfp_req_size = PFP_UCODE_SIZE * 4;
1963 me_req_size = PM4_UCODE_SIZE * 12;
1964 rlc_req_size = RLC_UCODE_SIZE * 4;
1967 DRM_INFO("Loading %s Microcode\n", chip_name);
1969 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
1970 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
1973 if (rdev->pfp_fw->size != pfp_req_size) {
1975 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
1976 rdev->pfp_fw->size, fw_name);
1981 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
1982 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
1985 if (rdev->me_fw->size != me_req_size) {
1987 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
1988 rdev->me_fw->size, fw_name);
1992 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
1993 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
1996 if (rdev->rlc_fw->size != rlc_req_size) {
1998 "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
1999 rdev->rlc_fw->size, fw_name);
2004 platform_device_unregister(pdev);
2009 "r600_cp: Failed to load firmware \"%s\"\n",
2011 release_firmware(rdev->pfp_fw);
2012 rdev->pfp_fw = NULL;
2013 release_firmware(rdev->me_fw);
2015 release_firmware(rdev->rlc_fw);
2016 rdev->rlc_fw = NULL;
2021 static int r600_cp_load_microcode(struct radeon_device *rdev)
2023 const __be32 *fw_data;
2026 if (!rdev->me_fw || !rdev->pfp_fw)
2035 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
2038 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2039 RREG32(GRBM_SOFT_RESET);
2041 WREG32(GRBM_SOFT_RESET, 0);
2043 WREG32(CP_ME_RAM_WADDR, 0);
2045 fw_data = (const __be32 *)rdev->me_fw->data;
2046 WREG32(CP_ME_RAM_WADDR, 0);
2047 for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
2048 WREG32(CP_ME_RAM_DATA,
2049 be32_to_cpup(fw_data++));
2051 fw_data = (const __be32 *)rdev->pfp_fw->data;
2052 WREG32(CP_PFP_UCODE_ADDR, 0);
2053 for (i = 0; i < PFP_UCODE_SIZE; i++)
2054 WREG32(CP_PFP_UCODE_DATA,
2055 be32_to_cpup(fw_data++));
2057 WREG32(CP_PFP_UCODE_ADDR, 0);
2058 WREG32(CP_ME_RAM_WADDR, 0);
2059 WREG32(CP_ME_RAM_RADDR, 0);
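/*
 * Illustrative sketch, not driver code: the microcode images ship as
 * big-endian 32-bit words, which is why the loops above run each word
 * through be32_to_cpup() before writing it to the ucode data port. A
 * standalone model of the same conversion (names are hypothetical):
 */
#if 0
#include <stdint.h>
#include <stddef.h>

static uint32_t be32_word(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static void upload_ucode(volatile uint32_t *data_port,
			 const uint8_t *fw, size_t nwords)
{
	size_t i;

	/* the hardware auto-increments its write address on each store */
	for (i = 0; i < nwords; i++)
		*data_port = be32_word(fw + 4 * i);
}
#endif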
2063 int r600_cp_start(struct radeon_device *rdev)
2065 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2069 r = radeon_ring_lock(rdev, ring, 7);
2071 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2074 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
2075 radeon_ring_write(ring, 0x1);
2076 if (rdev->family >= CHIP_RV770) {
2077 radeon_ring_write(ring, 0x0);
2078 radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
2080 radeon_ring_write(ring, 0x3);
2081 radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
2083 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2084 radeon_ring_write(ring, 0);
2085 radeon_ring_write(ring, 0);
2086 radeon_ring_unlock_commit(rdev, ring);
2089 WREG32(R_0086D8_CP_ME_CNTL, cp_me);
2093 int r600_cp_resume(struct radeon_device *rdev)
2095 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2101 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2102 RREG32(GRBM_SOFT_RESET);
2104 WREG32(GRBM_SOFT_RESET, 0);
2106 /* Set ring buffer size */
2107 rb_bufsz = drm_order(ring->ring_size / 8);
2108 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2110 tmp |= BUF_SWAP_32BIT;
2112 WREG32(CP_RB_CNTL, tmp);
2113 WREG32(CP_SEM_WAIT_TIMER, 0x0);
2115 /* Set the write pointer delay */
2116 WREG32(CP_RB_WPTR_DELAY, 0);
2118 /* Initialize the ring buffer's read and write pointers */
2119 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2120 WREG32(CP_RB_RPTR_WR, 0);
2122 WREG32(CP_RB_WPTR, ring->wptr);
2124 /* set the wb address whether it's enabled or not */
2125 WREG32(CP_RB_RPTR_ADDR,
2126 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
2127 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2128 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
2130 if (rdev->wb.enabled)
2131 WREG32(SCRATCH_UMSK, 0xff);
2133 tmp |= RB_NO_UPDATE;
2134 WREG32(SCRATCH_UMSK, 0);
2138 WREG32(CP_RB_CNTL, tmp);
2140 WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
2141 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2143 ring->rptr = RREG32(CP_RB_RPTR);
2145 r600_cp_start(rdev);
2147 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
2149 ring->ready = false;
2155 void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
2159 /* Align ring size */
2160 rb_bufsz = drm_order(ring_size / 8);
2161 ring_size = (1 << (rb_bufsz + 1)) * 4;
2162 ring->ring_size = ring_size;
2163 ring->align_mask = 16 - 1;
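/*
 * Illustrative sketch, not driver code: drm_order() is a ceiling log2, so
 * the computation above rounds the requested size up until ring_size / 8
 * is a power of two, which is what the RB_BUFSZ register field encodes.
 * A standalone model under that assumption:
 */
#if 0
static unsigned int ceil_log2(unsigned long size)	/* ~drm_order() */
{
	unsigned int order = 0;

	while ((1UL << order) < size)
		order++;
	return order;
}

/*
 * Example: a 1 MiB request gives ceil_log2(1048576 / 8) = 17, so the
 * aligned size is (1 << (17 + 1)) * 4 = 1 MiB (already aligned), while a
 * 1.5 MiB request rounds up to 2 MiB.
 */
#endif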
2166 void r600_cp_fini(struct radeon_device *rdev)
2169 radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
2174 * GPU scratch register helper functions.
2176 void r600_scratch_init(struct radeon_device *rdev)
2180 rdev->scratch.num_reg = 7;
2181 rdev->scratch.reg_base = SCRATCH_REG0;
2182 for (i = 0; i < rdev->scratch.num_reg; i++) {
2183 rdev->scratch.free[i] = true;
2184 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
2188 int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2192 unsigned i, ridx = radeon_ring_index(rdev, ring);
2195 r = radeon_scratch_get(rdev, &scratch);
2197 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2200 WREG32(scratch, 0xCAFEDEAD);
2201 r = radeon_ring_lock(rdev, ring, 3);
2203 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r);
2204 radeon_scratch_free(rdev, scratch);
2207 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2208 radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2209 radeon_ring_write(ring, 0xDEADBEEF);
2210 radeon_ring_unlock_commit(rdev, ring);
2211 for (i = 0; i < rdev->usec_timeout; i++) {
2212 tmp = RREG32(scratch);
2213 if (tmp == 0xDEADBEEF)
2217 if (i < rdev->usec_timeout) {
2218 DRM_INFO("ring test on %d succeeded in %d usecs\n", ridx, i);
2220 DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
2221 ridx, scratch, tmp);
2224 radeon_scratch_free(rdev, scratch);
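/*
 * Illustrative sketch, not driver code: the ring test above is a
 * write-a-magic-then-poll handshake -- seed a scratch register with
 * 0xCAFEDEAD, ask the CP to write 0xDEADBEEF to it, then poll. A
 * standalone model of the poll loop (read_scratch() is a hypothetical
 * callback standing in for RREG32(scratch)):
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool poll_for_magic(uint32_t (*read_scratch)(void),
			   uint32_t magic, unsigned int usec_timeout)
{
	unsigned int i;

	for (i = 0; i < usec_timeout; i++) {
		if (read_scratch() == magic)
			return true;	/* the GPU executed our packet */
		/* the real driver delays ~1us per iteration here */
	}
	return false;			/* ring never consumed the packet */
}
#endif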
2228 void r600_fence_ring_emit(struct radeon_device *rdev,
2229 struct radeon_fence *fence)
2231 struct radeon_ring *ring = &rdev->ring[fence->ring];
2233 if (rdev->wb.use_event) {
2234 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
2235 /* flush read cache over gart */
2236 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2237 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
2238 PACKET3_VC_ACTION_ENA |
2239 PACKET3_SH_ACTION_ENA);
2240 radeon_ring_write(ring, 0xFFFFFFFF);
2241 radeon_ring_write(ring, 0);
2242 radeon_ring_write(ring, 10); /* poll interval */
2243 /* EVENT_WRITE_EOP - flush caches, send int */
2244 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2245 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
2246 radeon_ring_write(ring, addr & 0xffffffff);
2247 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
2248 radeon_ring_write(ring, fence->seq);
2249 radeon_ring_write(ring, 0);
2251 /* flush read cache over gart */
2252 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2253 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
2254 PACKET3_VC_ACTION_ENA |
2255 PACKET3_SH_ACTION_ENA);
2256 radeon_ring_write(ring, 0xFFFFFFFF);
2257 radeon_ring_write(ring, 0);
2258 radeon_ring_write(ring, 10); /* poll interval */
2259 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2260 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
2261 /* wait for 3D idle clean */
2262 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2263 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2264 radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
2265 /* Emit fence sequence & fire IRQ */
2266 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2267 radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2268 radeon_ring_write(ring, fence->seq);
2269 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2270 radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
2271 radeon_ring_write(ring, RB_INT_STAT);
2275 void r600_semaphore_ring_emit(struct radeon_device *rdev,
2276 struct radeon_ring *ring,
2277 struct radeon_semaphore *semaphore,
2280 uint64_t addr = semaphore->gpu_addr;
2281 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
2283 if (rdev->family < CHIP_CAYMAN)
2284 sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
2286 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
2287 radeon_ring_write(ring, addr & 0xffffffff);
2288 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
2291 int r600_copy_blit(struct radeon_device *rdev,
2292 uint64_t src_offset,
2293 uint64_t dst_offset,
2294 unsigned num_gpu_pages,
2295 struct radeon_fence **fence)
2297 struct radeon_semaphore *sem = NULL;
2298 struct radeon_sa_bo *vb = NULL;
2301 r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
2305 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
2306 r600_blit_done_copy(rdev, fence, vb, sem);
2310 void r600_blit_suspend(struct radeon_device *rdev)
2314 /* unpin shaders bo */
2315 if (rdev->r600_blit.shader_obj) {
2316 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
2318 radeon_bo_unpin(rdev->r600_blit.shader_obj);
2319 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
2324 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
2325 uint32_t tiling_flags, uint32_t pitch,
2326 uint32_t offset, uint32_t obj_size)
2328 /* FIXME: implement */
2332 void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2334 /* FIXME: implement */
2337 int r600_startup(struct radeon_device *rdev)
2339 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2342 /* enable pcie gen2 link */
2343 r600_pcie_gen2_enable(rdev);
2345 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2346 r = r600_init_microcode(rdev);
2348 DRM_ERROR("Failed to load firmware!\n");
2353 r = r600_vram_scratch_init(rdev);
2357 r600_mc_program(rdev);
2358 if (rdev->flags & RADEON_IS_AGP) {
2359 r600_agp_enable(rdev);
2361 r = r600_pcie_gart_enable(rdev);
2365 r600_gpu_init(rdev);
2366 r = r600_blit_init(rdev);
2368 r600_blit_fini(rdev);
2369 rdev->asic->copy.copy = NULL;
2370 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
2373 /* allocate wb buffer */
2374 r = radeon_wb_init(rdev);
2378 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
2380 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
2385 r = r600_irq_init(rdev);
2387 DRM_ERROR("radeon: IH init failed (%d).\n", r);
2388 radeon_irq_kms_fini(rdev);
2393 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
2394 R600_CP_RB_RPTR, R600_CP_RB_WPTR,
2395 0, 0xfffff, RADEON_CP_PACKET2);
2399 r = r600_cp_load_microcode(rdev);
2402 r = r600_cp_resume(rdev);
2406 r = radeon_ib_pool_start(rdev);
2410 r = radeon_ib_ring_tests(rdev);
2414 r = r600_audio_init(rdev);
2416 DRM_ERROR("radeon: audio init failed\n");
2423 void r600_vga_set_state(struct radeon_device *rdev, bool state)
2427 temp = RREG32(CONFIG_CNTL);
2428 if (!state) {
2434 WREG32(CONFIG_CNTL, temp);
2437 int r600_resume(struct radeon_device *rdev)
2441 /* Do not reset GPU before posting; on r600 hw, unlike on r500 hw,
2442 * posting will perform the necessary tasks to bring the GPU back into good
2446 atom_asic_init(rdev->mode_info.atom_context);
2448 rdev->accel_working = true;
2449 r = r600_startup(rdev);
2451 DRM_ERROR("r600 startup failed on resume\n");
2452 rdev->accel_working = false;
2459 int r600_suspend(struct radeon_device *rdev)
2461 r600_audio_fini(rdev);
2462 radeon_ib_pool_suspend(rdev);
2463 r600_blit_suspend(rdev);
2464 /* FIXME: we should wait for ring to be empty */
2466 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
2467 r600_irq_suspend(rdev);
2468 radeon_wb_disable(rdev);
2469 r600_pcie_gart_disable(rdev);
2474 /* The plan is to move initialization into this function and use
2475 * helper functions so that radeon_device_init does pretty much
2476 * nothing more than call asic specific functions. This
2477 * should also allow us to remove a bunch of callback functions
2480 int r600_init(struct radeon_device *rdev)
2484 if (r600_debugfs_mc_info_init(rdev)) {
2485 DRM_ERROR("Failed to register debugfs file for mc !\n");
2488 if (!radeon_get_bios(rdev)) {
2489 if (ASIC_IS_AVIVO(rdev))
2492 /* Must be an ATOMBIOS */
2493 if (!rdev->is_atom_bios) {
2494 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
2497 r = radeon_atombios_init(rdev);
2500 /* Post card if necessary */
2501 if (!radeon_card_posted(rdev)) {
2503 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2506 DRM_INFO("GPU not posted. posting now...\n");
2507 atom_asic_init(rdev->mode_info.atom_context);
2509 /* Initialize scratch registers */
2510 r600_scratch_init(rdev);
2511 /* Initialize surface registers */
2512 radeon_surface_init(rdev);
2513 /* Initialize clocks */
2514 radeon_get_clock_info(rdev->ddev);
2516 r = radeon_fence_driver_init(rdev);
2519 if (rdev->flags & RADEON_IS_AGP) {
2520 r = radeon_agp_init(rdev);
2522 radeon_agp_disable(rdev);
2524 r = r600_mc_init(rdev);
2527 /* Memory manager */
2528 r = radeon_bo_init(rdev);
2532 r = radeon_irq_kms_init(rdev);
2536 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
2537 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
2539 rdev->ih.ring_obj = NULL;
2540 r600_ih_ring_init(rdev, 64 * 1024);
2542 r = r600_pcie_gart_init(rdev);
2546 r = radeon_ib_pool_init(rdev);
2547 rdev->accel_working = true;
2549 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2550 rdev->accel_working = false;
2553 r = r600_startup(rdev);
2555 dev_err(rdev->dev, "disabling GPU acceleration\n");
2557 r600_irq_fini(rdev);
2558 radeon_wb_fini(rdev);
2560 radeon_irq_kms_fini(rdev);
2561 r600_pcie_gart_fini(rdev);
2562 rdev->accel_working = false;
2568 void r600_fini(struct radeon_device *rdev)
2570 r600_audio_fini(rdev);
2571 r600_blit_fini(rdev);
2573 r600_irq_fini(rdev);
2574 radeon_wb_fini(rdev);
2576 radeon_irq_kms_fini(rdev);
2577 r600_pcie_gart_fini(rdev);
2578 r600_vram_scratch_fini(rdev);
2579 radeon_agp_fini(rdev);
2580 radeon_gem_fini(rdev);
2581 radeon_fence_driver_fini(rdev);
2582 radeon_bo_fini(rdev);
2583 radeon_atombios_fini(rdev);
2592 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2594 struct radeon_ring *ring = &rdev->ring[ib->ring];
2596 /* FIXME: implement */
2597 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2598 radeon_ring_write(ring,
2602 (ib->gpu_addr & 0xFFFFFFFC));
2603 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
2604 radeon_ring_write(ring, ib->length_dw);
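/*
 * Illustrative sketch, not driver code: the INDIRECT_BUFFER packet above
 * carries a GPU address split into a dword-aligned low word and an 8-bit
 * high word (r6xx addresses are at most 40 bits). A standalone model of
 * the split:
 */
#if 0
#include <stdint.h>

static void split_ib_addr(uint64_t gpu_addr, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(gpu_addr & 0xFFFFFFFC);	/* bits [31:2], 4-byte aligned */
	*hi = (uint32_t)(gpu_addr >> 32) & 0xFF;	/* bits [39:32] */
}
#endif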
2607 int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
2609 struct radeon_ib ib;
2614 int ring_index = radeon_ring_index(rdev, ring);
2616 r = radeon_scratch_get(rdev, &scratch);
2618 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
2621 WREG32(scratch, 0xCAFEDEAD);
2622 r = radeon_ib_get(rdev, ring_index, &ib, 256);
2624 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
2627 ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
2628 ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2629 ib.ptr[2] = 0xDEADBEEF;
2631 r = radeon_ib_schedule(rdev, &ib);
2633 radeon_scratch_free(rdev, scratch);
2634 radeon_ib_free(rdev, &ib);
2635 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
2638 r = radeon_fence_wait(ib.fence, false);
2640 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
2643 for (i = 0; i < rdev->usec_timeout; i++) {
2644 tmp = RREG32(scratch);
2645 if (tmp == 0xDEADBEEF)
2649 if (i < rdev->usec_timeout) {
2650 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
2652 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
2656 radeon_scratch_free(rdev, scratch);
2657 radeon_ib_free(rdev, &ib);
2664 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty
2665 * much the same as the CP ring buffer, but in reverse. Rather than the CPU
2666 * writing to the ring and the GPU consuming, the GPU writes to the ring
2667 * and host consumes. As the host irq handler processes interrupts, it
2668 * increments the rptr. When the rptr catches up with the wptr, all the
2669 * current interrupts have been processed.
2672 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2676 /* Align ring size */
2677 rb_bufsz = drm_order(ring_size / 4);
2678 ring_size = (1 << rb_bufsz) * 4;
2679 rdev->ih.ring_size = ring_size;
2680 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
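/*
 * Illustrative sketch, not driver code: because the IH ring size is
 * rounded to a power of two above, ptr_mask turns pointer advances into
 * cheap wraparound. A standalone model of stepping past one 16-byte IV
 * entry:
 */
#if 0
#include <stdint.h>

static uint32_t ih_advance(uint32_t rptr, uint32_t ring_size)
{
	uint32_t ptr_mask = ring_size - 1;	/* ring_size is a power of two */

	return (rptr + 16) & ptr_mask;		/* wraps to 0 at the ring end */
}
#endif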
2684 int r600_ih_ring_alloc(struct radeon_device *rdev)
2688 /* Allocate ring buffer */
2689 if (rdev->ih.ring_obj == NULL) {
2690 r = radeon_bo_create(rdev, rdev->ih.ring_size,
2692 RADEON_GEM_DOMAIN_GTT,
2693 NULL, &rdev->ih.ring_obj);
2695 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
2698 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2699 if (unlikely(r != 0))
2701 r = radeon_bo_pin(rdev->ih.ring_obj,
2702 RADEON_GEM_DOMAIN_GTT,
2703 &rdev->ih.gpu_addr);
2705 radeon_bo_unreserve(rdev->ih.ring_obj);
2706 DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
2709 r = radeon_bo_kmap(rdev->ih.ring_obj,
2710 (void **)&rdev->ih.ring);
2711 radeon_bo_unreserve(rdev->ih.ring_obj);
2713 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
2720 void r600_ih_ring_fini(struct radeon_device *rdev)
2723 if (rdev->ih.ring_obj) {
2724 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2725 if (likely(r == 0)) {
2726 radeon_bo_kunmap(rdev->ih.ring_obj);
2727 radeon_bo_unpin(rdev->ih.ring_obj);
2728 radeon_bo_unreserve(rdev->ih.ring_obj);
2730 radeon_bo_unref(&rdev->ih.ring_obj);
2731 rdev->ih.ring = NULL;
2732 rdev->ih.ring_obj = NULL;
2736 void r600_rlc_stop(struct radeon_device *rdev)
2739 if ((rdev->family >= CHIP_RV770) &&
2740 (rdev->family <= CHIP_RV740)) {
2741 /* r7xx asics need to soft reset RLC before halting */
2742 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
2743 RREG32(SRBM_SOFT_RESET);
2745 WREG32(SRBM_SOFT_RESET, 0);
2746 RREG32(SRBM_SOFT_RESET);
2749 WREG32(RLC_CNTL, 0);
2752 static void r600_rlc_start(struct radeon_device *rdev)
2754 WREG32(RLC_CNTL, RLC_ENABLE);
2757 static int r600_rlc_init(struct radeon_device *rdev)
2760 const __be32 *fw_data;
2765 r600_rlc_stop(rdev);
2767 WREG32(RLC_HB_CNTL, 0);
2769 if (rdev->family == CHIP_ARUBA) {
2770 WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
2771 WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
2773 if (rdev->family <= CHIP_CAYMAN) {
2774 WREG32(RLC_HB_BASE, 0);
2775 WREG32(RLC_HB_RPTR, 0);
2776 WREG32(RLC_HB_WPTR, 0);
2778 if (rdev->family <= CHIP_CAICOS) {
2779 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
2780 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
2782 WREG32(RLC_MC_CNTL, 0);
2783 WREG32(RLC_UCODE_CNTL, 0);
2785 fw_data = (const __be32 *)rdev->rlc_fw->data;
2786 if (rdev->family >= CHIP_ARUBA) {
2787 for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
2788 WREG32(RLC_UCODE_ADDR, i);
2789 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2791 } else if (rdev->family >= CHIP_CAYMAN) {
2792 for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
2793 WREG32(RLC_UCODE_ADDR, i);
2794 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2796 } else if (rdev->family >= CHIP_CEDAR) {
2797 for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
2798 WREG32(RLC_UCODE_ADDR, i);
2799 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2801 } else if (rdev->family >= CHIP_RV770) {
2802 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
2803 WREG32(RLC_UCODE_ADDR, i);
2804 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2807 for (i = 0; i < RLC_UCODE_SIZE; i++) {
2808 WREG32(RLC_UCODE_ADDR, i);
2809 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2812 WREG32(RLC_UCODE_ADDR, 0);
2814 r600_rlc_start(rdev);
2819 static void r600_enable_interrupts(struct radeon_device *rdev)
2821 u32 ih_cntl = RREG32(IH_CNTL);
2822 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2824 ih_cntl |= ENABLE_INTR;
2825 ih_rb_cntl |= IH_RB_ENABLE;
2826 WREG32(IH_CNTL, ih_cntl);
2827 WREG32(IH_RB_CNTL, ih_rb_cntl);
2828 rdev->ih.enabled = true;
2831 void r600_disable_interrupts(struct radeon_device *rdev)
2833 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2834 u32 ih_cntl = RREG32(IH_CNTL);
2836 ih_rb_cntl &= ~IH_RB_ENABLE;
2837 ih_cntl &= ~ENABLE_INTR;
2838 WREG32(IH_RB_CNTL, ih_rb_cntl);
2839 WREG32(IH_CNTL, ih_cntl);
2840 /* set rptr, wptr to 0 */
2841 WREG32(IH_RB_RPTR, 0);
2842 WREG32(IH_RB_WPTR, 0);
2843 rdev->ih.enabled = false;
2847 static void r600_disable_interrupt_state(struct radeon_device *rdev)
2851 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2852 WREG32(GRBM_INT_CNTL, 0);
2853 WREG32(DxMODE_INT_MASK, 0);
2854 WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
2855 WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
2856 if (ASIC_IS_DCE3(rdev)) {
2857 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
2858 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
2859 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2860 WREG32(DC_HPD1_INT_CONTROL, tmp);
2861 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2862 WREG32(DC_HPD2_INT_CONTROL, tmp);
2863 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2864 WREG32(DC_HPD3_INT_CONTROL, tmp);
2865 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2866 WREG32(DC_HPD4_INT_CONTROL, tmp);
2867 if (ASIC_IS_DCE32(rdev)) {
2868 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2869 WREG32(DC_HPD5_INT_CONTROL, tmp);
2870 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2871 WREG32(DC_HPD6_INT_CONTROL, tmp);
2872 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
2873 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
2874 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
2875 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
2877 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
2878 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
2879 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
2880 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
2883 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2884 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2885 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2886 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2887 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2888 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2889 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2890 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2891 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
2892 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
2893 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
2894 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
2898 int r600_irq_init(struct radeon_device *rdev)
2902 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
2905 ret = r600_ih_ring_alloc(rdev);
2910 r600_disable_interrupts(rdev);
2913 ret = r600_rlc_init(rdev);
2915 r600_ih_ring_fini(rdev);
2919 /* setup interrupt control */
2920 /* set dummy read address to ring address */
2921 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
2922 interrupt_cntl = RREG32(INTERRUPT_CNTL);
2923 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
2924 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
2926 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
2927 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
2928 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
2929 WREG32(INTERRUPT_CNTL, interrupt_cntl);
2931 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
2932 rb_bufsz = drm_order(rdev->ih.ring_size / 4);
2934 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
2935 IH_WPTR_OVERFLOW_CLEAR |
2938 if (rdev->wb.enabled)
2939 ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
2941 /* set the writeback address whether it's enabled or not */
2942 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
2943 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
2945 WREG32(IH_RB_CNTL, ih_rb_cntl);
2947 /* set rptr, wptr to 0 */
2948 WREG32(IH_RB_RPTR, 0);
2949 WREG32(IH_RB_WPTR, 0);
2951 /* Default settings for IH_CNTL (disabled at first) */
2952 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
2953 /* RPTR_REARM only works if MSIs are enabled */
2954 if (rdev->msi_enabled)
2955 ih_cntl |= RPTR_REARM;
2956 WREG32(IH_CNTL, ih_cntl);
2958 /* force the active interrupt state to all disabled */
2959 if (rdev->family >= CHIP_CEDAR)
2960 evergreen_disable_interrupt_state(rdev);
2962 r600_disable_interrupt_state(rdev);
2964 /* at this point everything should be set up correctly to enable master */
2965 pci_set_master(rdev->pdev);
2968 r600_enable_interrupts(rdev);
2973 void r600_irq_suspend(struct radeon_device *rdev)
2975 r600_irq_disable(rdev);
2976 r600_rlc_stop(rdev);
2979 void r600_irq_fini(struct radeon_device *rdev)
2981 r600_irq_suspend(rdev);
2982 r600_ih_ring_fini(rdev);
2985 int r600_irq_set(struct radeon_device *rdev)
2987 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
2989 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
2990 u32 grbm_int_cntl = 0;
2992 u32 d1grph = 0, d2grph = 0;
2994 if (!rdev->irq.installed) {
2995 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
2998 /* don't enable anything if the ih is disabled */
2999 if (!rdev->ih.enabled) {
3000 r600_disable_interrupts(rdev);
3001 /* force the active interrupt state to all disabled */
3002 r600_disable_interrupt_state(rdev);
3006 if (ASIC_IS_DCE3(rdev)) {
3007 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3008 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3009 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3010 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
3011 if (ASIC_IS_DCE32(rdev)) {
3012 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3013 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3014 hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3015 hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3017 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3018 hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3021 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3022 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3023 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3024 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3025 hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3028 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
3029 DRM_DEBUG("r600_irq_set: sw int\n");
3030 cp_int_cntl |= RB_INT_ENABLE;
3031 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
3033 if (rdev->irq.crtc_vblank_int[0] ||
3034 atomic_read(&rdev->irq.pflip[0])) {
3035 DRM_DEBUG("r600_irq_set: vblank 0\n");
3036 mode_int |= D1MODE_VBLANK_INT_MASK;
3038 if (rdev->irq.crtc_vblank_int[1] ||
3039 atomic_read(&rdev->irq.pflip[1])) {
3040 DRM_DEBUG("r600_irq_set: vblank 1\n");
3041 mode_int |= D2MODE_VBLANK_INT_MASK;
3043 if (rdev->irq.hpd[0]) {
3044 DRM_DEBUG("r600_irq_set: hpd 1\n");
3045 hpd1 |= DC_HPDx_INT_EN;
3047 if (rdev->irq.hpd[1]) {
3048 DRM_DEBUG("r600_irq_set: hpd 2\n");
3049 hpd2 |= DC_HPDx_INT_EN;
3051 if (rdev->irq.hpd[2]) {
3052 DRM_DEBUG("r600_irq_set: hpd 3\n");
3053 hpd3 |= DC_HPDx_INT_EN;
3055 if (rdev->irq.hpd[3]) {
3056 DRM_DEBUG("r600_irq_set: hpd 4\n");
3057 hpd4 |= DC_HPDx_INT_EN;
3059 if (rdev->irq.hpd[4]) {
3060 DRM_DEBUG("r600_irq_set: hpd 5\n");
3061 hpd5 |= DC_HPDx_INT_EN;
3063 if (rdev->irq.hpd[5]) {
3064 DRM_DEBUG("r600_irq_set: hpd 6\n");
3065 hpd6 |= DC_HPDx_INT_EN;
3067 if (rdev->irq.afmt[0]) {
3068 DRM_DEBUG("r600_irq_set: hdmi 0\n");
3069 hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3071 if (rdev->irq.afmt[1]) {
3072 DRM_DEBUG("r600_irq_set: hdmi 1\n");
3073 hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3075 if (rdev->irq.gui_idle) {
3076 DRM_DEBUG("gui idle\n");
3077 grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
3080 WREG32(CP_INT_CNTL, cp_int_cntl);
3081 WREG32(DxMODE_INT_MASK, mode_int);
3082 WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
3083 WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
3084 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3085 if (ASIC_IS_DCE3(rdev)) {
3086 WREG32(DC_HPD1_INT_CONTROL, hpd1);
3087 WREG32(DC_HPD2_INT_CONTROL, hpd2);
3088 WREG32(DC_HPD3_INT_CONTROL, hpd3);
3089 WREG32(DC_HPD4_INT_CONTROL, hpd4);
3090 if (ASIC_IS_DCE32(rdev)) {
3091 WREG32(DC_HPD5_INT_CONTROL, hpd5);
3092 WREG32(DC_HPD6_INT_CONTROL, hpd6);
3093 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
3094 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
3096 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3097 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3100 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
3101 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
3102 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
3103 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3104 WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3110 static void r600_irq_ack(struct radeon_device *rdev)
3114 if (ASIC_IS_DCE3(rdev)) {
3115 rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
3116 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
3117 rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
3118 if (ASIC_IS_DCE32(rdev)) {
3119 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
3120 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
3122 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3123 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
3126 rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
3127 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3128 rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
3129 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3130 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
3132 rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
3133 rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
3135 if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3136 WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3137 if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3138 WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3139 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
3140 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3141 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
3142 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3143 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
3144 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3145 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
3146 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3147 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3148 if (ASIC_IS_DCE3(rdev)) {
3149 tmp = RREG32(DC_HPD1_INT_CONTROL);
3150 tmp |= DC_HPDx_INT_ACK;
3151 WREG32(DC_HPD1_INT_CONTROL, tmp);
3153 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
3154 tmp |= DC_HPDx_INT_ACK;
3155 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3158 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3159 if (ASIC_IS_DCE3(rdev)) {
3160 tmp = RREG32(DC_HPD2_INT_CONTROL);
3161 tmp |= DC_HPDx_INT_ACK;
3162 WREG32(DC_HPD2_INT_CONTROL, tmp);
3164 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
3165 tmp |= DC_HPDx_INT_ACK;
3166 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3169 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3170 if (ASIC_IS_DCE3(rdev)) {
3171 tmp = RREG32(DC_HPD3_INT_CONTROL);
3172 tmp |= DC_HPDx_INT_ACK;
3173 WREG32(DC_HPD3_INT_CONTROL, tmp);
3175 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
3176 tmp |= DC_HPDx_INT_ACK;
3177 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3180 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3181 tmp = RREG32(DC_HPD4_INT_CONTROL);
3182 tmp |= DC_HPDx_INT_ACK;
3183 WREG32(DC_HPD4_INT_CONTROL, tmp);
3185 if (ASIC_IS_DCE32(rdev)) {
3186 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3187 tmp = RREG32(DC_HPD5_INT_CONTROL);
3188 tmp |= DC_HPDx_INT_ACK;
3189 WREG32(DC_HPD5_INT_CONTROL, tmp);
3191 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3192 tmp = RREG32(DC_HPD6_INT_CONTROL);
3193 tmp |= DC_HPDx_INT_ACK;
3194 WREG32(DC_HPD6_INT_CONTROL, tmp);
3196 if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
3197 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
3198 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3199 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
3201 if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
3202 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
3203 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3204 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
3207 if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
3208 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
3209 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3210 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3212 if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
3213 if (ASIC_IS_DCE3(rdev)) {
3214 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
3215 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3216 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
3218 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
3219 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3220 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
3226 void r600_irq_disable(struct radeon_device *rdev)
3228 r600_disable_interrupts(rdev);
3229 /* Wait and acknowledge irq */
3232 r600_disable_interrupt_state(rdev);
3235 static u32 r600_get_ih_wptr(struct radeon_device *rdev)
3239 if (rdev->wb.enabled)
3240 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
3242 wptr = RREG32(IH_RB_WPTR);
3244 if (wptr & RB_OVERFLOW) {
3245 /* When a ring buffer overflow happens, start parsing interrupts
3246 * from the last vector not yet overwritten (wptr + 16). Hopefully
3247 * this should allow us to catch up.
3249 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
3250 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
3251 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
3252 tmp = RREG32(IH_RB_CNTL);
3253 tmp |= IH_WPTR_OVERFLOW_CLEAR;
3254 WREG32(IH_RB_CNTL, tmp);
3256 return (wptr & rdev->ih.ptr_mask);
3260 * Each IV ring entry is 128 bits:
3261 * [7:0] - interrupt source id
3262 * [31:8] - reserved
3263 * [59:32] - interrupt source data
3264 * [127:60] - reserved
3266 * The basic interrupt vector entries
3267 * are decoded as follows:
3268 * src_id src_data description
3273 * 19 0 FP Hot plug detection A
3274 * 19 1 FP Hot plug detection B
3275 * 19 2 DAC A auto-detection
3276 * 19 3 DAC B auto-detection
3282 * 181 - EOP Interrupt
3285 * Note, these are based on r600 and may need to be
3286 * adjusted or added to on newer asics
3289 int r600_irq_process(struct radeon_device *rdev)
3293 u32 src_id, src_data;
3295 bool queue_hotplug = false;
3296 bool queue_hdmi = false;
3298 if (!rdev->ih.enabled || rdev->shutdown)
3301 /* No MSIs, need a dummy read to flush PCI DMAs */
3302 if (!rdev->msi_enabled)
3305 wptr = r600_get_ih_wptr(rdev);
3308 /* is somebody else already processing irqs? */
3309 if (atomic_xchg(&rdev->ih.lock, 1))
3312 rptr = rdev->ih.rptr;
3313 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3315 /* Order reading of wptr vs. reading of IH ring data */
3318 /* display interrupts */
3321 while (rptr != wptr) {
3322 /* wptr/rptr are in bytes! */
3323 ring_index = rptr / 4;
3324 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
3325 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
3328 case 1: /* D1 vblank/vline */
3330 case 0: /* D1 vblank */
3331 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
3332 if (rdev->irq.crtc_vblank_int[0]) {
3333 drm_handle_vblank(rdev->ddev, 0);
3334 rdev->pm.vblank_sync = true;
3335 wake_up(&rdev->irq.vblank_queue);
3337 if (atomic_read(&rdev->irq.pflip[0]))
3338 radeon_crtc_handle_flip(rdev, 0);
3339 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3340 DRM_DEBUG("IH: D1 vblank\n");
3343 case 1: /* D1 vline */
3344 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
3345 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
3346 DRM_DEBUG("IH: D1 vline\n");
3350 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3354 case 5: /* D2 vblank/vline */
3356 case 0: /* D2 vblank */
3357 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
3358 if (rdev->irq.crtc_vblank_int[1]) {
3359 drm_handle_vblank(rdev->ddev, 1);
3360 rdev->pm.vblank_sync = true;
3361 wake_up(&rdev->irq.vblank_queue);
3363 if (atomic_read(&rdev->irq.pflip[1]))
3364 radeon_crtc_handle_flip(rdev, 1);
3365 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
3366 DRM_DEBUG("IH: D2 vblank\n");
3369 case 1: /* D2 vline */
3370 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
3371 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
3372 DRM_DEBUG("IH: D2 vline\n");
3376 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3380 case 19: /* HPD/DAC hotplug */
3383 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3384 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
3385 queue_hotplug = true;
3386 DRM_DEBUG("IH: HPD1\n");
3390 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3391 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
3392 queue_hotplug = true;
3393 DRM_DEBUG("IH: HPD2\n");
3397 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3398 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
3399 queue_hotplug = true;
3400 DRM_DEBUG("IH: HPD3\n");
3404 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3405 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
3406 queue_hotplug = true;
3407 DRM_DEBUG("IH: HPD4\n");
3411 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3412 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
3413 queue_hotplug = true;
3414 DRM_DEBUG("IH: HPD5\n");
3418 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3419 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
3420 queue_hotplug = true;
3421 DRM_DEBUG("IH: HPD6\n");
3425 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3432 if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
3433 rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
3435 DRM_DEBUG("IH: HDMI0\n");
3439 if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
3440 rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
3442 DRM_DEBUG("IH: HDMI1\n");
3446 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
3450 case 176: /* CP_INT in ring buffer */
3451 case 177: /* CP_INT in IB1 */
3452 case 178: /* CP_INT in IB2 */
3453 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
3454 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3456 case 181: /* CP EOP event */
3457 DRM_DEBUG("IH: CP EOP\n");
3458 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3460 case 233: /* GUI IDLE */
3461 DRM_DEBUG("IH: GUI idle\n");
3462 wake_up(&rdev->irq.idle_queue);
3465 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3469 /* wptr/rptr are in bytes! */
3471 rptr &= rdev->ih.ptr_mask;
3474 schedule_work(&rdev->hotplug_work);
3476 schedule_work(&rdev->audio_work);
3477 rdev->ih.rptr = rptr;
3478 WREG32(IH_RB_RPTR, rdev->ih.rptr);
3479 atomic_set(&rdev->ih.lock, 0);
3481 /* make sure wptr hasn't changed while processing */
3482 wptr = r600_get_ih_wptr(rdev);
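/*
 * Illustrative sketch, not driver code: decoding one IV ring entry the way
 * the loop above does -- src_id lives in bits [7:0] of dword 0 and
 * src_data in bits [27:0] of dword 1, with each dword stored little-endian
 * (hence the le32_to_cpu() calls in the real loop). A standalone model:
 */
#if 0
#include <stdint.h>

struct iv_entry {
	uint32_t src_id;	/* bits [7:0] of dword 0 */
	uint32_t src_data;	/* bits [27:0] of dword 1 */
};

static struct iv_entry iv_decode(const uint32_t *ring, uint32_t rptr)
{
	uint32_t i = rptr / 4;		/* rptr counts bytes, ring[] dwords */
	struct iv_entry e = {
		.src_id   = ring[i] & 0xff,
		.src_data = ring[i + 1] & 0xfffffff,
	};

	return e;
}
#endif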
3492 #if defined(CONFIG_DEBUG_FS)
3494 static int r600_debugfs_mc_info(struct seq_file *m, void *data)
3496 struct drm_info_node *node = (struct drm_info_node *) m->private;
3497 struct drm_device *dev = node->minor->dev;
3498 struct radeon_device *rdev = dev->dev_private;
3500 DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
3501 DREG32_SYS(m, rdev, VM_L2_STATUS);
3505 static struct drm_info_list r600_mc_info_list[] = {
3506 {"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
3510 int r600_debugfs_mc_info_init(struct radeon_device *rdev)
3512 #if defined(CONFIG_DEBUG_FS)
3513 return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
3520 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
3521 * @rdev: radeon device structure
3522 * @bo: buffer object struct which userspace is waiting on to be idle
3524 * Some R6XX/R7XX ASICs don't seem to take into account an HDP flush performed
3525 * through the ring buffer; this leads to corruption in rendering, see
3526 * http://bugzilla.kernel.org/show_bug.cgi?id=15186. To avoid this we
3527 * directly perform the HDP flush by writing the register through MMIO.
3529 void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
3531 /* r7xx hw bug: use a write to HDP_DEBUG1 followed by an fb read
3532 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
3533 * This seems to cause problems on some AGP cards. Just use the old
3536 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
3537 rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
3538 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3541 WREG32(HDP_DEBUG1, 0);
3542 tmp = readl((void __iomem *)ptr);
3544 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
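/*
 * Illustrative sketch, not driver code: the r7xx path above is a
 * write-then-read fence -- poke HDP_DEBUG1, then read any VRAM word back
 * through the CPU mapping so the flush is known to have taken effect. A
 * standalone model with hypothetical MMIO/VRAM pointers:
 */
#if 0
#include <stdint.h>

static void hdp_flush_r7xx(volatile uint32_t *hdp_debug1,
			   const volatile uint32_t *vram_scratch)
{
	*hdp_debug1 = 0;		/* the write triggers the HDP flush */
	(void)*vram_scratch;		/* the fb readback orders it */
}
#endif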
3547 void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
3549 u32 link_width_cntl, mask, target_reg;
3551 if (rdev->flags & RADEON_IS_IGP)
3554 if (!(rdev->flags & RADEON_IS_PCIE))
3557 /* x2 cards have a special sequence */
3558 if (ASIC_IS_X2(rdev))
3561 /* FIXME wait for idle */
3565 mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
3568 mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
3571 mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
3574 mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
3577 mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
3580 mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
3584 mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
3588 link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
3590 if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
3591 (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
3594 if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
3597 link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
3598 RADEON_PCIE_LC_RECONFIG_NOW |
3599 R600_PCIE_LC_RENEGOTIATE_EN |
3600 R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
3601 link_width_cntl |= mask;
3603 WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3605 /* some northbridges can renegotiate the link rather than requiring
3606 * a complete re-config.
3607 * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
3609 if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
3610 link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
3612 link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;
3614 WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
3615 RADEON_PCIE_LC_RECONFIG_NOW));
3617 if (rdev->family >= CHIP_RV770)
3618 target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
3620 target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;
3622 /* wait for lane set to complete */
3623 link_width_cntl = RREG32(target_reg);
3624 while (link_width_cntl == 0xffffffff)
3625 link_width_cntl = RREG32(target_reg);
3629 int r600_get_pcie_lanes(struct radeon_device *rdev)
3631 u32 link_width_cntl;
3633 if (rdev->flags & RADEON_IS_IGP)
3636 if (!(rdev->flags & RADEON_IS_PCIE))
3639 /* x2 cards have a special sequence */
3640 if (ASIC_IS_X2(rdev))
3643 /* FIXME wait for idle */
3645 link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
3647 switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
3648 case RADEON_PCIE_LC_LINK_WIDTH_X0:
3650 case RADEON_PCIE_LC_LINK_WIDTH_X1:
3652 case RADEON_PCIE_LC_LINK_WIDTH_X2:
3654 case RADEON_PCIE_LC_LINK_WIDTH_X4:
3656 case RADEON_PCIE_LC_LINK_WIDTH_X8:
3658 case RADEON_PCIE_LC_LINK_WIDTH_X16:
3664 static void r600_pcie_gen2_enable(struct radeon_device *rdev)
3666 u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
3669 if (radeon_pcie_gen2 == 0)
3672 if (rdev->flags & RADEON_IS_IGP)
3675 if (!(rdev->flags & RADEON_IS_PCIE))
3678 /* x2 cards have a special sequence */
3679 if (ASIC_IS_X2(rdev))
3682 /* only RV6xx+ chips are supported */
3683 if (rdev->family <= CHIP_R600)
3686 /* 55 nm r6xx asics */
3687 if ((rdev->family == CHIP_RV670) ||
3688 (rdev->family == CHIP_RV620) ||
3689 (rdev->family == CHIP_RV635)) {
3690 /* advertise upconfig capability */
3691 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3692 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3693 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3694 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3695 if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
3696 lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
3697 link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
3698 LC_RECONFIG_ARC_MISSING_ESCAPE);
3699 link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
3700 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3702 link_width_cntl |= LC_UPCONFIGURE_DIS;
3703 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3707 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3708 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
3709 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
3711 /* 55 nm r6xx asics */
3712 if ((rdev->family == CHIP_RV670) ||
3713 (rdev->family == CHIP_RV620) ||
3714 (rdev->family == CHIP_RV635)) {
3715 WREG32(MM_CFGREGS_CNTL, 0x8);
3716 link_cntl2 = RREG32(0x4088);
3717 WREG32(MM_CFGREGS_CNTL, 0);
3718 /* not supported yet */
3719 if (link_cntl2 & SELECTABLE_DEEMPHASIS)
3723 speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
3724 speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
3725 speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
3726 speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
3727 speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
3728 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3730 tmp = RREG32(0x541c);
3731 WREG32(0x541c, tmp | 0x8);
3732 WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
3733 link_cntl2 = RREG16(0x4088);
3734 link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
3735 link_cntl2 |= 0x2; /* target gen2 link speed */
3736 WREG16(0x4088, link_cntl2);
3737 WREG32(MM_CFGREGS_CNTL, 0);
3739 if ((rdev->family == CHIP_RV670) ||
3740 (rdev->family == CHIP_RV620) ||
3741 (rdev->family == CHIP_RV635)) {
3742 training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
3743 training_cntl &= ~LC_POINT_7_PLUS_EN;
3744 WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
3746 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3747 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
3748 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3751 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3752 speed_cntl |= LC_GEN2_EN_STRAP;
3753 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3756 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3757 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
3759 link_width_cntl |= LC_UPCONFIGURE_DIS;
3761 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3762 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);