2 * Copyright 2010 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
22 * Authors: Alex Deucher
24 #include <linux/firmware.h>
25 #include <linux/platform_device.h>
26 #include <linux/slab.h>
29 #include "radeon_asic.h"
30 #include "radeon_drm.h"
31 #include "evergreend.h"
34 #include "evergreen_reg.h"
35 #include "evergreen_blit_shaders.h"
37 #define EVERGREEN_PFP_UCODE_SIZE 1120
38 #define EVERGREEN_PM4_UCODE_SIZE 1376
40 static const u32 crtc_offsets[6] =
42 EVERGREEN_CRTC0_REGISTER_OFFSET,
43 EVERGREEN_CRTC1_REGISTER_OFFSET,
44 EVERGREEN_CRTC2_REGISTER_OFFSET,
45 EVERGREEN_CRTC3_REGISTER_OFFSET,
46 EVERGREEN_CRTC4_REGISTER_OFFSET,
47 EVERGREEN_CRTC5_REGISTER_OFFSET
50 static void evergreen_gpu_init(struct radeon_device *rdev);
51 void evergreen_fini(struct radeon_device *rdev);
52 void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
53 extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
54 int ring, u32 cp_int_cntl);
56 void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
57 unsigned *bankh, unsigned *mtaspect,
60 *bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
61 *bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
62 *mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
63 *tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
66 case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
67 case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
68 case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
69 case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
73 case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
74 case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
75 case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
76 case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
80 case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
81 case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
82 case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
83 case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
87 void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
92 cap = pci_pcie_cap(rdev->pdev);
96 err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl);
100 v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;
102 /* if the BIOS or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
103 * to avoid hangs or performance issues
105 if ((v == 0) || (v == 6) || (v == 7)) {
106 ctl &= ~PCI_EXP_DEVCTL_READRQ;
108 pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl);
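/* Illustrative note (not in the original comment): the READRQ field occupies
 * DEVCTL bits 14:12 and encodes the request size as 128 << v bytes, so only
 * v = 0..5 (128..4096 bytes) is meaningful; 6 and 7 are reserved, and this
 * hardware also treats 0 as problematic, hence the check and rewrite above.
 */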
113 * dce4_wait_for_vblank - vblank wait asic callback.
115 * @rdev: radeon_device pointer
116 * @crtc: crtc to wait for vblank on
118 * Wait for vblank on the requested crtc (evergreen+).
120 void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
124 if (crtc >= rdev->num_crtc)
127 if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
128 for (i = 0; i < rdev->usec_timeout; i++) {
129 if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
133 for (i = 0; i < rdev->usec_timeout; i++) {
134 if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
142 * evergreen_pre_page_flip - pre-pageflip callback.
144 * @rdev: radeon_device pointer
145 * @crtc: crtc to prepare for pageflip on
147 * Pre-pageflip callback (evergreen+).
148 * Enables the pageflip irq (vblank irq).
150 void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
152 /* enable the pflip int */
153 radeon_irq_kms_pflip_irq_get(rdev, crtc);
157 * evergreen_post_page_flip - post-pageflip callback.
159 * @rdev: radeon_device pointer
160 * @crtc: crtc to cleanup pageflip on
162 * Post-pageflip callback (evergreen+).
163 * Disables the pageflip irq (vblank irq).
165 void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
167 /* disable the pflip int */
168 radeon_irq_kms_pflip_irq_put(rdev, crtc);
172 * evergreen_page_flip - pageflip callback.
174 * @rdev: radeon_device pointer
175 * @crtc_id: crtc to flip on
176 * @crtc_base: new address of the crtc (GPU MC address)
178 * Does the actual pageflip (evergreen+).
179 * During vblank we take the crtc lock and wait for the update_pending
180 * bit to go high; when it does, we release the lock and allow the
181 * double-buffered update to take place.
182 * Returns the current update pending status.
184 u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
186 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
187 u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
190 /* Lock the graphics update lock */
191 tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
192 WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
194 /* update the scanout addresses */
195 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
196 upper_32_bits(crtc_base));
197 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
200 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
201 upper_32_bits(crtc_base));
202 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
205 /* Wait for update_pending to go high. */
206 for (i = 0; i < rdev->usec_timeout; i++) {
207 if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
211 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
213 /* Unlock the lock, so double-buffering can take place inside vblank */
214 tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
215 WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
217 /* Return current update_pending status: */
218 return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
221 /* get temperature in millidegrees */
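/* Worked example (illustrative): the raw readings below are in 0.5 degree C
 * units; an ASIC_T value of 0x1f6 has bit 8 set, so it is sign-extended to
 * -10 half-degrees, which the (actual_temp * 1000) / 2 scaling turns into
 * -5000 millidegrees C.
 */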
222 int evergreen_get_temp(struct radeon_device *rdev)
227 if (rdev->family == CHIP_JUNIPER) {
228 toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
230 temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
234 actual_temp = temp / 2 - (0x200 - toffset);
236 actual_temp = temp / 2 + toffset;
238 actual_temp = actual_temp * 1000;
241 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
246 else if (temp & 0x200)
248 else if (temp & 0x100) {
249 actual_temp = temp & 0x1ff;
250 actual_temp |= ~0x1ff;
252 actual_temp = temp & 0xff;
254 actual_temp = (actual_temp * 1000) / 2;
260 int sumo_get_temp(struct radeon_device *rdev)
262 u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
263 int actual_temp = temp - 49;
265 return actual_temp * 1000;
269 * sumo_pm_init_profile - Initialize power profiles callback.
271 * @rdev: radeon_device pointer
273 * Initialize the power states used in profile mode
274 * (sumo, trinity, SI).
275 * Used for profile mode only.
277 void sumo_pm_init_profile(struct radeon_device *rdev)
282 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
283 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
284 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
285 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
288 if (rdev->flags & RADEON_IS_MOBILITY)
289 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
291 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
293 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
294 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
295 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
296 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
298 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
299 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
300 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
301 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
303 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
304 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
305 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
306 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
308 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
309 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
310 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
311 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
314 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
315 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
316 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
317 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
318 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
319 rdev->pm.power_state[idx].num_clock_modes - 1;
321 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
322 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
323 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
324 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
325 rdev->pm.power_state[idx].num_clock_modes - 1;
329 * evergreen_pm_misc - set additional pm hw parameters callback.
331 * @rdev: radeon_device pointer
333 * Set non-clock parameters associated with a power state
334 * (voltage, etc.) (evergreen+).
336 void evergreen_pm_misc(struct radeon_device *rdev)
338 int req_ps_idx = rdev->pm.requested_power_state_index;
339 int req_cm_idx = rdev->pm.requested_clock_mode_index;
340 struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
341 struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
343 if (voltage->type == VOLTAGE_SW) {
344 /* 0xff01 is a flag rather than an actual voltage */
345 if (voltage->voltage == 0xff01)
347 if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
348 radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
349 rdev->pm.current_vddc = voltage->voltage;
350 DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
352 /* 0xff01 is a flag rather than an actual voltage */
353 if (voltage->vddci == 0xff01)
355 if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
356 radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
357 rdev->pm.current_vddci = voltage->vddci;
358 DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
364 * evergreen_pm_prepare - pre-power state change callback.
366 * @rdev: radeon_device pointer
368 * Prepare for a power state change (evergreen+).
370 void evergreen_pm_prepare(struct radeon_device *rdev)
372 struct drm_device *ddev = rdev->ddev;
373 struct drm_crtc *crtc;
374 struct radeon_crtc *radeon_crtc;
377 /* disable any active CRTCs */
378 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
379 radeon_crtc = to_radeon_crtc(crtc);
380 if (radeon_crtc->enabled) {
381 tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
382 tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
383 WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
389 * evergreen_pm_finish - post-power state change callback.
391 * @rdev: radeon_device pointer
393 * Clean up after a power state change (evergreen+).
395 void evergreen_pm_finish(struct radeon_device *rdev)
397 struct drm_device *ddev = rdev->ddev;
398 struct drm_crtc *crtc;
399 struct radeon_crtc *radeon_crtc;
402 /* enable any active CRTCs */
403 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
404 radeon_crtc = to_radeon_crtc(crtc);
405 if (radeon_crtc->enabled) {
406 tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
407 tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
408 WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
414 * evergreen_hpd_sense - hpd sense callback.
416 * @rdev: radeon_device pointer
417 * @hpd: hpd (hotplug detect) pin
419 * Checks if a digital monitor is connected (evergreen+).
420 * Returns true if connected, false if not connected.
422 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
424 bool connected = false;
428 if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
432 if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
436 if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
440 if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
444 if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
448 if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
459 * evergreen_hpd_set_polarity - hpd set polarity callback.
461 * @rdev: radeon_device pointer
462 * @hpd: hpd (hotplug detect) pin
464 * Set the polarity of the hpd pin (evergreen+).
466 void evergreen_hpd_set_polarity(struct radeon_device *rdev,
467 enum radeon_hpd_id hpd)
470 bool connected = evergreen_hpd_sense(rdev, hpd);
474 tmp = RREG32(DC_HPD1_INT_CONTROL);
476 tmp &= ~DC_HPDx_INT_POLARITY;
478 tmp |= DC_HPDx_INT_POLARITY;
479 WREG32(DC_HPD1_INT_CONTROL, tmp);
482 tmp = RREG32(DC_HPD2_INT_CONTROL);
484 tmp &= ~DC_HPDx_INT_POLARITY;
486 tmp |= DC_HPDx_INT_POLARITY;
487 WREG32(DC_HPD2_INT_CONTROL, tmp);
490 tmp = RREG32(DC_HPD3_INT_CONTROL);
492 tmp &= ~DC_HPDx_INT_POLARITY;
494 tmp |= DC_HPDx_INT_POLARITY;
495 WREG32(DC_HPD3_INT_CONTROL, tmp);
498 tmp = RREG32(DC_HPD4_INT_CONTROL);
500 tmp &= ~DC_HPDx_INT_POLARITY;
502 tmp |= DC_HPDx_INT_POLARITY;
503 WREG32(DC_HPD4_INT_CONTROL, tmp);
506 tmp = RREG32(DC_HPD5_INT_CONTROL);
508 tmp &= ~DC_HPDx_INT_POLARITY;
510 tmp |= DC_HPDx_INT_POLARITY;
511 WREG32(DC_HPD5_INT_CONTROL, tmp);
514 tmp = RREG32(DC_HPD6_INT_CONTROL);
516 tmp &= ~DC_HPDx_INT_POLARITY;
518 tmp |= DC_HPDx_INT_POLARITY;
519 WREG32(DC_HPD6_INT_CONTROL, tmp);
527 * evergreen_hpd_init - hpd setup callback.
529 * @rdev: radeon_device pointer
531 * Setup the hpd pins used by the card (evergreen+).
532 * Enable the pin, set the polarity, and enable the hpd interrupts.
534 void evergreen_hpd_init(struct radeon_device *rdev)
536 struct drm_device *dev = rdev->ddev;
537 struct drm_connector *connector;
538 unsigned enabled = 0;
539 u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
540 DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
542 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
543 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
544 switch (radeon_connector->hpd.hpd) {
546 WREG32(DC_HPD1_CONTROL, tmp);
549 WREG32(DC_HPD2_CONTROL, tmp);
552 WREG32(DC_HPD3_CONTROL, tmp);
555 WREG32(DC_HPD4_CONTROL, tmp);
558 WREG32(DC_HPD5_CONTROL, tmp);
561 WREG32(DC_HPD6_CONTROL, tmp);
566 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
567 enabled |= 1 << radeon_connector->hpd.hpd;
569 radeon_irq_kms_enable_hpd(rdev, enabled);
573 * evergreen_hpd_fini - hpd tear down callback.
575 * @rdev: radeon_device pointer
577 * Tear down the hpd pins used by the card (evergreen+).
578 * Disable the hpd interrupts.
580 void evergreen_hpd_fini(struct radeon_device *rdev)
582 struct drm_device *dev = rdev->ddev;
583 struct drm_connector *connector;
584 unsigned disabled = 0;
586 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
587 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
588 switch (radeon_connector->hpd.hpd) {
590 WREG32(DC_HPD1_CONTROL, 0);
593 WREG32(DC_HPD2_CONTROL, 0);
596 WREG32(DC_HPD3_CONTROL, 0);
599 WREG32(DC_HPD4_CONTROL, 0);
602 WREG32(DC_HPD5_CONTROL, 0);
605 WREG32(DC_HPD6_CONTROL, 0);
610 disabled |= 1 << radeon_connector->hpd.hpd;
612 radeon_irq_kms_disable_hpd(rdev, disabled);
615 /* watermark setup */
617 static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
618 struct radeon_crtc *radeon_crtc,
619 struct drm_display_mode *mode,
620 struct drm_display_mode *other_mode)
625 * There are 3 line buffers, each one shared by 2 display controllers.
626 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
627 * the display controllers. The partitioning is done via one of four
628 * preset allocations specified in bits 2:0:
629 * first display controller
630 * 0 - first half of lb (3840 * 2)
631 * 1 - first 3/4 of lb (5760 * 2)
632 * 2 - whole lb (7680 * 2), other crtc must be disabled
633 * 3 - first 1/4 of lb (1920 * 2)
634 * second display controller
635 * 4 - second half of lb (3840 * 2)
636 * 5 - second 3/4 of lb (5760 * 2)
637 * 6 - whole lb (7680 * 2), other crtc must be disabled
638 * 7 - last 1/4 of lb (1920 * 2)
640 /* this can get tricky if we have two large displays on a paired group
641 * of crtcs. Ideally for multiple large displays we'd assign them to
642 * non-linked crtcs for maximum line buffer allocation.
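* For illustration: if only one crtc of a linked pair is active it can take
* the whole 7680-entry buffer (preset 2 or 6 above); when both are active,
* each typically gets half (presets 0 and 4), i.e. 3840 * 2 entries.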
644 if (radeon_crtc->base.enabled && mode) {
652 /* second controller of the pair uses second half of the lb */
653 if (radeon_crtc->crtc_id % 2)
655 WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
657 if (radeon_crtc->base.enabled && mode) {
662 if (ASIC_IS_DCE5(rdev))
668 if (ASIC_IS_DCE5(rdev))
674 if (ASIC_IS_DCE5(rdev))
680 if (ASIC_IS_DCE5(rdev))
687 /* controller not enabled, so no lb used */
691 u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
693 u32 tmp = RREG32(MC_SHARED_CHMAP);
695 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
708 struct evergreen_wm_params {
709 u32 dram_channels; /* number of dram channels */
710 u32 yclk; /* bandwidth per dram data pin in kHz */
711 u32 sclk; /* engine clock in kHz */
712 u32 disp_clk; /* display clock in kHz */
713 u32 src_width; /* viewport width */
714 u32 active_time; /* active display time in ns */
715 u32 blank_time; /* blank time in ns */
716 bool interlaced; /* mode is interlaced */
717 fixed20_12 vsc; /* vertical scale ratio */
718 u32 num_heads; /* number of active crtcs */
719 u32 bytes_per_pixel; /* bytes per pixel display + overlay */
720 u32 lb_size; /* line buffer allocated to pipe */
721 u32 vtaps; /* vertical scaler taps */
724 static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
726 /* Calculate total DRAM Bandwidth (derated by DRAM efficiency). */
727 fixed20_12 dram_efficiency; /* 0.7 */
728 fixed20_12 yclk, dram_channels, bandwidth;
731 a.full = dfixed_const(1000);
732 yclk.full = dfixed_const(wm->yclk);
733 yclk.full = dfixed_div(yclk, a);
734 dram_channels.full = dfixed_const(wm->dram_channels * 4);
735 a.full = dfixed_const(10);
736 dram_efficiency.full = dfixed_const(7);
737 dram_efficiency.full = dfixed_div(dram_efficiency, a);
738 bandwidth.full = dfixed_mul(dram_channels, yclk);
739 bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
741 return dfixed_trunc(bandwidth);
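/* Rough worked example (assuming yclk is in kHz, per evergreen_wm_params):
 * yclk = 1000000 (1 GHz effective) with 2 DRAM channels gives roughly
 * 1000 * (2 * 4) * 0.7 = 5600 MB/s after the 0.7 DRAM efficiency derating.
 */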
744 static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
746 /* Calculate DRAM Bandwidth and the part allocated to display. */
747 fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
748 fixed20_12 yclk, dram_channels, bandwidth;
751 a.full = dfixed_const(1000);
752 yclk.full = dfixed_const(wm->yclk);
753 yclk.full = dfixed_div(yclk, a);
754 dram_channels.full = dfixed_const(wm->dram_channels * 4);
755 a.full = dfixed_const(10);
756 disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
757 disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
758 bandwidth.full = dfixed_mul(dram_channels, yclk);
759 bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
761 return dfixed_trunc(bandwidth);
764 static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
766 /* Calculate the display Data return Bandwidth */
767 fixed20_12 return_efficiency; /* 0.8 */
768 fixed20_12 sclk, bandwidth;
771 a.full = dfixed_const(1000);
772 sclk.full = dfixed_const(wm->sclk);
773 sclk.full = dfixed_div(sclk, a);
774 a.full = dfixed_const(10);
775 return_efficiency.full = dfixed_const(8);
776 return_efficiency.full = dfixed_div(return_efficiency, a);
777 a.full = dfixed_const(32);
778 bandwidth.full = dfixed_mul(a, sclk);
779 bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
781 return dfixed_trunc(bandwidth);
784 static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
786 /* Calculate the DMIF Request Bandwidth */
787 fixed20_12 disp_clk_request_efficiency; /* 0.8 */
788 fixed20_12 disp_clk, bandwidth;
791 a.full = dfixed_const(1000);
792 disp_clk.full = dfixed_const(wm->disp_clk);
793 disp_clk.full = dfixed_div(disp_clk, a);
794 a.full = dfixed_const(10);
795 disp_clk_request_efficiency.full = dfixed_const(8);
796 disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
797 a.full = dfixed_const(32);
798 bandwidth.full = dfixed_mul(a, disp_clk);
799 bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);
801 return dfixed_trunc(bandwidth);
804 static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
806 /* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
807 u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
808 u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
809 u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);
811 return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
814 static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
816 /* Calculate the display mode Average Bandwidth
817 * DisplayMode should contain the source and destination dimensions,
821 fixed20_12 line_time;
822 fixed20_12 src_width;
823 fixed20_12 bandwidth;
826 a.full = dfixed_const(1000);
827 line_time.full = dfixed_const(wm->active_time + wm->blank_time);
828 line_time.full = dfixed_div(line_time, a);
829 bpp.full = dfixed_const(wm->bytes_per_pixel);
830 src_width.full = dfixed_const(wm->src_width);
831 bandwidth.full = dfixed_mul(src_width, bpp);
832 bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
833 bandwidth.full = dfixed_div(bandwidth, line_time);
835 return dfixed_trunc(bandwidth);
838 static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
840 /* First calculate the latency in ns */
841 u32 mc_latency = 2000; /* 2000 ns. */
842 u32 available_bandwidth = evergreen_available_bandwidth(wm);
843 u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
844 u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
845 u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
846 u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
847 (wm->num_heads * cursor_line_pair_return_time);
848 u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
849 u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
852 if (wm->num_heads == 0)
855 a.full = dfixed_const(2);
856 b.full = dfixed_const(1);
857 if ((wm->vsc.full > a.full) ||
858 ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
860 ((wm->vsc.full >= a.full) && wm->interlaced))
861 max_src_lines_per_dst_line = 4;
863 max_src_lines_per_dst_line = 2;
865 a.full = dfixed_const(available_bandwidth);
866 b.full = dfixed_const(wm->num_heads);
867 a.full = dfixed_div(a, b);
869 b.full = dfixed_const(1000);
870 c.full = dfixed_const(wm->disp_clk);
871 b.full = dfixed_div(c, b);
872 c.full = dfixed_const(wm->bytes_per_pixel);
873 b.full = dfixed_mul(b, c);
875 lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));
877 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
878 b.full = dfixed_const(1000);
879 c.full = dfixed_const(lb_fill_bw);
880 b.full = dfixed_div(c, b);
881 a.full = dfixed_div(a, b);
882 line_fill_time = dfixed_trunc(a);
884 if (line_fill_time < wm->active_time)
887 return latency + (line_fill_time - wm->active_time);
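/* Descriptive note: the watermark returned above is the worst-case time (ns)
 * the line buffer must cover: memory latency plus the time other heads can
 * occupy the return path, extended by any amount the line fill time exceeds
 * the active display time.
 */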
891 static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
893 if (evergreen_average_bandwidth(wm) <=
894 (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
900 static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
902 if (evergreen_average_bandwidth(wm) <=
903 (evergreen_available_bandwidth(wm) / wm->num_heads))
909 static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
911 u32 lb_partitions = wm->lb_size / wm->src_width;
912 u32 line_time = wm->active_time + wm->blank_time;
913 u32 latency_tolerant_lines;
917 a.full = dfixed_const(1);
918 if (wm->vsc.full > a.full)
919 latency_tolerant_lines = 1;
921 if (lb_partitions <= (wm->vtaps + 1))
922 latency_tolerant_lines = 1;
924 latency_tolerant_lines = 2;
927 latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
929 if (evergreen_latency_watermark(wm) <= latency_hiding)
935 static void evergreen_program_watermarks(struct radeon_device *rdev,
936 struct radeon_crtc *radeon_crtc,
937 u32 lb_size, u32 num_heads)
939 struct drm_display_mode *mode = &radeon_crtc->base.mode;
940 struct evergreen_wm_params wm;
943 u32 latency_watermark_a = 0, latency_watermark_b = 0;
944 u32 priority_a_mark = 0, priority_b_mark = 0;
945 u32 priority_a_cnt = PRIORITY_OFF;
946 u32 priority_b_cnt = PRIORITY_OFF;
947 u32 pipe_offset = radeon_crtc->crtc_id * 16;
948 u32 tmp, arb_control3;
951 if (radeon_crtc->base.enabled && num_heads && mode) {
952 pixel_period = 1000000 / (u32)mode->clock;
953 line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
957 wm.yclk = rdev->pm.current_mclk * 10;
958 wm.sclk = rdev->pm.current_sclk * 10;
959 wm.disp_clk = mode->clock;
960 wm.src_width = mode->crtc_hdisplay;
961 wm.active_time = mode->crtc_hdisplay * pixel_period;
962 wm.blank_time = line_time - wm.active_time;
963 wm.interlaced = false;
964 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
965 wm.interlaced = true;
966 wm.vsc = radeon_crtc->vsc;
968 if (radeon_crtc->rmx_type != RMX_OFF)
970 wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
971 wm.lb_size = lb_size;
972 wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
973 wm.num_heads = num_heads;
975 /* set for high clocks */
976 latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
977 /* set for low clocks */
978 /* wm.yclk = low clk; wm.sclk = low clk */
979 latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);
981 /* possibly force display priority to high */
982 /* should really do this at mode validation time... */
983 if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
984 !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
985 !evergreen_check_latency_hiding(&wm) ||
986 (rdev->disp_priority == 2)) {
987 DRM_DEBUG_KMS("force priority to high\n");
988 priority_a_cnt |= PRIORITY_ALWAYS_ON;
989 priority_b_cnt |= PRIORITY_ALWAYS_ON;
992 a.full = dfixed_const(1000);
993 b.full = dfixed_const(mode->clock);
994 b.full = dfixed_div(b, a);
995 c.full = dfixed_const(latency_watermark_a);
996 c.full = dfixed_mul(c, b);
997 c.full = dfixed_mul(c, radeon_crtc->hsc);
998 c.full = dfixed_div(c, a);
999 a.full = dfixed_const(16);
1000 c.full = dfixed_div(c, a);
1001 priority_a_mark = dfixed_trunc(c);
1002 priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
1004 a.full = dfixed_const(1000);
1005 b.full = dfixed_const(mode->clock);
1006 b.full = dfixed_div(b, a);
1007 c.full = dfixed_const(latency_watermark_b);
1008 c.full = dfixed_mul(c, b);
1009 c.full = dfixed_mul(c, radeon_crtc->hsc);
1010 c.full = dfixed_div(c, a);
1011 a.full = dfixed_const(16);
1012 c.full = dfixed_div(c, a);
1013 priority_b_mark = dfixed_trunc(c);
1014 priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
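/* Illustrative note: each priority mark computed above is the number of
 * pixels streamed out during the latency watermark (watermark ns * pixel
 * clock MHz / 1000), scaled by the horizontal scale ratio and expressed in
 * 16-pixel display-request units.
 */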
1018 arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
1020 tmp &= ~LATENCY_WATERMARK_MASK(3);
1021 tmp |= LATENCY_WATERMARK_MASK(1);
1022 WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
1023 WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
1024 (LATENCY_LOW_WATERMARK(latency_watermark_a) |
1025 LATENCY_HIGH_WATERMARK(line_time)));
1027 tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
1028 tmp &= ~LATENCY_WATERMARK_MASK(3);
1029 tmp |= LATENCY_WATERMARK_MASK(2);
1030 WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
1031 WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
1032 (LATENCY_LOW_WATERMARK(latency_watermark_b) |
1033 LATENCY_HIGH_WATERMARK(line_time)));
1034 /* restore original selection */
1035 WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);
1037 /* write the priority marks */
1038 WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
1039 WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
1044 * evergreen_bandwidth_update - update display watermarks callback.
1046 * @rdev: radeon_device pointer
1048 * Update the display watermarks based on the requested mode(s)
1051 void evergreen_bandwidth_update(struct radeon_device *rdev)
1053 struct drm_display_mode *mode0 = NULL;
1054 struct drm_display_mode *mode1 = NULL;
1055 u32 num_heads = 0, lb_size;
1058 radeon_update_display_priority(rdev);
1060 for (i = 0; i < rdev->num_crtc; i++) {
1061 if (rdev->mode_info.crtcs[i]->base.enabled)
1064 for (i = 0; i < rdev->num_crtc; i += 2) {
1065 mode0 = &rdev->mode_info.crtcs[i]->base.mode;
1066 mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
1067 lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
1068 evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
1069 lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
1070 evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
1075 * evergreen_mc_wait_for_idle - wait for MC idle callback.
1077 * @rdev: radeon_device pointer
1079 * Wait for the MC (memory controller) to be idle.
1081 * Returns 0 if the MC is idle, -1 if not.
1083 int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
1088 for (i = 0; i < rdev->usec_timeout; i++) {
1089 /* read the MC busy bits from SRBM_STATUS */
1090 tmp = RREG32(SRBM_STATUS) & 0x1F00;
1101 void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
1106 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
1108 WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
1109 for (i = 0; i < rdev->usec_timeout; i++) {
1110 /* read the VM context0 request response */
1111 tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
1112 tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
1114 printk(KERN_WARNING "[drm] evergreen flush TLB failed\n");
1124 static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
1129 if (rdev->gart.robj == NULL) {
1130 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
1133 r = radeon_gart_table_vram_pin(rdev);
1136 radeon_gart_restore(rdev);
1137 /* Setup L2 cache */
1138 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
1139 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
1140 EFFECTIVE_L2_QUEUE_SIZE(7));
1141 WREG32(VM_L2_CNTL2, 0);
1142 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
1143 /* Setup TLB control */
1144 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
1145 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
1146 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
1147 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
1148 if (rdev->flags & RADEON_IS_IGP) {
1149 WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
1150 WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
1151 WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
1153 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
1154 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
1155 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
1156 if ((rdev->family == CHIP_JUNIPER) ||
1157 (rdev->family == CHIP_CYPRESS) ||
1158 (rdev->family == CHIP_HEMLOCK) ||
1159 (rdev->family == CHIP_BARTS))
1160 WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
1162 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
1163 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
1164 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
1165 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
1166 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
1167 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
1168 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
1169 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
1170 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
1171 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
1172 (u32)(rdev->dummy_page.addr >> 12));
1173 WREG32(VM_CONTEXT1_CNTL, 0);
1175 evergreen_pcie_gart_tlb_flush(rdev);
1176 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
1177 (unsigned)(rdev->mc.gtt_size >> 20),
1178 (unsigned long long)rdev->gart.table_addr);
1179 rdev->gart.ready = true;
1183 static void evergreen_pcie_gart_disable(struct radeon_device *rdev)
1187 /* Disable all tables */
1188 WREG32(VM_CONTEXT0_CNTL, 0);
1189 WREG32(VM_CONTEXT1_CNTL, 0);
1191 /* Setup L2 cache */
1192 WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
1193 EFFECTIVE_L2_QUEUE_SIZE(7));
1194 WREG32(VM_L2_CNTL2, 0);
1195 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
1196 /* Setup TLB control */
1197 tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
1198 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
1199 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
1200 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
1201 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
1202 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
1203 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
1204 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
1205 radeon_gart_table_vram_unpin(rdev);
1208 static void evergreen_pcie_gart_fini(struct radeon_device *rdev)
1210 evergreen_pcie_gart_disable(rdev);
1211 radeon_gart_table_vram_free(rdev);
1212 radeon_gart_fini(rdev);
1216 static void evergreen_agp_enable(struct radeon_device *rdev)
1220 /* Setup L2 cache */
1221 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
1222 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
1223 EFFECTIVE_L2_QUEUE_SIZE(7));
1224 WREG32(VM_L2_CNTL2, 0);
1225 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
1226 /* Setup TLB control */
1227 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
1228 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
1229 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
1230 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
1231 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
1232 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
1233 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
1234 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
1235 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
1236 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
1237 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
1238 WREG32(VM_CONTEXT0_CNTL, 0);
1239 WREG32(VM_CONTEXT1_CNTL, 0);
1242 void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
1244 u32 crtc_enabled, tmp, frame_count, blackout;
1247 save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
1248 save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
1250 /* disable VGA render */
1251 WREG32(VGA_RENDER_CONTROL, 0);
1252 /* blank the display controllers */
1253 for (i = 0; i < rdev->num_crtc; i++) {
1254 crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
1256 save->crtc_enabled[i] = true;
1257 if (ASIC_IS_DCE6(rdev)) {
1258 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
1259 if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
1260 radeon_wait_for_vblank(rdev, i);
1261 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
1262 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
1265 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
1266 if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
1267 radeon_wait_for_vblank(rdev, i);
1268 tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
1269 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
1272 /* wait for the next frame */
1273 frame_count = radeon_get_vblank_counter(rdev, i);
1274 for (j = 0; j < rdev->usec_timeout; j++) {
1275 if (radeon_get_vblank_counter(rdev, i) != frame_count)
1282 radeon_mc_wait_for_idle(rdev);
1284 blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
1285 if ((blackout & BLACKOUT_MODE_MASK) != 1) {
1286 /* Block CPU access */
1287 WREG32(BIF_FB_EN, 0);
1288 /* blackout the MC */
1289 blackout &= ~BLACKOUT_MODE_MASK;
1290 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
1294 void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
1296 u32 tmp, frame_count;
1299 /* update crtc base addresses */
1300 for (i = 0; i < rdev->num_crtc; i++) {
1301 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
1302 upper_32_bits(rdev->mc.vram_start));
1303 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
1304 upper_32_bits(rdev->mc.vram_start));
1305 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
1306 (u32)rdev->mc.vram_start);
1307 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
1308 (u32)rdev->mc.vram_start);
1310 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
1311 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
1313 /* unblackout the MC */
1314 tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
1315 tmp &= ~BLACKOUT_MODE_MASK;
1316 WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
1317 /* allow CPU access */
1318 WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
1320 for (i = 0; i < rdev->num_crtc; i++) {
1321 if (save->crtc_enabled[i]) {
1322 if (ASIC_IS_DCE6(rdev)) {
1323 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
1324 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
1325 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
1327 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
1328 tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
1329 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
1331 /* wait for the next frame */
1332 frame_count = radeon_get_vblank_counter(rdev, i);
1333 for (j = 0; j < rdev->usec_timeout; j++) {
1334 if (radeon_get_vblank_counter(rdev, i) != frame_count)
1340 /* Unlock vga access */
1341 WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
1343 WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
1346 void evergreen_mc_program(struct radeon_device *rdev)
1348 struct evergreen_mc_save save;
1352 /* Initialize HDP */
1353 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1354 WREG32((0x2c14 + j), 0x00000000);
1355 WREG32((0x2c18 + j), 0x00000000);
1356 WREG32((0x2c1c + j), 0x00000000);
1357 WREG32((0x2c20 + j), 0x00000000);
1358 WREG32((0x2c24 + j), 0x00000000);
1360 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
1362 evergreen_mc_stop(rdev, &save);
1363 if (evergreen_mc_wait_for_idle(rdev)) {
1364 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1366 /* Lockout access through VGA aperture */
1367 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
1368 /* Update configuration */
1369 if (rdev->flags & RADEON_IS_AGP) {
1370 if (rdev->mc.vram_start < rdev->mc.gtt_start) {
1371 /* VRAM before AGP */
1372 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1373 rdev->mc.vram_start >> 12);
1374 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1375 rdev->mc.gtt_end >> 12);
1377 /* VRAM after AGP */
1378 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1379 rdev->mc.gtt_start >> 12);
1380 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1381 rdev->mc.vram_end >> 12);
1384 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1385 rdev->mc.vram_start >> 12);
1386 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1387 rdev->mc.vram_end >> 12);
1389 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
1390 /* llano/ontario only */
1391 if ((rdev->family == CHIP_PALM) ||
1392 (rdev->family == CHIP_SUMO) ||
1393 (rdev->family == CHIP_SUMO2)) {
1394 tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
1395 tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
1396 tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
1397 WREG32(MC_FUS_VM_FB_OFFSET, tmp);
1399 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
1400 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
1401 WREG32(MC_VM_FB_LOCATION, tmp);
1402 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
1403 WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
1404 WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
1405 if (rdev->flags & RADEON_IS_AGP) {
1406 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
1407 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
1408 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
1410 WREG32(MC_VM_AGP_BASE, 0);
1411 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
1412 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
1414 if (evergreen_mc_wait_for_idle(rdev)) {
1415 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1417 evergreen_mc_resume(rdev, &save);
1418 /* we need to own VRAM, so turn off the VGA renderer here
1419 * to stop it overwriting our objects */
1420 rv515_vga_render_disable(rdev);
1426 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1428 struct radeon_ring *ring = &rdev->ring[ib->ring];
1431 /* set to DX10/11 mode */
1432 radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
1433 radeon_ring_write(ring, 1);
1435 if (ring->rptr_save_reg) {
1436 next_rptr = ring->wptr + 3 + 4;
1437 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1438 radeon_ring_write(ring, ((ring->rptr_save_reg -
1439 PACKET3_SET_CONFIG_REG_START) >> 2));
1440 radeon_ring_write(ring, next_rptr);
1441 } else if (rdev->wb.enabled) {
1442 next_rptr = ring->wptr + 5 + 4;
1443 radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
1444 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
1445 radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
1446 radeon_ring_write(ring, next_rptr);
1447 radeon_ring_write(ring, 0);
1450 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
1451 radeon_ring_write(ring,
1455 (ib->gpu_addr & 0xFFFFFFFC));
1456 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
1457 radeon_ring_write(ring, ib->length_dw);
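/* Note: the INDIRECT_BUFFER packet above carries the IB's GPU address split
 * across two dwords (the dword-aligned low 32 bits, then the upper 8 bits)
 * followed by its length in dwords.
 */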
1461 static int evergreen_cp_load_microcode(struct radeon_device *rdev)
1463 const __be32 *fw_data;
1466 if (!rdev->me_fw || !rdev->pfp_fw)
1474 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
1476 fw_data = (const __be32 *)rdev->pfp_fw->data;
1477 WREG32(CP_PFP_UCODE_ADDR, 0);
1478 for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
1479 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
1480 WREG32(CP_PFP_UCODE_ADDR, 0);
1482 fw_data = (const __be32 *)rdev->me_fw->data;
1483 WREG32(CP_ME_RAM_WADDR, 0);
1484 for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
1485 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
1487 WREG32(CP_PFP_UCODE_ADDR, 0);
1488 WREG32(CP_ME_RAM_WADDR, 0);
1489 WREG32(CP_ME_RAM_RADDR, 0);
1493 static int evergreen_cp_start(struct radeon_device *rdev)
1495 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1499 r = radeon_ring_lock(rdev, ring, 7);
1501 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1504 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
1505 radeon_ring_write(ring, 0x1);
1506 radeon_ring_write(ring, 0x0);
1507 radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
1508 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1509 radeon_ring_write(ring, 0);
1510 radeon_ring_write(ring, 0);
1511 radeon_ring_unlock_commit(rdev, ring);
1514 WREG32(CP_ME_CNTL, cp_me);
1516 r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
1518 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1522 /* setup clear context state */
1523 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1524 radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1526 for (i = 0; i < evergreen_default_size; i++)
1527 radeon_ring_write(ring, evergreen_default_state[i]);
1529 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1530 radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
1532 /* set clear context state */
1533 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
1534 radeon_ring_write(ring, 0);
1536 /* SQ_VTX_BASE_VTX_LOC */
1537 radeon_ring_write(ring, 0xc0026f00);
1538 radeon_ring_write(ring, 0x00000000);
1539 radeon_ring_write(ring, 0x00000000);
1540 radeon_ring_write(ring, 0x00000000);
1543 radeon_ring_write(ring, 0xc0036f00);
1544 radeon_ring_write(ring, 0x00000bc4);
1545 radeon_ring_write(ring, 0xffffffff);
1546 radeon_ring_write(ring, 0xffffffff);
1547 radeon_ring_write(ring, 0xffffffff);
1549 radeon_ring_write(ring, 0xc0026900);
1550 radeon_ring_write(ring, 0x00000316);
1551 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1552 radeon_ring_write(ring, 0x00000010); /* */
1554 radeon_ring_unlock_commit(rdev, ring);
1559 static int evergreen_cp_resume(struct radeon_device *rdev)
1561 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1566 /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
1567 WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
1573 RREG32(GRBM_SOFT_RESET);
1575 WREG32(GRBM_SOFT_RESET, 0);
1576 RREG32(GRBM_SOFT_RESET);
1578 /* Set ring buffer size */
1579 rb_bufsz = drm_order(ring->ring_size / 8);
1580 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
1582 tmp |= BUF_SWAP_32BIT;
1584 WREG32(CP_RB_CNTL, tmp);
1585 WREG32(CP_SEM_WAIT_TIMER, 0x0);
1586 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
1588 /* Set the write pointer delay */
1589 WREG32(CP_RB_WPTR_DELAY, 0);
1591 /* Initialize the ring buffer's read and write pointers */
1592 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
1593 WREG32(CP_RB_RPTR_WR, 0);
1595 WREG32(CP_RB_WPTR, ring->wptr);
1597 /* set the wb address whether it's enabled or not */
1598 WREG32(CP_RB_RPTR_ADDR,
1599 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
1600 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
1601 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
1603 if (rdev->wb.enabled)
1604 WREG32(SCRATCH_UMSK, 0xff);
1606 tmp |= RB_NO_UPDATE;
1607 WREG32(SCRATCH_UMSK, 0);
1611 WREG32(CP_RB_CNTL, tmp);
1613 WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
1614 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
1616 ring->rptr = RREG32(CP_RB_RPTR);
1618 evergreen_cp_start(rdev);
1620 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
1622 ring->ready = false;
1631 static void evergreen_gpu_init(struct radeon_device *rdev)
1634 u32 mc_shared_chmap, mc_arb_ramcfg;
1638 u32 sq_lds_resource_mgmt;
1639 u32 sq_gpr_resource_mgmt_1;
1640 u32 sq_gpr_resource_mgmt_2;
1641 u32 sq_gpr_resource_mgmt_3;
1642 u32 sq_thread_resource_mgmt;
1643 u32 sq_thread_resource_mgmt_2;
1644 u32 sq_stack_resource_mgmt_1;
1645 u32 sq_stack_resource_mgmt_2;
1646 u32 sq_stack_resource_mgmt_3;
1647 u32 vgt_cache_invalidation;
1648 u32 hdp_host_path_cntl, tmp;
1649 u32 disabled_rb_mask;
1650 int i, j, num_shader_engines, ps_thread_count;
1652 switch (rdev->family) {
1655 rdev->config.evergreen.num_ses = 2;
1656 rdev->config.evergreen.max_pipes = 4;
1657 rdev->config.evergreen.max_tile_pipes = 8;
1658 rdev->config.evergreen.max_simds = 10;
1659 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
1660 rdev->config.evergreen.max_gprs = 256;
1661 rdev->config.evergreen.max_threads = 248;
1662 rdev->config.evergreen.max_gs_threads = 32;
1663 rdev->config.evergreen.max_stack_entries = 512;
1664 rdev->config.evergreen.sx_num_of_sets = 4;
1665 rdev->config.evergreen.sx_max_export_size = 256;
1666 rdev->config.evergreen.sx_max_export_pos_size = 64;
1667 rdev->config.evergreen.sx_max_export_smx_size = 192;
1668 rdev->config.evergreen.max_hw_contexts = 8;
1669 rdev->config.evergreen.sq_num_cf_insts = 2;
1671 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1672 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1673 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1674 gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
1677 rdev->config.evergreen.num_ses = 1;
1678 rdev->config.evergreen.max_pipes = 4;
1679 rdev->config.evergreen.max_tile_pipes = 4;
1680 rdev->config.evergreen.max_simds = 10;
1681 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
1682 rdev->config.evergreen.max_gprs = 256;
1683 rdev->config.evergreen.max_threads = 248;
1684 rdev->config.evergreen.max_gs_threads = 32;
1685 rdev->config.evergreen.max_stack_entries = 512;
1686 rdev->config.evergreen.sx_num_of_sets = 4;
1687 rdev->config.evergreen.sx_max_export_size = 256;
1688 rdev->config.evergreen.sx_max_export_pos_size = 64;
1689 rdev->config.evergreen.sx_max_export_smx_size = 192;
1690 rdev->config.evergreen.max_hw_contexts = 8;
1691 rdev->config.evergreen.sq_num_cf_insts = 2;
1693 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1694 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1695 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1696 gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
1699 rdev->config.evergreen.num_ses = 1;
1700 rdev->config.evergreen.max_pipes = 4;
1701 rdev->config.evergreen.max_tile_pipes = 4;
1702 rdev->config.evergreen.max_simds = 5;
1703 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
1704 rdev->config.evergreen.max_gprs = 256;
1705 rdev->config.evergreen.max_threads = 248;
1706 rdev->config.evergreen.max_gs_threads = 32;
1707 rdev->config.evergreen.max_stack_entries = 256;
1708 rdev->config.evergreen.sx_num_of_sets = 4;
1709 rdev->config.evergreen.sx_max_export_size = 256;
1710 rdev->config.evergreen.sx_max_export_pos_size = 64;
1711 rdev->config.evergreen.sx_max_export_smx_size = 192;
1712 rdev->config.evergreen.max_hw_contexts = 8;
1713 rdev->config.evergreen.sq_num_cf_insts = 2;
1715 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1716 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1717 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1718 gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
1722 rdev->config.evergreen.num_ses = 1;
1723 rdev->config.evergreen.max_pipes = 2;
1724 rdev->config.evergreen.max_tile_pipes = 2;
1725 rdev->config.evergreen.max_simds = 2;
1726 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1727 rdev->config.evergreen.max_gprs = 256;
1728 rdev->config.evergreen.max_threads = 192;
1729 rdev->config.evergreen.max_gs_threads = 16;
1730 rdev->config.evergreen.max_stack_entries = 256;
1731 rdev->config.evergreen.sx_num_of_sets = 4;
1732 rdev->config.evergreen.sx_max_export_size = 128;
1733 rdev->config.evergreen.sx_max_export_pos_size = 32;
1734 rdev->config.evergreen.sx_max_export_smx_size = 96;
1735 rdev->config.evergreen.max_hw_contexts = 4;
1736 rdev->config.evergreen.sq_num_cf_insts = 1;
1738 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1739 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1740 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1741 gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
1744 rdev->config.evergreen.num_ses = 1;
1745 rdev->config.evergreen.max_pipes = 2;
1746 rdev->config.evergreen.max_tile_pipes = 2;
1747 rdev->config.evergreen.max_simds = 2;
1748 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1749 rdev->config.evergreen.max_gprs = 256;
1750 rdev->config.evergreen.max_threads = 192;
1751 rdev->config.evergreen.max_gs_threads = 16;
1752 rdev->config.evergreen.max_stack_entries = 256;
1753 rdev->config.evergreen.sx_num_of_sets = 4;
1754 rdev->config.evergreen.sx_max_export_size = 128;
1755 rdev->config.evergreen.sx_max_export_pos_size = 32;
1756 rdev->config.evergreen.sx_max_export_smx_size = 96;
1757 rdev->config.evergreen.max_hw_contexts = 4;
1758 rdev->config.evergreen.sq_num_cf_insts = 1;
1760 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1761 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1762 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1763 gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
1766 rdev->config.evergreen.num_ses = 1;
1767 rdev->config.evergreen.max_pipes = 4;
1768 rdev->config.evergreen.max_tile_pipes = 2;
1769 if (rdev->pdev->device == 0x9648)
1770 rdev->config.evergreen.max_simds = 3;
1771 else if ((rdev->pdev->device == 0x9647) ||
1772 (rdev->pdev->device == 0x964a))
1773 rdev->config.evergreen.max_simds = 4;
1775 rdev->config.evergreen.max_simds = 5;
1776 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
1777 rdev->config.evergreen.max_gprs = 256;
1778 rdev->config.evergreen.max_threads = 248;
1779 rdev->config.evergreen.max_gs_threads = 32;
1780 rdev->config.evergreen.max_stack_entries = 256;
1781 rdev->config.evergreen.sx_num_of_sets = 4;
1782 rdev->config.evergreen.sx_max_export_size = 256;
1783 rdev->config.evergreen.sx_max_export_pos_size = 64;
1784 rdev->config.evergreen.sx_max_export_smx_size = 192;
1785 rdev->config.evergreen.max_hw_contexts = 8;
1786 rdev->config.evergreen.sq_num_cf_insts = 2;
1788 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1789 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1790 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1791 gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
1794 rdev->config.evergreen.num_ses = 1;
1795 rdev->config.evergreen.max_pipes = 4;
1796 rdev->config.evergreen.max_tile_pipes = 4;
1797 rdev->config.evergreen.max_simds = 2;
1798 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1799 rdev->config.evergreen.max_gprs = 256;
1800 rdev->config.evergreen.max_threads = 248;
1801 rdev->config.evergreen.max_gs_threads = 32;
1802 rdev->config.evergreen.max_stack_entries = 512;
1803 rdev->config.evergreen.sx_num_of_sets = 4;
1804 rdev->config.evergreen.sx_max_export_size = 256;
1805 rdev->config.evergreen.sx_max_export_pos_size = 64;
1806 rdev->config.evergreen.sx_max_export_smx_size = 192;
1807 rdev->config.evergreen.max_hw_contexts = 8;
1808 rdev->config.evergreen.sq_num_cf_insts = 2;
1810 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1811 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1812 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1813 gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
1816 rdev->config.evergreen.num_ses = 2;
1817 rdev->config.evergreen.max_pipes = 4;
1818 rdev->config.evergreen.max_tile_pipes = 8;
1819 rdev->config.evergreen.max_simds = 7;
1820 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
1821 rdev->config.evergreen.max_gprs = 256;
1822 rdev->config.evergreen.max_threads = 248;
1823 rdev->config.evergreen.max_gs_threads = 32;
1824 rdev->config.evergreen.max_stack_entries = 512;
1825 rdev->config.evergreen.sx_num_of_sets = 4;
1826 rdev->config.evergreen.sx_max_export_size = 256;
1827 rdev->config.evergreen.sx_max_export_pos_size = 64;
1828 rdev->config.evergreen.sx_max_export_smx_size = 192;
1829 rdev->config.evergreen.max_hw_contexts = 8;
1830 rdev->config.evergreen.sq_num_cf_insts = 2;
1832 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1833 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1834 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1835 gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
1838 rdev->config.evergreen.num_ses = 1;
1839 rdev->config.evergreen.max_pipes = 4;
1840 rdev->config.evergreen.max_tile_pipes = 4;
1841 rdev->config.evergreen.max_simds = 6;
1842 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
1843 rdev->config.evergreen.max_gprs = 256;
1844 rdev->config.evergreen.max_threads = 248;
1845 rdev->config.evergreen.max_gs_threads = 32;
1846 rdev->config.evergreen.max_stack_entries = 256;
1847 rdev->config.evergreen.sx_num_of_sets = 4;
1848 rdev->config.evergreen.sx_max_export_size = 256;
1849 rdev->config.evergreen.sx_max_export_pos_size = 64;
1850 rdev->config.evergreen.sx_max_export_smx_size = 192;
1851 rdev->config.evergreen.max_hw_contexts = 8;
1852 rdev->config.evergreen.sq_num_cf_insts = 2;
1854 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1855 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1856 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1857 gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
1860 rdev->config.evergreen.num_ses = 1;
1861 rdev->config.evergreen.max_pipes = 4;
1862 rdev->config.evergreen.max_tile_pipes = 2;
1863 rdev->config.evergreen.max_simds = 2;
1864 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1865 rdev->config.evergreen.max_gprs = 256;
1866 rdev->config.evergreen.max_threads = 192;
1867 rdev->config.evergreen.max_gs_threads = 16;
1868 rdev->config.evergreen.max_stack_entries = 256;
1869 rdev->config.evergreen.sx_num_of_sets = 4;
1870 rdev->config.evergreen.sx_max_export_size = 128;
1871 rdev->config.evergreen.sx_max_export_pos_size = 32;
1872 rdev->config.evergreen.sx_max_export_smx_size = 96;
1873 rdev->config.evergreen.max_hw_contexts = 4;
1874 rdev->config.evergreen.sq_num_cf_insts = 1;
1876 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1877 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1878 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1879 gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
1883 /* Initialize HDP */
1884 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1885 WREG32((0x2c14 + j), 0x00000000);
1886 WREG32((0x2c18 + j), 0x00000000);
1887 WREG32((0x2c1c + j), 0x00000000);
1888 WREG32((0x2c20 + j), 0x00000000);
1889 WREG32((0x2c24 + j), 0x00000000);
1890 }
1892 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1894 evergreen_fix_pci_max_read_req_size(rdev);
1896 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
1897 if ((rdev->family == CHIP_PALM) ||
1898 (rdev->family == CHIP_SUMO) ||
1899 (rdev->family == CHIP_SUMO2))
1900 mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
1901 else
1902 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
1904 /* setup tiling info dword. gb_addr_config is not adequate since it does
1905 * not have bank info, so create a custom tiling dword.
1906 * bits 3:0 num_pipes
1907 * bits 7:4 num_banks
1908 * bits 11:8 group_size
1909 * bits 15:12 row_size
1910 */
1911 rdev->config.evergreen.tile_config = 0;
1912 switch (rdev->config.evergreen.max_tile_pipes) {
1915 rdev->config.evergreen.tile_config |= (0 << 0);
1918 rdev->config.evergreen.tile_config |= (1 << 0);
1921 rdev->config.evergreen.tile_config |= (2 << 0);
1924 rdev->config.evergreen.tile_config |= (3 << 0);
1927 /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
1928 if (rdev->flags & RADEON_IS_IGP)
1929 rdev->config.evergreen.tile_config |= 1 << 4;
1931 switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
1932 case 0: /* four banks */
1933 rdev->config.evergreen.tile_config |= 0 << 4;
1935 case 1: /* eight banks */
1936 rdev->config.evergreen.tile_config |= 1 << 4;
1938 case 2: /* sixteen banks */
1940 rdev->config.evergreen.tile_config |= 2 << 4;
1944 rdev->config.evergreen.tile_config |= 0 << 8;
1945 rdev->config.evergreen.tile_config |=
1946 ((gb_addr_config & 0x30000000) >> 28) << 12;
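/* group_size (bits 11:8) is left at 0, presumably the 256-byte group size;
 * row_size is copied from GB_ADDR_CONFIG bits 29:28 into bits 15:12. */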
1948 num_shader_engines = ((gb_addr_config & NUM_SHADER_ENGINES(3)) >> 12) + 1;
1950 if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
1954 WREG32(RCU_IND_INDEX, 0x204);
1955 efuse_straps_4 = RREG32(RCU_IND_DATA);
1956 WREG32(RCU_IND_INDEX, 0x203);
1957 efuse_straps_3 = RREG32(RCU_IND_DATA);
1958 tmp = (((efuse_straps_4 & 0xf) << 4) |
1959 ((efuse_straps_3 & 0xf0000000) >> 28));
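/* on Cypress/Hemlock the initial RB disable mask is fused into the efuse
 * straps read above; other parts gather it from CC_RB_BACKEND_DISABLE below. */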
1962 for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
1963 u32 rb_disable_bitmap;
1965 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
1966 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
1967 rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
1969 tmp |= rb_disable_bitmap;
1972 /* the enabled RBs are simply the ones not disabled :) */
1973 disabled_rb_mask = tmp;
1975 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
1976 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
1978 WREG32(GB_ADDR_CONFIG, gb_addr_config);
1979 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
1980 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
1982 tmp = gb_addr_config & NUM_PIPES_MASK;
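/* remap the render backends around the disabled ones and program the
 * resulting backend map */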
1983 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
1984 EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
1985 WREG32(GB_BACKEND_MAP, tmp);
1987 WREG32(CGTS_SYS_TCC_DISABLE, 0);
1988 WREG32(CGTS_TCC_DISABLE, 0);
1989 WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
1990 WREG32(CGTS_USER_TCC_DISABLE, 0);
1992 /* set HW defaults for 3D engine */
1993 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
1994 ROQ_IB2_START(0x2b)));
1996 WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
1998 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
2003 sx_debug_1 = RREG32(SX_DEBUG_1);
2004 sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
2005 WREG32(SX_DEBUG_1, sx_debug_1);
2008 smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
2009 smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
2010 smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
2011 WREG32(SMX_DC_CTL0, smx_dc_ctl0);
2013 if (rdev->family <= CHIP_SUMO2)
2014 WREG32(SMX_SAR_CTL0, 0x00010000);
2016 WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
2017 POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
2018 SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
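/* the export buffer size fields are encoded as (size / 4) - 1, hence the
 * arithmetic above */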
2020 WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
2021 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
2022 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
2024 WREG32(VGT_NUM_INSTANCES, 1);
2025 WREG32(SPI_CONFIG_CNTL, 0);
2026 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
2027 WREG32(CP_PERFMON_CNTL, 0);
2029 WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
2030 FETCH_FIFO_HIWATER(0x4) |
2031 DONE_FIFO_HIWATER(0xe0) |
2032 ALU_UPDATE_FIFO_HIWATER(0x8)));
2034 sq_config = RREG32(SQ_CONFIG);
2035 sq_config &= ~(PS_PRIO(3) |
2039 sq_config |= (VC_ENABLE |
2046 switch (rdev->family) {
2052 /* no vertex cache */
2053 sq_config &= ~VC_ENABLE;
2059 sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
2061 sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32);
2062 sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
2063 sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
2064 sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
2065 sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
2066 sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
2067 sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
2069 switch (rdev->family) {
2074 ps_thread_count = 96;
2077 ps_thread_count = 128;
2081 sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
2082 sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2083 sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2084 sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2085 sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2086 sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
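/* threads left over after the PS allocation are split evenly across
 * VS/GS/ES/HS/LS, rounded down to a multiple of 8 */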
2088 sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2089 sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2090 sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2091 sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2092 sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2093 sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
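/* stack entries are split evenly, one sixth per shader type */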
2095 WREG32(SQ_CONFIG, sq_config);
2096 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
2097 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
2098 WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
2099 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
2100 WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
2101 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
2102 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
2103 WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
2104 WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
2105 WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
2107 WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
2108 FORCE_EOV_MAX_REZ_CNT(255)));
2110 switch (rdev->family) {
2116 vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
2119 vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
2122 vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
2123 WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
2125 WREG32(VGT_GS_VERTEX_REUSE, 16);
2126 WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
2127 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
2129 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
2130 WREG32(VGT_OUT_DEALLOC_CNTL, 16);
2132 WREG32(CB_PERF_CTR0_SEL_0, 0);
2133 WREG32(CB_PERF_CTR0_SEL_1, 0);
2134 WREG32(CB_PERF_CTR1_SEL_0, 0);
2135 WREG32(CB_PERF_CTR1_SEL_1, 0);
2136 WREG32(CB_PERF_CTR2_SEL_0, 0);
2137 WREG32(CB_PERF_CTR2_SEL_1, 0);
2138 WREG32(CB_PERF_CTR3_SEL_0, 0);
2139 WREG32(CB_PERF_CTR3_SEL_1, 0);
2141 /* clear render buffer base addresses */
2142 WREG32(CB_COLOR0_BASE, 0);
2143 WREG32(CB_COLOR1_BASE, 0);
2144 WREG32(CB_COLOR2_BASE, 0);
2145 WREG32(CB_COLOR3_BASE, 0);
2146 WREG32(CB_COLOR4_BASE, 0);
2147 WREG32(CB_COLOR5_BASE, 0);
2148 WREG32(CB_COLOR6_BASE, 0);
2149 WREG32(CB_COLOR7_BASE, 0);
2150 WREG32(CB_COLOR8_BASE, 0);
2151 WREG32(CB_COLOR9_BASE, 0);
2152 WREG32(CB_COLOR10_BASE, 0);
2153 WREG32(CB_COLOR11_BASE, 0);
2155 /* set the shader const cache sizes to 0 */
2156 for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
2158 for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
2161 tmp = RREG32(HDP_MISC_CNTL);
2162 tmp |= HDP_FLUSH_INVALIDATE_CACHE;
2163 WREG32(HDP_MISC_CNTL, tmp);
2165 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
2166 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
2168 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
2174 int evergreen_mc_init(struct radeon_device *rdev)
2177 int chansize, numchan;
2179 /* Get VRAM information */
2180 rdev->mc.vram_is_ddr = true;
2181 if ((rdev->family == CHIP_PALM) ||
2182 (rdev->family == CHIP_SUMO) ||
2183 (rdev->family == CHIP_SUMO2))
2184 tmp = RREG32(FUS_MC_ARB_RAMCFG);
2185 else
2186 tmp = RREG32(MC_ARB_RAMCFG);
2187 if (tmp & CHANSIZE_OVERRIDE) {
2189 } else if (tmp & CHANSIZE_MASK) {
2194 tmp = RREG32(MC_SHARED_CHMAP);
2195 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
2210 rdev->mc.vram_width = numchan * chansize;
2211 /* Could the aperture size report 0? */
2212 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
2213 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
2214 /* Setup GPU memory space */
2215 if ((rdev->family == CHIP_PALM) ||
2216 (rdev->family == CHIP_SUMO) ||
2217 (rdev->family == CHIP_SUMO2)) {
2218 /* size in bytes on fusion */
2219 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
2220 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
2221 } else {
2222 /* size in MB on evergreen/cayman/tn */
2223 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2224 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2225 }
2226 rdev->mc.visible_vram_size = rdev->mc.aper_size;
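/* CPU-visible VRAM is bounded by the PCI aperture, which may be smaller
 * than the total VRAM size reported above */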
2227 r700_vram_gtt_location(rdev, &rdev->mc);
2228 radeon_update_bandwidth_info(rdev);
2233 bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2237 u32 grbm_status_se0, grbm_status_se1;
2239 srbm_status = RREG32(SRBM_STATUS);
2240 grbm_status = RREG32(GRBM_STATUS);
2241 grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
2242 grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
2243 if (!(grbm_status & GUI_ACTIVE)) {
2244 radeon_ring_lockup_update(ring);
2247 /* force CP activities */
2248 radeon_ring_force_activity(rdev, ring);
2249 return radeon_ring_test_lockup(rdev, ring);
2252 static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
2254 struct evergreen_mc_save save;
2257 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
2260 dev_info(rdev->dev, "GPU softreset \n");
2261 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
2262 RREG32(GRBM_STATUS));
2263 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
2264 RREG32(GRBM_STATUS_SE0));
2265 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
2266 RREG32(GRBM_STATUS_SE1));
2267 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
2268 RREG32(SRBM_STATUS));
2269 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
2270 RREG32(CP_STALLED_STAT1));
2271 dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
2272 RREG32(CP_STALLED_STAT2));
2273 dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
2274 RREG32(CP_BUSY_STAT));
2275 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
2277 evergreen_mc_stop(rdev, &save);
2278 if (evergreen_mc_wait_for_idle(rdev)) {
2279 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
2281 /* Disable CP parsing/prefetching */
2282 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
2284 /* reset all the gfx blocks */
2285 grbm_reset = (SOFT_RESET_CP |
2298 dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
2299 WREG32(GRBM_SOFT_RESET, grbm_reset);
2300 (void)RREG32(GRBM_SOFT_RESET);
2302 WREG32(GRBM_SOFT_RESET, 0);
2303 (void)RREG32(GRBM_SOFT_RESET);
2304 /* Wait a little for things to settle down */
2306 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
2307 RREG32(GRBM_STATUS));
2308 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
2309 RREG32(GRBM_STATUS_SE0));
2310 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
2311 RREG32(GRBM_STATUS_SE1));
2312 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
2313 RREG32(SRBM_STATUS));
2314 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
2315 RREG32(CP_STALLED_STAT1));
2316 dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
2317 RREG32(CP_STALLED_STAT2));
2318 dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
2319 RREG32(CP_BUSY_STAT));
2320 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
2322 evergreen_mc_resume(rdev, &save);
2326 int evergreen_asic_reset(struct radeon_device *rdev)
2328 return evergreen_gpu_soft_reset(rdev);
2333 u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
2335 if (crtc >= rdev->num_crtc)
2338 return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
2341 void evergreen_disable_interrupt_state(struct radeon_device *rdev)
2345 if (rdev->family >= CHIP_CAYMAN) {
2346 cayman_cp_int_cntl_setup(rdev, 0,
2347 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2348 cayman_cp_int_cntl_setup(rdev, 1, 0);
2349 cayman_cp_int_cntl_setup(rdev, 2, 0);
2350 } else
2351 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2352 WREG32(GRBM_INT_CNTL, 0);
2353 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2354 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
2355 if (rdev->num_crtc >= 4) {
2356 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
2357 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
2359 if (rdev->num_crtc >= 6) {
2360 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
2361 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
2364 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2365 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
2366 if (rdev->num_crtc >= 4) {
2367 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
2368 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
2370 if (rdev->num_crtc >= 6) {
2371 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
2372 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
2375 /* only one DAC on DCE6 */
2376 if (!ASIC_IS_DCE6(rdev))
2377 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2378 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2380 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2381 WREG32(DC_HPD1_INT_CONTROL, tmp);
2382 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2383 WREG32(DC_HPD2_INT_CONTROL, tmp);
2384 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2385 WREG32(DC_HPD3_INT_CONTROL, tmp);
2386 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2387 WREG32(DC_HPD4_INT_CONTROL, tmp);
2388 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2389 WREG32(DC_HPD5_INT_CONTROL, tmp);
2390 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2391 WREG32(DC_HPD6_INT_CONTROL, tmp);
2395 int evergreen_irq_set(struct radeon_device *rdev)
2397 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
2398 u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
2399 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
2400 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
2401 u32 grbm_int_cntl = 0;
2402 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
2403 u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
2405 if (!rdev->irq.installed) {
2406 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
2409 /* don't enable anything if the ih is disabled */
2410 if (!rdev->ih.enabled) {
2411 r600_disable_interrupts(rdev);
2412 /* force the active interrupt state to all disabled */
2413 evergreen_disable_interrupt_state(rdev);
2417 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2418 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2419 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
2420 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
2421 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
2422 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
2424 afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
2425 afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
2426 afmt3 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
2427 afmt4 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
2428 afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
2429 afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
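/* the HPD/AFMT enable bits are cleared here and re-asserted below only for
 * the connectors and encoders that requested interrupts */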
2431 if (rdev->family >= CHIP_CAYMAN) {
2432 /* enable CP interrupts on all rings */
2433 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
2434 DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
2435 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
2437 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
2438 DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
2439 cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
2441 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
2442 DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
2443 cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
2446 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
2447 DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
2448 cp_int_cntl |= RB_INT_ENABLE;
2449 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
2453 if (rdev->irq.crtc_vblank_int[0] ||
2454 atomic_read(&rdev->irq.pflip[0])) {
2455 DRM_DEBUG("evergreen_irq_set: vblank 0\n");
2456 crtc1 |= VBLANK_INT_MASK;
2458 if (rdev->irq.crtc_vblank_int[1] ||
2459 atomic_read(&rdev->irq.pflip[1])) {
2460 DRM_DEBUG("evergreen_irq_set: vblank 1\n");
2461 crtc2 |= VBLANK_INT_MASK;
2463 if (rdev->irq.crtc_vblank_int[2] ||
2464 atomic_read(&rdev->irq.pflip[2])) {
2465 DRM_DEBUG("evergreen_irq_set: vblank 2\n");
2466 crtc3 |= VBLANK_INT_MASK;
2468 if (rdev->irq.crtc_vblank_int[3] ||
2469 atomic_read(&rdev->irq.pflip[3])) {
2470 DRM_DEBUG("evergreen_irq_set: vblank 3\n");
2471 crtc4 |= VBLANK_INT_MASK;
2473 if (rdev->irq.crtc_vblank_int[4] ||
2474 atomic_read(&rdev->irq.pflip[4])) {
2475 DRM_DEBUG("evergreen_irq_set: vblank 4\n");
2476 crtc5 |= VBLANK_INT_MASK;
2478 if (rdev->irq.crtc_vblank_int[5] ||
2479 atomic_read(&rdev->irq.pflip[5])) {
2480 DRM_DEBUG("evergreen_irq_set: vblank 5\n");
2481 crtc6 |= VBLANK_INT_MASK;
2483 if (rdev->irq.hpd[0]) {
2484 DRM_DEBUG("evergreen_irq_set: hpd 1\n");
2485 hpd1 |= DC_HPDx_INT_EN;
2487 if (rdev->irq.hpd[1]) {
2488 DRM_DEBUG("evergreen_irq_set: hpd 2\n");
2489 hpd2 |= DC_HPDx_INT_EN;
2491 if (rdev->irq.hpd[2]) {
2492 DRM_DEBUG("evergreen_irq_set: hpd 3\n");
2493 hpd3 |= DC_HPDx_INT_EN;
2495 if (rdev->irq.hpd[3]) {
2496 DRM_DEBUG("evergreen_irq_set: hpd 4\n");
2497 hpd4 |= DC_HPDx_INT_EN;
2499 if (rdev->irq.hpd[4]) {
2500 DRM_DEBUG("evergreen_irq_set: hpd 5\n");
2501 hpd5 |= DC_HPDx_INT_EN;
2503 if (rdev->irq.hpd[5]) {
2504 DRM_DEBUG("evergreen_irq_set: hpd 6\n");
2505 hpd6 |= DC_HPDx_INT_EN;
2507 if (rdev->irq.afmt[0]) {
2508 DRM_DEBUG("evergreen_irq_set: hdmi 0\n");
2509 afmt1 |= AFMT_AZ_FORMAT_WTRIG_MASK;
2511 if (rdev->irq.afmt[1]) {
2512 DRM_DEBUG("evergreen_irq_set: hdmi 1\n");
2513 afmt2 |= AFMT_AZ_FORMAT_WTRIG_MASK;
2515 if (rdev->irq.afmt[2]) {
2516 DRM_DEBUG("evergreen_irq_set: hdmi 2\n");
2517 afmt3 |= AFMT_AZ_FORMAT_WTRIG_MASK;
2519 if (rdev->irq.afmt[3]) {
2520 DRM_DEBUG("evergreen_irq_set: hdmi 3\n");
2521 afmt4 |= AFMT_AZ_FORMAT_WTRIG_MASK;
2523 if (rdev->irq.afmt[4]) {
2524 DRM_DEBUG("evergreen_irq_set: hdmi 4\n");
2525 afmt5 |= AFMT_AZ_FORMAT_WTRIG_MASK;
2527 if (rdev->irq.afmt[5]) {
2528 DRM_DEBUG("evergreen_irq_set: hdmi 5\n");
2529 afmt6 |= AFMT_AZ_FORMAT_WTRIG_MASK;
2532 if (rdev->family >= CHIP_CAYMAN) {
2533 cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
2534 cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
2535 cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
2536 } else
2537 WREG32(CP_INT_CNTL, cp_int_cntl);
2538 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
2540 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
2541 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
2542 if (rdev->num_crtc >= 4) {
2543 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
2544 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
2546 if (rdev->num_crtc >= 6) {
2547 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
2548 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
2551 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
2552 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
2553 if (rdev->num_crtc >= 4) {
2554 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
2555 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
2557 if (rdev->num_crtc >= 6) {
2558 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
2559 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
2562 WREG32(DC_HPD1_INT_CONTROL, hpd1);
2563 WREG32(DC_HPD2_INT_CONTROL, hpd2);
2564 WREG32(DC_HPD3_INT_CONTROL, hpd3);
2565 WREG32(DC_HPD4_INT_CONTROL, hpd4);
2566 WREG32(DC_HPD5_INT_CONTROL, hpd5);
2567 WREG32(DC_HPD6_INT_CONTROL, hpd6);
2569 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1);
2570 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2);
2571 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, afmt3);
2572 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, afmt4);
2573 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
2574 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
2579 static void evergreen_irq_ack(struct radeon_device *rdev)
2583 rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
2584 rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
2585 rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
2586 rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
2587 rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
2588 rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
2589 rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
2590 rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
2591 if (rdev->num_crtc >= 4) {
2592 rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
2593 rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
2595 if (rdev->num_crtc >= 6) {
2596 rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
2597 rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
2600 rdev->irq.stat_regs.evergreen.afmt_status1 = RREG32(AFMT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
2601 rdev->irq.stat_regs.evergreen.afmt_status2 = RREG32(AFMT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
2602 rdev->irq.stat_regs.evergreen.afmt_status3 = RREG32(AFMT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
2603 rdev->irq.stat_regs.evergreen.afmt_status4 = RREG32(AFMT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
2604 rdev->irq.stat_regs.evergreen.afmt_status5 = RREG32(AFMT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
2605 rdev->irq.stat_regs.evergreen.afmt_status6 = RREG32(AFMT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
2607 if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
2608 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2609 if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
2610 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2611 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
2612 WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
2613 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
2614 WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
2615 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
2616 WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
2617 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
2618 WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
2620 if (rdev->num_crtc >= 4) {
2621 if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
2622 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2623 if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
2624 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2625 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
2626 WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
2627 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
2628 WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
2629 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
2630 WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
2631 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
2632 WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
2635 if (rdev->num_crtc >= 6) {
2636 if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
2637 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2638 if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
2639 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2640 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
2641 WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
2642 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
2643 WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
2644 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
2645 WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
2646 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
2647 WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
2650 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
2651 tmp = RREG32(DC_HPD1_INT_CONTROL);
2652 tmp |= DC_HPDx_INT_ACK;
2653 WREG32(DC_HPD1_INT_CONTROL, tmp);
2655 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
2656 tmp = RREG32(DC_HPD2_INT_CONTROL);
2657 tmp |= DC_HPDx_INT_ACK;
2658 WREG32(DC_HPD2_INT_CONTROL, tmp);
2660 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
2661 tmp = RREG32(DC_HPD3_INT_CONTROL);
2662 tmp |= DC_HPDx_INT_ACK;
2663 WREG32(DC_HPD3_INT_CONTROL, tmp);
2665 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
2666 tmp = RREG32(DC_HPD4_INT_CONTROL);
2667 tmp |= DC_HPDx_INT_ACK;
2668 WREG32(DC_HPD4_INT_CONTROL, tmp);
2670 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
2671 tmp = RREG32(DC_HPD5_INT_CONTROL);
2672 tmp |= DC_HPDx_INT_ACK;
2673 WREG32(DC_HPD5_INT_CONTROL, tmp);
2675 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
2676 tmp = RREG32(DC_HPD6_INT_CONTROL);
2677 tmp |= DC_HPDx_INT_ACK;
2678 WREG32(DC_HPD6_INT_CONTROL, tmp);
2680 if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
2681 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
2682 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
2683 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, tmp);
2685 if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
2686 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
2687 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
2688 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, tmp);
2690 if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
2691 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
2692 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
2693 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, tmp);
2695 if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
2696 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
2697 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
2698 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, tmp);
2700 if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
2701 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
2702 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
2703 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, tmp);
2705 if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
2706 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
2707 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
2708 WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, tmp);
2712 static void evergreen_irq_disable(struct radeon_device *rdev)
2714 r600_disable_interrupts(rdev);
2715 /* Wait and acknowledge irq */
2717 evergreen_irq_ack(rdev);
2718 evergreen_disable_interrupt_state(rdev);
2721 void evergreen_irq_suspend(struct radeon_device *rdev)
2723 evergreen_irq_disable(rdev);
2724 r600_rlc_stop(rdev);
2727 static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
2731 if (rdev->wb.enabled)
2732 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
2734 wptr = RREG32(IH_RB_WPTR);
2736 if (wptr & RB_OVERFLOW) {
2737 /* When a ring buffer overflow happens, start parsing interrupts
2738 * from the last vector not yet overwritten (wptr + 16). Hopefully
2739 * this allows us to catch up.
2740 */
2741 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
2742 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
2743 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
2744 tmp = RREG32(IH_RB_CNTL);
2745 tmp |= IH_WPTR_OVERFLOW_CLEAR;
2746 WREG32(IH_RB_CNTL, tmp);
2748 return (wptr & rdev->ih.ptr_mask);
2751 int evergreen_irq_process(struct radeon_device *rdev)
2755 u32 src_id, src_data;
2757 bool queue_hotplug = false;
2758 bool queue_hdmi = false;
2760 if (!rdev->ih.enabled || rdev->shutdown)
2763 wptr = evergreen_get_ih_wptr(rdev);
2766 /* is somebody else already processing irqs? */
2767 if (atomic_xchg(&rdev->ih.lock, 1))
2770 rptr = rdev->ih.rptr;
2771 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
2773 /* Order reading of wptr vs. reading of IH ring data */
2776 /* display interrupts */
2777 evergreen_irq_ack(rdev);
2779 while (rptr != wptr) {
2780 /* wptr/rptr are in bytes! */
2781 ring_index = rptr / 4;
2782 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
2783 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
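/* each IH ring entry is 16 bytes: dword 0 holds the source id, dword 1 the
 * source data; rptr advances by 16 at the bottom of the loop */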
2786 case 1: /* D1 vblank/vline */
2788 case 0: /* D1 vblank */
2789 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
2790 if (rdev->irq.crtc_vblank_int[0]) {
2791 drm_handle_vblank(rdev->ddev, 0);
2792 rdev->pm.vblank_sync = true;
2793 wake_up(&rdev->irq.vblank_queue);
2795 if (atomic_read(&rdev->irq.pflip[0]))
2796 radeon_crtc_handle_flip(rdev, 0);
2797 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
2798 DRM_DEBUG("IH: D1 vblank\n");
2801 case 1: /* D1 vline */
2802 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
2803 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
2804 DRM_DEBUG("IH: D1 vline\n");
2808 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2812 case 2: /* D2 vblank/vline */
2814 case 0: /* D2 vblank */
2815 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
2816 if (rdev->irq.crtc_vblank_int[1]) {
2817 drm_handle_vblank(rdev->ddev, 1);
2818 rdev->pm.vblank_sync = true;
2819 wake_up(&rdev->irq.vblank_queue);
2821 if (atomic_read(&rdev->irq.pflip[1]))
2822 radeon_crtc_handle_flip(rdev, 1);
2823 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
2824 DRM_DEBUG("IH: D2 vblank\n");
2827 case 1: /* D2 vline */
2828 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
2829 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
2830 DRM_DEBUG("IH: D2 vline\n");
2834 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2838 case 3: /* D3 vblank/vline */
2840 case 0: /* D3 vblank */
2841 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
2842 if (rdev->irq.crtc_vblank_int[2]) {
2843 drm_handle_vblank(rdev->ddev, 2);
2844 rdev->pm.vblank_sync = true;
2845 wake_up(&rdev->irq.vblank_queue);
2847 if (atomic_read(&rdev->irq.pflip[2]))
2848 radeon_crtc_handle_flip(rdev, 2);
2849 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
2850 DRM_DEBUG("IH: D3 vblank\n");
2853 case 1: /* D3 vline */
2854 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
2855 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
2856 DRM_DEBUG("IH: D3 vline\n");
2860 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2864 case 4: /* D4 vblank/vline */
2866 case 0: /* D4 vblank */
2867 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
2868 if (rdev->irq.crtc_vblank_int[3]) {
2869 drm_handle_vblank(rdev->ddev, 3);
2870 rdev->pm.vblank_sync = true;
2871 wake_up(&rdev->irq.vblank_queue);
2873 if (atomic_read(&rdev->irq.pflip[3]))
2874 radeon_crtc_handle_flip(rdev, 3);
2875 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
2876 DRM_DEBUG("IH: D4 vblank\n");
2879 case 1: /* D4 vline */
2880 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
2881 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
2882 DRM_DEBUG("IH: D4 vline\n");
2886 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2890 case 5: /* D5 vblank/vline */
2892 case 0: /* D5 vblank */
2893 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
2894 if (rdev->irq.crtc_vblank_int[4]) {
2895 drm_handle_vblank(rdev->ddev, 4);
2896 rdev->pm.vblank_sync = true;
2897 wake_up(&rdev->irq.vblank_queue);
2899 if (atomic_read(&rdev->irq.pflip[4]))
2900 radeon_crtc_handle_flip(rdev, 4);
2901 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
2902 DRM_DEBUG("IH: D5 vblank\n");
2905 case 1: /* D5 vline */
2906 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
2907 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
2908 DRM_DEBUG("IH: D5 vline\n");
2912 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2916 case 6: /* D6 vblank/vline */
2918 case 0: /* D6 vblank */
2919 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
2920 if (rdev->irq.crtc_vblank_int[5]) {
2921 drm_handle_vblank(rdev->ddev, 5);
2922 rdev->pm.vblank_sync = true;
2923 wake_up(&rdev->irq.vblank_queue);
2925 if (atomic_read(&rdev->irq.pflip[5]))
2926 radeon_crtc_handle_flip(rdev, 5);
2927 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
2928 DRM_DEBUG("IH: D6 vblank\n");
2931 case 1: /* D6 vline */
2932 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
2933 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
2934 DRM_DEBUG("IH: D6 vline\n");
2938 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2942 case 42: /* HPD hotplug */
2945 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
2946 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
2947 queue_hotplug = true;
2948 DRM_DEBUG("IH: HPD1\n");
2952 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
2953 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
2954 queue_hotplug = true;
2955 DRM_DEBUG("IH: HPD2\n");
2959 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
2960 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
2961 queue_hotplug = true;
2962 DRM_DEBUG("IH: HPD3\n");
2966 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
2967 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
2968 queue_hotplug = true;
2969 DRM_DEBUG("IH: HPD4\n");
2973 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
2974 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
2975 queue_hotplug = true;
2976 DRM_DEBUG("IH: HPD5\n");
2980 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
2981 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
2982 queue_hotplug = true;
2983 DRM_DEBUG("IH: HPD6\n");
2987 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2994 if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
2995 rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
2997 DRM_DEBUG("IH: HDMI0\n");
3001 if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
3002 rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
3004 DRM_DEBUG("IH: HDMI1\n");
3008 if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
3009 rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
3011 DRM_DEBUG("IH: HDMI2\n");
3015 if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
3016 rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
3018 DRM_DEBUG("IH: HDMI3\n");
3022 if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
3023 rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
3025 DRM_DEBUG("IH: HDMI4\n");
3029 if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
3030 rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
3032 DRM_DEBUG("IH: HDMI5\n");
3036 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
3040 case 176: /* CP_INT in ring buffer */
3041 case 177: /* CP_INT in IB1 */
3042 case 178: /* CP_INT in IB2 */
3043 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
3044 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3046 case 181: /* CP EOP event */
3047 DRM_DEBUG("IH: CP EOP\n");
3048 if (rdev->family >= CHIP_CAYMAN) {
3051 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3054 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
3057 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
3061 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3063 case 233: /* GUI IDLE */
3064 DRM_DEBUG("IH: GUI idle\n");
3067 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3071 /* wptr/rptr are in bytes! */
3072 rptr += 16;
3073 rptr &= rdev->ih.ptr_mask;
3076 schedule_work(&rdev->hotplug_work);
3078 schedule_work(&rdev->audio_work);
3079 rdev->ih.rptr = rptr;
3080 WREG32(IH_RB_RPTR, rdev->ih.rptr);
3081 atomic_set(&rdev->ih.lock, 0);
3083 /* make sure wptr hasn't changed while processing */
3084 wptr = evergreen_get_ih_wptr(rdev);
3091 static int evergreen_startup(struct radeon_device *rdev)
3093 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3096 /* enable pcie gen2 link */
3097 evergreen_pcie_gen2_enable(rdev);
3099 if (ASIC_IS_DCE5(rdev)) {
3100 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
3101 r = ni_init_microcode(rdev);
3103 DRM_ERROR("Failed to load firmware!\n");
3107 r = ni_mc_load_microcode(rdev);
3109 DRM_ERROR("Failed to load MC firmware!\n");
3113 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
3114 r = r600_init_microcode(rdev);
3116 DRM_ERROR("Failed to load firmware!\n");
3122 r = r600_vram_scratch_init(rdev);
3126 evergreen_mc_program(rdev);
3127 if (rdev->flags & RADEON_IS_AGP) {
3128 evergreen_agp_enable(rdev);
3130 r = evergreen_pcie_gart_enable(rdev);
3134 evergreen_gpu_init(rdev);
3136 r = evergreen_blit_init(rdev);
3138 r600_blit_fini(rdev);
3139 rdev->asic->copy.copy = NULL;
3140 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
3143 /* allocate wb buffer */
3144 r = radeon_wb_init(rdev);
3148 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
3150 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
3155 r = r600_irq_init(rdev);
3157 DRM_ERROR("radeon: IH init failed (%d).\n", r);
3158 radeon_irq_kms_fini(rdev);
3161 evergreen_irq_set(rdev);
3163 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
3164 R600_CP_RB_RPTR, R600_CP_RB_WPTR,
3165 0, 0xfffff, RADEON_CP_PACKET2);
3168 r = evergreen_cp_load_microcode(rdev);
3171 r = evergreen_cp_resume(rdev);
3175 r = radeon_ib_pool_init(rdev);
3177 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
3181 r = r600_audio_init(rdev);
3183 DRM_ERROR("radeon: audio init failed\n");
3190 int evergreen_resume(struct radeon_device *rdev)
3194 /* reset the asic, the gfx blocks are often in a bad state
3195 * after the driver is unloaded or after a resume
3196 */
3197 if (radeon_asic_reset(rdev))
3198 dev_warn(rdev->dev, "GPU reset failed !\n");
3199 /* Do not reset the GPU before posting; on rv770 hw, unlike r500 hw,
3200 * posting performs the necessary work to bring the GPU back into a good
3201 * state.
3202 */
3204 atom_asic_init(rdev->mode_info.atom_context);
3206 rdev->accel_working = true;
3207 r = evergreen_startup(rdev);
3209 DRM_ERROR("evergreen startup failed on resume\n");
3210 rdev->accel_working = false;
3218 int evergreen_suspend(struct radeon_device *rdev)
3220 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3222 r600_audio_fini(rdev);
3224 ring->ready = false;
3225 evergreen_irq_suspend(rdev);
3226 radeon_wb_disable(rdev);
3227 evergreen_pcie_gart_disable(rdev);
3232 /* Plan is to move initialization into this function and use
3233 * helper functions so that radeon_device_init does pretty much
3234 * nothing more than call asic-specific functions. This should
3235 * also allow us to remove a bunch of callback functions.
3236 */
3238 int evergreen_init(struct radeon_device *rdev)
3243 if (!radeon_get_bios(rdev)) {
3244 if (ASIC_IS_AVIVO(rdev))
3247 /* Must be an ATOMBIOS */
3248 if (!rdev->is_atom_bios) {
3249 dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
3252 r = radeon_atombios_init(rdev);
3255 /* reset the asic, the gfx blocks are often in a bad state
3256 * after the driver is unloaded or after a resume
3257 */
3258 if (radeon_asic_reset(rdev))
3259 dev_warn(rdev->dev, "GPU reset failed !\n");
3260 /* Post card if necessary */
3261 if (!radeon_card_posted(rdev)) {
3263 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
3266 DRM_INFO("GPU not posted. posting now...\n");
3267 atom_asic_init(rdev->mode_info.atom_context);
3269 /* Initialize scratch registers */
3270 r600_scratch_init(rdev);
3271 /* Initialize surface registers */
3272 radeon_surface_init(rdev);
3273 /* Initialize clocks */
3274 radeon_get_clock_info(rdev->ddev);
3276 r = radeon_fence_driver_init(rdev);
3279 /* initialize AGP */
3280 if (rdev->flags & RADEON_IS_AGP) {
3281 r = radeon_agp_init(rdev);
3283 radeon_agp_disable(rdev);
3285 /* initialize memory controller */
3286 r = evergreen_mc_init(rdev);
3289 /* Memory manager */
3290 r = radeon_bo_init(rdev);
3294 r = radeon_irq_kms_init(rdev);
3298 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
3299 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
3301 rdev->ih.ring_obj = NULL;
3302 r600_ih_ring_init(rdev, 64 * 1024);
3304 r = r600_pcie_gart_init(rdev);
3308 rdev->accel_working = true;
3309 r = evergreen_startup(rdev);
3311 dev_err(rdev->dev, "disabling GPU acceleration\n");
3313 r600_irq_fini(rdev);
3314 radeon_wb_fini(rdev);
3315 radeon_ib_pool_fini(rdev);
3316 radeon_irq_kms_fini(rdev);
3317 evergreen_pcie_gart_fini(rdev);
3318 rdev->accel_working = false;
3321 /* Don't start up if the MC ucode is missing on BTC parts.
3322 * The default clocks and voltages before the MC ucode
3323 * is loaded are not sufficient for advanced operations.
3324 */
3325 if (ASIC_IS_DCE5(rdev)) {
3326 if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
3327 DRM_ERROR("radeon: MC ucode required for NI+.\n");
3335 void evergreen_fini(struct radeon_device *rdev)
3337 r600_audio_fini(rdev);
3338 r600_blit_fini(rdev);
3340 r600_irq_fini(rdev);
3341 radeon_wb_fini(rdev);
3342 radeon_ib_pool_fini(rdev);
3343 radeon_irq_kms_fini(rdev);
3344 evergreen_pcie_gart_fini(rdev);
3345 r600_vram_scratch_fini(rdev);
3346 radeon_gem_fini(rdev);
3347 radeon_fence_driver_fini(rdev);
3348 radeon_agp_fini(rdev);
3349 radeon_bo_fini(rdev);
3350 radeon_atombios_fini(rdev);
3355 void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
3357 u32 link_width_cntl, speed_cntl, mask;
3360 if (radeon_pcie_gen2 == 0)
3363 if (rdev->flags & RADEON_IS_IGP)
3366 if (!(rdev->flags & RADEON_IS_PCIE))
3369 /* x2 cards have a special sequence */
3370 if (ASIC_IS_X2(rdev))
3373 ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
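/* only attempt the gen2 bring-up if the link advertises 5.0 GT/s support */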
3377 if (!(mask & DRM_PCIE_SPEED_50))
3380 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
3382 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3383 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
3384 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
3386 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3387 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3388 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3390 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3391 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
3392 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3394 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3395 speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
3396 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3398 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3399 speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
3400 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3402 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3403 speed_cntl |= LC_GEN2_EN_STRAP;
3404 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3407 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3408 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
3410 link_width_cntl |= LC_UPCONFIGURE_DIS;
3412 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3413 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);