Merge branch 'drm-next-4.9' of git://people.freedesktop.org/~agd5f/linux into drm...
[linux-2.6-block.git] / drivers / gpu / drm / amd / amdgpu / dce_v10_0.c
CommitLineData
aaa36a97
AD
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include "drmP.h"
24#include "amdgpu.h"
25#include "amdgpu_pm.h"
26#include "amdgpu_i2c.h"
27#include "vid.h"
28#include "atom.h"
29#include "amdgpu_atombios.h"
30#include "atombios_crtc.h"
31#include "atombios_encoders.h"
32#include "amdgpu_pll.h"
33#include "amdgpu_connectors.h"
34
35#include "dce/dce_10_0_d.h"
36#include "dce/dce_10_0_sh_mask.h"
37#include "dce/dce_10_0_enum.h"
38#include "oss/oss_3_0_d.h"
39#include "oss/oss_3_0_sh_mask.h"
40#include "gmc/gmc_8_1_d.h"
41#include "gmc/gmc_8_1_sh_mask.h"
42
43static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev);
44static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev);
45
46static const u32 crtc_offsets[] =
47{
48 CRTC0_REGISTER_OFFSET,
49 CRTC1_REGISTER_OFFSET,
50 CRTC2_REGISTER_OFFSET,
51 CRTC3_REGISTER_OFFSET,
52 CRTC4_REGISTER_OFFSET,
53 CRTC5_REGISTER_OFFSET,
54 CRTC6_REGISTER_OFFSET
55};
56
57static const u32 hpd_offsets[] =
58{
59 HPD0_REGISTER_OFFSET,
60 HPD1_REGISTER_OFFSET,
61 HPD2_REGISTER_OFFSET,
62 HPD3_REGISTER_OFFSET,
63 HPD4_REGISTER_OFFSET,
64 HPD5_REGISTER_OFFSET
65};
66
67static const uint32_t dig_offsets[] = {
68 DIG0_REGISTER_OFFSET,
69 DIG1_REGISTER_OFFSET,
70 DIG2_REGISTER_OFFSET,
71 DIG3_REGISTER_OFFSET,
72 DIG4_REGISTER_OFFSET,
73 DIG5_REGISTER_OFFSET,
74 DIG6_REGISTER_OFFSET
75};
76
77static const struct {
78 uint32_t reg;
79 uint32_t vblank;
80 uint32_t vline;
81 uint32_t hpd;
82
83} interrupt_status_offsets[] = { {
84 .reg = mmDISP_INTERRUPT_STATUS,
85 .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
86 .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
87 .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
88}, {
89 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
90 .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
91 .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
92 .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
93}, {
94 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
95 .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
96 .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
97 .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
98}, {
99 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
100 .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
101 .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
102 .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
103}, {
104 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
105 .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
106 .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
107 .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
108}, {
109 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
110 .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
111 .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
112 .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
113} };
114
115static const u32 golden_settings_tonga_a11[] =
116{
117 mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
118 mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
119 mmFBC_MISC, 0x1f311fff, 0x12300000,
120 mmHDMI_CONTROL, 0x31000111, 0x00000011,
121};
122
5732a94f
AD
123static const u32 tonga_mgcg_cgcg_init[] =
124{
125 mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
126 mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
127};
128
84390860
DZ
129static const u32 golden_settings_fiji_a10[] =
130{
131 mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
132 mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
133 mmFBC_MISC, 0x1f311fff, 0x12300000,
134 mmHDMI_CONTROL, 0x31000111, 0x00000011,
135};
136
137static const u32 fiji_mgcg_cgcg_init[] =
138{
139 mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
140 mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
141};
142
aaa36a97
AD
143static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev)
144{
145 switch (adev->asic_type) {
84390860
DZ
146 case CHIP_FIJI:
147 amdgpu_program_register_sequence(adev,
148 fiji_mgcg_cgcg_init,
149 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
150 amdgpu_program_register_sequence(adev,
151 golden_settings_fiji_a10,
152 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
153 break;
aaa36a97 154 case CHIP_TONGA:
5732a94f
AD
155 amdgpu_program_register_sequence(adev,
156 tonga_mgcg_cgcg_init,
157 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
aaa36a97
AD
158 amdgpu_program_register_sequence(adev,
159 golden_settings_tonga_a11,
160 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
161 break;
162 default:
163 break;
164 }
165}
166
167static u32 dce_v10_0_audio_endpt_rreg(struct amdgpu_device *adev,
168 u32 block_offset, u32 reg)
169{
170 unsigned long flags;
171 u32 r;
172
173 spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
174 WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
175 r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
176 spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
177
178 return r;
179}
180
181static void dce_v10_0_audio_endpt_wreg(struct amdgpu_device *adev,
182 u32 block_offset, u32 reg, u32 v)
183{
184 unsigned long flags;
185
186 spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
187 WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
188 WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
189 spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
190}
191
192static bool dce_v10_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
193{
194 if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
195 CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
196 return true;
197 else
198 return false;
199}
200
201static bool dce_v10_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
202{
203 u32 pos1, pos2;
204
205 pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
206 pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
207
208 if (pos1 != pos2)
209 return true;
210 else
211 return false;
212}
213
214/**
215 * dce_v10_0_vblank_wait - vblank wait asic callback.
216 *
217 * @adev: amdgpu_device pointer
218 * @crtc: crtc to wait for vblank on
219 *
220 * Wait for vblank on the requested crtc (evergreen+).
221 */
222static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc)
223{
e37e4f05 224 unsigned i = 100;
aaa36a97
AD
225
226 if (crtc >= adev->mode_info.num_crtc)
227 return;
228
229 if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
230 return;
231
232 /* depending on when we hit vblank, we may be close to active; if so,
233 * wait for another frame.
234 */
235 while (dce_v10_0_is_in_vblank(adev, crtc)) {
e37e4f05
TSD
236 if (i++ == 100) {
237 i = 0;
aaa36a97
AD
238 if (!dce_v10_0_is_counter_moving(adev, crtc))
239 break;
240 }
241 }
242
243 while (!dce_v10_0_is_in_vblank(adev, crtc)) {
e37e4f05
TSD
244 if (i++ == 100) {
245 i = 0;
aaa36a97
AD
246 if (!dce_v10_0_is_counter_moving(adev, crtc))
247 break;
248 }
249 }
250}
251
252static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
253{
254 if (crtc >= adev->mode_info.num_crtc)
255 return 0;
256 else
257 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
258}
259
f6c7aba4
MD
260static void dce_v10_0_pageflip_interrupt_init(struct amdgpu_device *adev)
261{
262 unsigned i;
263
264 /* Enable pflip interrupts */
265 for (i = 0; i < adev->mode_info.num_crtc; i++)
266 amdgpu_irq_get(adev, &adev->pageflip_irq, i);
267}
268
269static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
270{
271 unsigned i;
272
273 /* Disable pflip interrupts */
274 for (i = 0; i < adev->mode_info.num_crtc; i++)
275 amdgpu_irq_put(adev, &adev->pageflip_irq, i);
276}
277
aaa36a97
AD
278/**
279 * dce_v10_0_page_flip - pageflip callback.
280 *
281 * @adev: amdgpu_device pointer
282 * @crtc_id: crtc to cleanup pageflip on
283 * @crtc_base: new address of the crtc (GPU MC address)
284 *
0eaaacab
AD
285 * Triggers the actual pageflip by updating the primary
286 * surface base address.
aaa36a97
AD
287 */
288static void dce_v10_0_page_flip(struct amdgpu_device *adev,
cb9e59d7 289 int crtc_id, u64 crtc_base, bool async)
aaa36a97
AD
290{
291 struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
cb9e59d7 292 u32 tmp;
aaa36a97 293
cb9e59d7
AD
294 /* flip at hsync for async, default is vsync */
295 tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
296 tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
297 GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
298 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
0eaaacab 299 /* update the primary scanout address */
aaa36a97
AD
300 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
301 upper_32_bits(crtc_base));
0eaaacab 302 /* writing to the low address triggers the update */
aaa36a97
AD
303 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
304 lower_32_bits(crtc_base));
0eaaacab
AD
305 /* post the write */
306 RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
aaa36a97
AD
307}
308
309static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
310 u32 *vbl, u32 *position)
311{
312 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
313 return -EINVAL;
314
315 *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
316 *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
317
318 return 0;
319}
320
321/**
322 * dce_v10_0_hpd_sense - hpd sense callback.
323 *
324 * @adev: amdgpu_device pointer
325 * @hpd: hpd (hotplug detect) pin
326 *
327 * Checks if a digital monitor is connected (evergreen+).
328 * Returns true if connected, false if not connected.
329 */
330static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev,
331 enum amdgpu_hpd_id hpd)
332{
333 int idx;
334 bool connected = false;
335
336 switch (hpd) {
337 case AMDGPU_HPD_1:
338 idx = 0;
339 break;
340 case AMDGPU_HPD_2:
341 idx = 1;
342 break;
343 case AMDGPU_HPD_3:
344 idx = 2;
345 break;
346 case AMDGPU_HPD_4:
347 idx = 3;
348 break;
349 case AMDGPU_HPD_5:
350 idx = 4;
351 break;
352 case AMDGPU_HPD_6:
353 idx = 5;
354 break;
355 default:
356 return connected;
357 }
358
359 if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
360 DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
361 connected = true;
362
363 return connected;
364}
365
366/**
367 * dce_v10_0_hpd_set_polarity - hpd set polarity callback.
368 *
369 * @adev: amdgpu_device pointer
370 * @hpd: hpd (hotplug detect) pin
371 *
372 * Set the polarity of the hpd pin (evergreen+).
373 */
374static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev,
375 enum amdgpu_hpd_id hpd)
376{
377 u32 tmp;
378 bool connected = dce_v10_0_hpd_sense(adev, hpd);
379 int idx;
380
381 switch (hpd) {
382 case AMDGPU_HPD_1:
383 idx = 0;
384 break;
385 case AMDGPU_HPD_2:
386 idx = 1;
387 break;
388 case AMDGPU_HPD_3:
389 idx = 2;
390 break;
391 case AMDGPU_HPD_4:
392 idx = 3;
393 break;
394 case AMDGPU_HPD_5:
395 idx = 4;
396 break;
397 case AMDGPU_HPD_6:
398 idx = 5;
399 break;
400 default:
401 return;
402 }
403
404 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
405 if (connected)
406 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
407 else
408 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
409 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
410}
411
412/**
413 * dce_v10_0_hpd_init - hpd setup callback.
414 *
415 * @adev: amdgpu_device pointer
416 *
417 * Setup the hpd pins used by the card (evergreen+).
418 * Enable the pin, set the polarity, and enable the hpd interrupts.
419 */
420static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
421{
422 struct drm_device *dev = adev->ddev;
423 struct drm_connector *connector;
424 u32 tmp;
425 int idx;
426
427 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
428 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
429
430 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
431 connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
432 /* don't try to enable hpd on eDP or LVDS avoid breaking the
433 * aux dp channel on imac and help (but not completely fix)
434 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
435 * also avoid interrupt storms during dpms.
436 */
437 continue;
438 }
439
440 switch (amdgpu_connector->hpd.hpd) {
441 case AMDGPU_HPD_1:
442 idx = 0;
443 break;
444 case AMDGPU_HPD_2:
445 idx = 1;
446 break;
447 case AMDGPU_HPD_3:
448 idx = 2;
449 break;
450 case AMDGPU_HPD_4:
451 idx = 3;
452 break;
453 case AMDGPU_HPD_5:
454 idx = 4;
455 break;
456 case AMDGPU_HPD_6:
457 idx = 5;
458 break;
459 default:
460 continue;
461 }
462
463 tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
464 tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
465 WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
466
467 tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
468 tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
469 DC_HPD_CONNECT_INT_DELAY,
470 AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
471 tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
472 DC_HPD_DISCONNECT_INT_DELAY,
473 AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
474 WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);
475
476 dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
477 amdgpu_irq_get(adev, &adev->hpd_irq,
478 amdgpu_connector->hpd.hpd);
479 }
480}
481
482/**
483 * dce_v10_0_hpd_fini - hpd tear down callback.
484 *
485 * @adev: amdgpu_device pointer
486 *
487 * Tear down the hpd pins used by the card (evergreen+).
488 * Disable the hpd interrupts.
489 */
490static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
491{
492 struct drm_device *dev = adev->ddev;
493 struct drm_connector *connector;
494 u32 tmp;
495 int idx;
496
497 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
498 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
499
500 switch (amdgpu_connector->hpd.hpd) {
501 case AMDGPU_HPD_1:
502 idx = 0;
503 break;
504 case AMDGPU_HPD_2:
505 idx = 1;
506 break;
507 case AMDGPU_HPD_3:
508 idx = 2;
509 break;
510 case AMDGPU_HPD_4:
511 idx = 3;
512 break;
513 case AMDGPU_HPD_5:
514 idx = 4;
515 break;
516 case AMDGPU_HPD_6:
517 idx = 5;
518 break;
519 default:
520 continue;
521 }
522
523 tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
524 tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
525 WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
526
527 amdgpu_irq_put(adev, &adev->hpd_irq,
528 amdgpu_connector->hpd.hpd);
529 }
530}
531
532static u32 dce_v10_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
533{
534 return mmDC_GPIO_HPD_A;
535}
536
537static bool dce_v10_0_is_display_hung(struct amdgpu_device *adev)
538{
539 u32 crtc_hung = 0;
540 u32 crtc_status[6];
541 u32 i, j, tmp;
542
543 for (i = 0; i < adev->mode_info.num_crtc; i++) {
544 tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
545 if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
546 crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
547 crtc_hung |= (1 << i);
548 }
549 }
550
551 for (j = 0; j < 10; j++) {
552 for (i = 0; i < adev->mode_info.num_crtc; i++) {
553 if (crtc_hung & (1 << i)) {
554 tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
555 if (tmp != crtc_status[i])
556 crtc_hung &= ~(1 << i);
557 }
558 }
559 if (crtc_hung == 0)
560 return false;
561 udelay(100);
562 }
563
564 return true;
565}
566
567static void dce_v10_0_stop_mc_access(struct amdgpu_device *adev,
568 struct amdgpu_mode_mc_save *save)
569{
570 u32 crtc_enabled, tmp;
571 int i;
572
573 save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
574 save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
575
576 /* disable VGA render */
577 tmp = RREG32(mmVGA_RENDER_CONTROL);
578 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
579 WREG32(mmVGA_RENDER_CONTROL, tmp);
580
581 /* blank the display controllers */
582 for (i = 0; i < adev->mode_info.num_crtc; i++) {
583 crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
584 CRTC_CONTROL, CRTC_MASTER_EN);
585 if (crtc_enabled) {
586#if 0
587 u32 frame_count;
588 int j;
589
590 save->crtc_enabled[i] = true;
591 tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
592 if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
593 amdgpu_display_vblank_wait(adev, i);
594 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
595 tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
596 WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
597 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
598 }
599 /* wait for the next frame */
600 frame_count = amdgpu_display_vblank_get_counter(adev, i);
601 for (j = 0; j < adev->usec_timeout; j++) {
602 if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
603 break;
604 udelay(1);
605 }
606 tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
607 if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
608 tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
609 WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
610 }
611 tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
612 if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
613 tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
614 WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
615 }
616#else
617 /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
618 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
619 tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
620 tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
621 WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
622 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
623 save->crtc_enabled[i] = false;
624 /* ***** */
625#endif
626 } else {
627 save->crtc_enabled[i] = false;
628 }
629 }
630}
631
632static void dce_v10_0_resume_mc_access(struct amdgpu_device *adev,
633 struct amdgpu_mode_mc_save *save)
634{
635 u32 tmp, frame_count;
636 int i, j;
637
638 /* update crtc base addresses */
639 for (i = 0; i < adev->mode_info.num_crtc; i++) {
640 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
641 upper_32_bits(adev->mc.vram_start));
642 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
643 upper_32_bits(adev->mc.vram_start));
644 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
645 (u32)adev->mc.vram_start);
646 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
647 (u32)adev->mc.vram_start);
648
649 if (save->crtc_enabled[i]) {
650 tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
3fd4b751
MD
651 if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 0) {
652 tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 0);
aaa36a97
AD
653 WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
654 }
655 tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
656 if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
657 tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
658 WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
659 }
660 tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
661 if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
662 tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
663 WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
664 }
665 for (j = 0; j < adev->usec_timeout; j++) {
666 tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
667 if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
668 break;
669 udelay(1);
670 }
671 tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
672 tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
673 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
674 WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
675 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
676 /* wait for the next frame */
677 frame_count = amdgpu_display_vblank_get_counter(adev, i);
678 for (j = 0; j < adev->usec_timeout; j++) {
679 if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
680 break;
681 udelay(1);
682 }
683 }
684 }
685
686 WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
687 WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
688
689 /* Unlock vga access */
690 WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
691 mdelay(1);
692 WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
693}
694
695static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev,
696 bool render)
697{
698 u32 tmp;
699
700 /* Lockout access through VGA aperture*/
701 tmp = RREG32(mmVGA_HDP_CONTROL);
702 if (render)
703 tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
704 else
705 tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
706 WREG32(mmVGA_HDP_CONTROL, tmp);
707
708 /* disable VGA render */
709 tmp = RREG32(mmVGA_RENDER_CONTROL);
710 if (render)
711 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
712 else
713 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
714 WREG32(mmVGA_RENDER_CONTROL, tmp);
715}
716
83c9b025
ED
717static int dce_v10_0_get_num_crtc(struct amdgpu_device *adev)
718{
719 int num_crtc = 0;
720
721 switch (adev->asic_type) {
722 case CHIP_FIJI:
723 case CHIP_TONGA:
724 num_crtc = 6;
725 break;
726 default:
727 num_crtc = 0;
728 }
729 return num_crtc;
730}
731
732void dce_v10_0_disable_dce(struct amdgpu_device *adev)
733{
734 /*Disable VGA render and enabled crtc, if has DCE engine*/
735 if (amdgpu_atombios_has_dce_engine_info(adev)) {
736 u32 tmp;
737 int crtc_enabled, i;
738
739 dce_v10_0_set_vga_render_state(adev, false);
740
741 /*Disable crtc*/
742 for (i = 0; i < dce_v10_0_get_num_crtc(adev); i++) {
743 crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
744 CRTC_CONTROL, CRTC_MASTER_EN);
745 if (crtc_enabled) {
746 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
747 tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
748 tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
749 WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
750 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
751 }
752 }
753 }
754}
755
aaa36a97
AD
756static void dce_v10_0_program_fmt(struct drm_encoder *encoder)
757{
758 struct drm_device *dev = encoder->dev;
759 struct amdgpu_device *adev = dev->dev_private;
760 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
761 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
762 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
763 int bpc = 0;
764 u32 tmp = 0;
765 enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
766
767 if (connector) {
768 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
769 bpc = amdgpu_connector_get_monitor_bpc(connector);
770 dither = amdgpu_connector->dither;
771 }
772
773 /* LVDS/eDP FMT is set up by atom */
774 if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
775 return;
776
777 /* not needed for analog */
778 if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
779 (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
780 return;
781
782 if (bpc == 0)
783 return;
784
785 switch (bpc) {
786 case 6:
787 if (dither == AMDGPU_FMT_DITHER_ENABLE) {
788 /* XXX sort out optimal dither settings */
789 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
790 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
791 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
792 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
793 } else {
794 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
795 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
796 }
797 break;
798 case 8:
799 if (dither == AMDGPU_FMT_DITHER_ENABLE) {
800 /* XXX sort out optimal dither settings */
801 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
802 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
803 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
804 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
805 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
806 } else {
807 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
808 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
809 }
810 break;
811 case 10:
812 if (dither == AMDGPU_FMT_DITHER_ENABLE) {
813 /* XXX sort out optimal dither settings */
814 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
815 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
816 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
817 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
818 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
819 } else {
820 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
821 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
822 }
823 break;
824 default:
825 /* not needed */
826 break;
827 }
828
829 WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
830}
831
832
833/* display watermark setup */
834/**
835 * dce_v10_0_line_buffer_adjust - Set up the line buffer
836 *
837 * @adev: amdgpu_device pointer
838 * @amdgpu_crtc: the selected display controller
839 * @mode: the current display mode on the selected display
840 * controller
841 *
842 * Setup up the line buffer allocation for
843 * the selected display controller (CIK).
844 * Returns the line buffer size in pixels.
845 */
846static u32 dce_v10_0_line_buffer_adjust(struct amdgpu_device *adev,
847 struct amdgpu_crtc *amdgpu_crtc,
848 struct drm_display_mode *mode)
849{
850 u32 tmp, buffer_alloc, i, mem_cfg;
851 u32 pipe_offset = amdgpu_crtc->crtc_id;
852 /*
853 * Line Buffer Setup
854 * There are 6 line buffers, one for each display controllers.
855 * There are 3 partitions per LB. Select the number of partitions
856 * to enable based on the display width. For display widths larger
857 * than 4096, you need use to use 2 display controllers and combine
858 * them using the stereo blender.
859 */
860 if (amdgpu_crtc->base.enabled && mode) {
861 if (mode->crtc_hdisplay < 1920) {
862 mem_cfg = 1;
863 buffer_alloc = 2;
864 } else if (mode->crtc_hdisplay < 2560) {
865 mem_cfg = 2;
866 buffer_alloc = 2;
867 } else if (mode->crtc_hdisplay < 4096) {
868 mem_cfg = 0;
2f7d10b3 869 buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
aaa36a97
AD
870 } else {
871 DRM_DEBUG_KMS("Mode too big for LB!\n");
872 mem_cfg = 0;
2f7d10b3 873 buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
aaa36a97
AD
874 }
875 } else {
876 mem_cfg = 1;
877 buffer_alloc = 0;
878 }
879
880 tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
881 tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
882 WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);
883
884 tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
885 tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
886 WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);
887
888 for (i = 0; i < adev->usec_timeout; i++) {
889 tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
890 if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
891 break;
892 udelay(1);
893 }
894
895 if (amdgpu_crtc->base.enabled && mode) {
896 switch (mem_cfg) {
897 case 0:
898 default:
899 return 4096 * 2;
900 case 1:
901 return 1920 * 2;
902 case 2:
903 return 2560 * 2;
904 }
905 }
906
907 /* controller not enabled, so no lb used */
908 return 0;
909}
910
911/**
912 * cik_get_number_of_dram_channels - get the number of dram channels
913 *
914 * @adev: amdgpu_device pointer
915 *
916 * Look up the number of video ram channels (CIK).
917 * Used for display watermark bandwidth calculations
918 * Returns the number of dram channels
919 */
920static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
921{
922 u32 tmp = RREG32(mmMC_SHARED_CHMAP);
923
924 switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
925 case 0:
926 default:
927 return 1;
928 case 1:
929 return 2;
930 case 2:
931 return 4;
932 case 3:
933 return 8;
934 case 4:
935 return 3;
936 case 5:
937 return 6;
938 case 6:
939 return 10;
940 case 7:
941 return 12;
942 case 8:
943 return 16;
944 }
945}
946
947struct dce10_wm_params {
948 u32 dram_channels; /* number of dram channels */
949 u32 yclk; /* bandwidth per dram data pin in kHz */
950 u32 sclk; /* engine clock in kHz */
951 u32 disp_clk; /* display clock in kHz */
952 u32 src_width; /* viewport width */
953 u32 active_time; /* active display time in ns */
954 u32 blank_time; /* blank time in ns */
955 bool interlaced; /* mode is interlaced */
956 fixed20_12 vsc; /* vertical scale ratio */
957 u32 num_heads; /* number of active crtcs */
958 u32 bytes_per_pixel; /* bytes per pixel display + overlay */
959 u32 lb_size; /* line buffer allocated to pipe */
960 u32 vtaps; /* vertical scaler taps */
961};
962
963/**
964 * dce_v10_0_dram_bandwidth - get the dram bandwidth
965 *
966 * @wm: watermark calculation data
967 *
968 * Calculate the raw dram bandwidth (CIK).
969 * Used for display watermark bandwidth calculations
970 * Returns the dram bandwidth in MBytes/s
971 */
972static u32 dce_v10_0_dram_bandwidth(struct dce10_wm_params *wm)
973{
974 /* Calculate raw DRAM Bandwidth */
975 fixed20_12 dram_efficiency; /* 0.7 */
976 fixed20_12 yclk, dram_channels, bandwidth;
977 fixed20_12 a;
978
979 a.full = dfixed_const(1000);
980 yclk.full = dfixed_const(wm->yclk);
981 yclk.full = dfixed_div(yclk, a);
982 dram_channels.full = dfixed_const(wm->dram_channels * 4);
983 a.full = dfixed_const(10);
984 dram_efficiency.full = dfixed_const(7);
985 dram_efficiency.full = dfixed_div(dram_efficiency, a);
986 bandwidth.full = dfixed_mul(dram_channels, yclk);
987 bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
988
989 return dfixed_trunc(bandwidth);
990}
991
992/**
993 * dce_v10_0_dram_bandwidth_for_display - get the dram bandwidth for display
994 *
995 * @wm: watermark calculation data
996 *
997 * Calculate the dram bandwidth used for display (CIK).
998 * Used for display watermark bandwidth calculations
999 * Returns the dram bandwidth for display in MBytes/s
1000 */
1001static u32 dce_v10_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
1002{
1003 /* Calculate DRAM Bandwidth and the part allocated to display. */
1004 fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
1005 fixed20_12 yclk, dram_channels, bandwidth;
1006 fixed20_12 a;
1007
1008 a.full = dfixed_const(1000);
1009 yclk.full = dfixed_const(wm->yclk);
1010 yclk.full = dfixed_div(yclk, a);
1011 dram_channels.full = dfixed_const(wm->dram_channels * 4);
1012 a.full = dfixed_const(10);
1013 disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
1014 disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
1015 bandwidth.full = dfixed_mul(dram_channels, yclk);
1016 bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
1017
1018 return dfixed_trunc(bandwidth);
1019}
1020
1021/**
1022 * dce_v10_0_data_return_bandwidth - get the data return bandwidth
1023 *
1024 * @wm: watermark calculation data
1025 *
1026 * Calculate the data return bandwidth used for display (CIK).
1027 * Used for display watermark bandwidth calculations
1028 * Returns the data return bandwidth in MBytes/s
1029 */
1030static u32 dce_v10_0_data_return_bandwidth(struct dce10_wm_params *wm)
1031{
1032 /* Calculate the display Data return Bandwidth */
1033 fixed20_12 return_efficiency; /* 0.8 */
1034 fixed20_12 sclk, bandwidth;
1035 fixed20_12 a;
1036
1037 a.full = dfixed_const(1000);
1038 sclk.full = dfixed_const(wm->sclk);
1039 sclk.full = dfixed_div(sclk, a);
1040 a.full = dfixed_const(10);
1041 return_efficiency.full = dfixed_const(8);
1042 return_efficiency.full = dfixed_div(return_efficiency, a);
1043 a.full = dfixed_const(32);
1044 bandwidth.full = dfixed_mul(a, sclk);
1045 bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
1046
1047 return dfixed_trunc(bandwidth);
1048}
1049
1050/**
1051 * dce_v10_0_dmif_request_bandwidth - get the dmif bandwidth
1052 *
1053 * @wm: watermark calculation data
1054 *
1055 * Calculate the dmif bandwidth used for display (CIK).
1056 * Used for display watermark bandwidth calculations
1057 * Returns the dmif bandwidth in MBytes/s
1058 */
1059static u32 dce_v10_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
1060{
1061 /* Calculate the DMIF Request Bandwidth */
1062 fixed20_12 disp_clk_request_efficiency; /* 0.8 */
1063 fixed20_12 disp_clk, bandwidth;
1064 fixed20_12 a, b;
1065
1066 a.full = dfixed_const(1000);
1067 disp_clk.full = dfixed_const(wm->disp_clk);
1068 disp_clk.full = dfixed_div(disp_clk, a);
1069 a.full = dfixed_const(32);
1070 b.full = dfixed_mul(a, disp_clk);
1071
1072 a.full = dfixed_const(10);
1073 disp_clk_request_efficiency.full = dfixed_const(8);
1074 disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
1075
1076 bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
1077
1078 return dfixed_trunc(bandwidth);
1079}
1080
1081/**
1082 * dce_v10_0_available_bandwidth - get the min available bandwidth
1083 *
1084 * @wm: watermark calculation data
1085 *
1086 * Calculate the min available bandwidth used for display (CIK).
1087 * Used for display watermark bandwidth calculations
1088 * Returns the min available bandwidth in MBytes/s
1089 */
1090static u32 dce_v10_0_available_bandwidth(struct dce10_wm_params *wm)
1091{
1092 /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
1093 u32 dram_bandwidth = dce_v10_0_dram_bandwidth(wm);
1094 u32 data_return_bandwidth = dce_v10_0_data_return_bandwidth(wm);
1095 u32 dmif_req_bandwidth = dce_v10_0_dmif_request_bandwidth(wm);
1096
1097 return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
1098}
1099
1100/**
1101 * dce_v10_0_average_bandwidth - get the average available bandwidth
1102 *
1103 * @wm: watermark calculation data
1104 *
1105 * Calculate the average available bandwidth used for display (CIK).
1106 * Used for display watermark bandwidth calculations
1107 * Returns the average available bandwidth in MBytes/s
1108 */
1109static u32 dce_v10_0_average_bandwidth(struct dce10_wm_params *wm)
1110{
1111 /* Calculate the display mode Average Bandwidth
1112 * DisplayMode should contain the source and destination dimensions,
1113 * timing, etc.
1114 */
1115 fixed20_12 bpp;
1116 fixed20_12 line_time;
1117 fixed20_12 src_width;
1118 fixed20_12 bandwidth;
1119 fixed20_12 a;
1120
1121 a.full = dfixed_const(1000);
1122 line_time.full = dfixed_const(wm->active_time + wm->blank_time);
1123 line_time.full = dfixed_div(line_time, a);
1124 bpp.full = dfixed_const(wm->bytes_per_pixel);
1125 src_width.full = dfixed_const(wm->src_width);
1126 bandwidth.full = dfixed_mul(src_width, bpp);
1127 bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
1128 bandwidth.full = dfixed_div(bandwidth, line_time);
1129
1130 return dfixed_trunc(bandwidth);
1131}
1132
1133/**
1134 * dce_v10_0_latency_watermark - get the latency watermark
1135 *
1136 * @wm: watermark calculation data
1137 *
1138 * Calculate the latency watermark (CIK).
1139 * Used for display watermark bandwidth calculations
1140 * Returns the latency watermark in ns
1141 */
1142static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm)
1143{
1144 /* First calculate the latency in ns */
1145 u32 mc_latency = 2000; /* 2000 ns. */
1146 u32 available_bandwidth = dce_v10_0_available_bandwidth(wm);
1147 u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
1148 u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
1149 u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
1150 u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
1151 (wm->num_heads * cursor_line_pair_return_time);
1152 u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
1153 u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
1154 u32 tmp, dmif_size = 12288;
1155 fixed20_12 a, b, c;
1156
1157 if (wm->num_heads == 0)
1158 return 0;
1159
1160 a.full = dfixed_const(2);
1161 b.full = dfixed_const(1);
1162 if ((wm->vsc.full > a.full) ||
1163 ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
1164 (wm->vtaps >= 5) ||
1165 ((wm->vsc.full >= a.full) && wm->interlaced))
1166 max_src_lines_per_dst_line = 4;
1167 else
1168 max_src_lines_per_dst_line = 2;
1169
1170 a.full = dfixed_const(available_bandwidth);
1171 b.full = dfixed_const(wm->num_heads);
1172 a.full = dfixed_div(a, b);
1173
1174 b.full = dfixed_const(mc_latency + 512);
1175 c.full = dfixed_const(wm->disp_clk);
1176 b.full = dfixed_div(b, c);
1177
1178 c.full = dfixed_const(dmif_size);
1179 b.full = dfixed_div(c, b);
1180
1181 tmp = min(dfixed_trunc(a), dfixed_trunc(b));
1182
1183 b.full = dfixed_const(1000);
1184 c.full = dfixed_const(wm->disp_clk);
1185 b.full = dfixed_div(c, b);
1186 c.full = dfixed_const(wm->bytes_per_pixel);
1187 b.full = dfixed_mul(b, c);
1188
1189 lb_fill_bw = min(tmp, dfixed_trunc(b));
1190
1191 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
1192 b.full = dfixed_const(1000);
1193 c.full = dfixed_const(lb_fill_bw);
1194 b.full = dfixed_div(c, b);
1195 a.full = dfixed_div(a, b);
1196 line_fill_time = dfixed_trunc(a);
1197
1198 if (line_fill_time < wm->active_time)
1199 return latency;
1200 else
1201 return latency + (line_fill_time - wm->active_time);
1202
1203}
1204
1205/**
1206 * dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display - check
1207 * average and available dram bandwidth
1208 *
1209 * @wm: watermark calculation data
1210 *
1211 * Check if the display average bandwidth fits in the display
1212 * dram bandwidth (CIK).
1213 * Used for display watermark bandwidth calculations
1214 * Returns true if the display fits, false if not.
1215 */
1216static bool dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
1217{
1218 if (dce_v10_0_average_bandwidth(wm) <=
1219 (dce_v10_0_dram_bandwidth_for_display(wm) / wm->num_heads))
1220 return true;
1221 else
1222 return false;
1223}
1224
1225/**
1226 * dce_v10_0_average_bandwidth_vs_available_bandwidth - check
1227 * average and available bandwidth
1228 *
1229 * @wm: watermark calculation data
1230 *
1231 * Check if the display average bandwidth fits in the display
1232 * available bandwidth (CIK).
1233 * Used for display watermark bandwidth calculations
1234 * Returns true if the display fits, false if not.
1235 */
1236static bool dce_v10_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
1237{
1238 if (dce_v10_0_average_bandwidth(wm) <=
1239 (dce_v10_0_available_bandwidth(wm) / wm->num_heads))
1240 return true;
1241 else
1242 return false;
1243}
1244
1245/**
1246 * dce_v10_0_check_latency_hiding - check latency hiding
1247 *
1248 * @wm: watermark calculation data
1249 *
1250 * Check latency hiding (CIK).
1251 * Used for display watermark bandwidth calculations
1252 * Returns true if the display fits, false if not.
1253 */
1254static bool dce_v10_0_check_latency_hiding(struct dce10_wm_params *wm)
1255{
1256 u32 lb_partitions = wm->lb_size / wm->src_width;
1257 u32 line_time = wm->active_time + wm->blank_time;
1258 u32 latency_tolerant_lines;
1259 u32 latency_hiding;
1260 fixed20_12 a;
1261
1262 a.full = dfixed_const(1);
1263 if (wm->vsc.full > a.full)
1264 latency_tolerant_lines = 1;
1265 else {
1266 if (lb_partitions <= (wm->vtaps + 1))
1267 latency_tolerant_lines = 1;
1268 else
1269 latency_tolerant_lines = 2;
1270 }
1271
1272 latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
1273
1274 if (dce_v10_0_latency_watermark(wm) <= latency_hiding)
1275 return true;
1276 else
1277 return false;
1278}
1279
1280/**
1281 * dce_v10_0_program_watermarks - program display watermarks
1282 *
1283 * @adev: amdgpu_device pointer
1284 * @amdgpu_crtc: the selected display controller
1285 * @lb_size: line buffer size
1286 * @num_heads: number of display controllers in use
1287 *
1288 * Calculate and program the display watermarks for the
1289 * selected display controller (CIK).
1290 */
1291static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
1292 struct amdgpu_crtc *amdgpu_crtc,
1293 u32 lb_size, u32 num_heads)
1294{
1295 struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
1296 struct dce10_wm_params wm_low, wm_high;
1297 u32 pixel_period;
1298 u32 line_time = 0;
1299 u32 latency_watermark_a = 0, latency_watermark_b = 0;
8e36f9d3 1300 u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
aaa36a97
AD
1301
1302 if (amdgpu_crtc->base.enabled && num_heads && mode) {
1303 pixel_period = 1000000 / (u32)mode->clock;
1304 line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
1305
1306 /* watermark for high clocks */
1307 if (adev->pm.dpm_enabled) {
1308 wm_high.yclk =
1309 amdgpu_dpm_get_mclk(adev, false) * 10;
1310 wm_high.sclk =
1311 amdgpu_dpm_get_sclk(adev, false) * 10;
1312 } else {
1313 wm_high.yclk = adev->pm.current_mclk * 10;
1314 wm_high.sclk = adev->pm.current_sclk * 10;
1315 }
1316
1317 wm_high.disp_clk = mode->clock;
1318 wm_high.src_width = mode->crtc_hdisplay;
1319 wm_high.active_time = mode->crtc_hdisplay * pixel_period;
1320 wm_high.blank_time = line_time - wm_high.active_time;
1321 wm_high.interlaced = false;
1322 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1323 wm_high.interlaced = true;
1324 wm_high.vsc = amdgpu_crtc->vsc;
1325 wm_high.vtaps = 1;
1326 if (amdgpu_crtc->rmx_type != RMX_OFF)
1327 wm_high.vtaps = 2;
1328 wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
1329 wm_high.lb_size = lb_size;
1330 wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
1331 wm_high.num_heads = num_heads;
1332
1333 /* set for high clocks */
1334 latency_watermark_a = min(dce_v10_0_latency_watermark(&wm_high), (u32)65535);
1335
1336 /* possibly force display priority to high */
1337 /* should really do this at mode validation time... */
1338 if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
1339 !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
1340 !dce_v10_0_check_latency_hiding(&wm_high) ||
1341 (adev->mode_info.disp_priority == 2)) {
1342 DRM_DEBUG_KMS("force priority to high\n");
1343 }
1344
1345 /* watermark for low clocks */
1346 if (adev->pm.dpm_enabled) {
1347 wm_low.yclk =
1348 amdgpu_dpm_get_mclk(adev, true) * 10;
1349 wm_low.sclk =
1350 amdgpu_dpm_get_sclk(adev, true) * 10;
1351 } else {
1352 wm_low.yclk = adev->pm.current_mclk * 10;
1353 wm_low.sclk = adev->pm.current_sclk * 10;
1354 }
1355
1356 wm_low.disp_clk = mode->clock;
1357 wm_low.src_width = mode->crtc_hdisplay;
1358 wm_low.active_time = mode->crtc_hdisplay * pixel_period;
1359 wm_low.blank_time = line_time - wm_low.active_time;
1360 wm_low.interlaced = false;
1361 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1362 wm_low.interlaced = true;
1363 wm_low.vsc = amdgpu_crtc->vsc;
1364 wm_low.vtaps = 1;
1365 if (amdgpu_crtc->rmx_type != RMX_OFF)
1366 wm_low.vtaps = 2;
1367 wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
1368 wm_low.lb_size = lb_size;
1369 wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
1370 wm_low.num_heads = num_heads;
1371
1372 /* set for low clocks */
1373 latency_watermark_b = min(dce_v10_0_latency_watermark(&wm_low), (u32)65535);
1374
1375 /* possibly force display priority to high */
1376 /* should really do this at mode validation time... */
1377 if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
1378 !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
1379 !dce_v10_0_check_latency_hiding(&wm_low) ||
1380 (adev->mode_info.disp_priority == 2)) {
1381 DRM_DEBUG_KMS("force priority to high\n");
1382 }
8e36f9d3 1383 lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
aaa36a97
AD
1384 }
1385
1386 /* select wm A */
1387 wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1388 tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
1389 WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1390 tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
1391 tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
1392 tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
1393 WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1394 /* select wm B */
1395 tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
1396 WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1397 tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
be9fd2e9 1398 tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
aaa36a97
AD
1399 tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
1400 WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1401 /* restore original selection */
1402 WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
1403
1404 /* save values for DPM */
1405 amdgpu_crtc->line_time = line_time;
1406 amdgpu_crtc->wm_high = latency_watermark_a;
1407 amdgpu_crtc->wm_low = latency_watermark_b;
8e36f9d3
AD
1408 /* Save number of lines the linebuffer leads before the scanout */
1409 amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
aaa36a97
AD
1410}
1411
1412/**
1413 * dce_v10_0_bandwidth_update - program display watermarks
1414 *
1415 * @adev: amdgpu_device pointer
1416 *
1417 * Calculate and program the display watermarks and line
1418 * buffer allocation (CIK).
1419 */
1420static void dce_v10_0_bandwidth_update(struct amdgpu_device *adev)
1421{
1422 struct drm_display_mode *mode = NULL;
1423 u32 num_heads = 0, lb_size;
1424 int i;
1425
1426 amdgpu_update_display_priority(adev);
1427
1428 for (i = 0; i < adev->mode_info.num_crtc; i++) {
1429 if (adev->mode_info.crtcs[i]->base.enabled)
1430 num_heads++;
1431 }
1432 for (i = 0; i < adev->mode_info.num_crtc; i++) {
1433 mode = &adev->mode_info.crtcs[i]->base.mode;
1434 lb_size = dce_v10_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
1435 dce_v10_0_program_watermarks(adev, adev->mode_info.crtcs[i],
1436 lb_size, num_heads);
1437 }
1438}
1439
1440static void dce_v10_0_audio_get_connected_pins(struct amdgpu_device *adev)
1441{
1442 int i;
1443 u32 offset, tmp;
1444
1445 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1446 offset = adev->mode_info.audio.pin[i].offset;
1447 tmp = RREG32_AUDIO_ENDPT(offset,
1448 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1449 if (((tmp &
1450 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
1451 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
1452 adev->mode_info.audio.pin[i].connected = false;
1453 else
1454 adev->mode_info.audio.pin[i].connected = true;
1455 }
1456}
1457
1458static struct amdgpu_audio_pin *dce_v10_0_audio_get_pin(struct amdgpu_device *adev)
1459{
1460 int i;
1461
1462 dce_v10_0_audio_get_connected_pins(adev);
1463
1464 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1465 if (adev->mode_info.audio.pin[i].connected)
1466 return &adev->mode_info.audio.pin[i];
1467 }
1468 DRM_ERROR("No connected audio pins found!\n");
1469 return NULL;
1470}
1471
1472static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1473{
1474 struct amdgpu_device *adev = encoder->dev->dev_private;
1475 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1476 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1477 u32 tmp;
1478
1479 if (!dig || !dig->afmt || !dig->afmt->pin)
1480 return;
1481
1482 tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset);
1483 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id);
1484 WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp);
1485}
1486
1487static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
1488 struct drm_display_mode *mode)
1489{
1490 struct amdgpu_device *adev = encoder->dev->dev_private;
1491 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1492 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1493 struct drm_connector *connector;
1494 struct amdgpu_connector *amdgpu_connector = NULL;
1495 u32 tmp;
1496 int interlace = 0;
1497
1498 if (!dig || !dig->afmt || !dig->afmt->pin)
1499 return;
1500
1501 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1502 if (connector->encoder == encoder) {
1503 amdgpu_connector = to_amdgpu_connector(connector);
1504 break;
1505 }
1506 }
1507
1508 if (!amdgpu_connector) {
1509 DRM_ERROR("Couldn't find encoder's connector\n");
1510 return;
1511 }
1512
1513 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1514 interlace = 1;
1515 if (connector->latency_present[interlace]) {
1516 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1517 VIDEO_LIPSYNC, connector->video_latency[interlace]);
1518 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1519 AUDIO_LIPSYNC, connector->audio_latency[interlace]);
1520 } else {
1521 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1522 VIDEO_LIPSYNC, 0);
1523 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1524 AUDIO_LIPSYNC, 0);
1525 }
1526 WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1527 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1528}
1529
1530static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1531{
1532 struct amdgpu_device *adev = encoder->dev->dev_private;
1533 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1534 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1535 struct drm_connector *connector;
1536 struct amdgpu_connector *amdgpu_connector = NULL;
1537 u32 tmp;
1538 u8 *sadb = NULL;
1539 int sad_count;
1540
1541 if (!dig || !dig->afmt || !dig->afmt->pin)
1542 return;
1543
1544 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1545 if (connector->encoder == encoder) {
1546 amdgpu_connector = to_amdgpu_connector(connector);
1547 break;
1548 }
1549 }
1550
1551 if (!amdgpu_connector) {
1552 DRM_ERROR("Couldn't find encoder's connector\n");
1553 return;
1554 }
1555
1556 sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
1557 if (sad_count < 0) {
1558 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1559 sad_count = 0;
1560 }
1561
1562 /* program the speaker allocation */
1563 tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1564 ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1565 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1566 DP_CONNECTION, 0);
1567 /* set HDMI mode */
1568 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1569 HDMI_CONNECTION, 1);
1570 if (sad_count)
1571 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1572 SPEAKER_ALLOCATION, sadb[0]);
1573 else
1574 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1575 SPEAKER_ALLOCATION, 5); /* stereo */
1576 WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1577 ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1578
1579 kfree(sadb);
1580}
1581
1582static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
1583{
1584 struct amdgpu_device *adev = encoder->dev->dev_private;
1585 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1586 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1587 struct drm_connector *connector;
1588 struct amdgpu_connector *amdgpu_connector = NULL;
1589 struct cea_sad *sads;
1590 int i, sad_count;
1591
1592 static const u16 eld_reg_to_type[][2] = {
1593 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
1594 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
1595 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
1596 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
1597 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
1598 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
1599 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
1600 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
1601 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
1602 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
1603 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
1604 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
1605 };
1606
1607 if (!dig || !dig->afmt || !dig->afmt->pin)
1608 return;
1609
1610 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1611 if (connector->encoder == encoder) {
1612 amdgpu_connector = to_amdgpu_connector(connector);
1613 break;
1614 }
1615 }
1616
1617 if (!amdgpu_connector) {
1618 DRM_ERROR("Couldn't find encoder's connector\n");
1619 return;
1620 }
1621
1622 sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
1623 if (sad_count <= 0) {
1624 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1625 return;
1626 }
1627 BUG_ON(!sads);
1628
1629 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
1630 u32 tmp = 0;
1631 u8 stereo_freqs = 0;
1632 int max_channels = -1;
1633 int j;
1634
1635 for (j = 0; j < sad_count; j++) {
1636 struct cea_sad *sad = &sads[j];
1637
1638 if (sad->format == eld_reg_to_type[i][1]) {
1639 if (sad->channels > max_channels) {
1640 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1641 MAX_CHANNELS, sad->channels);
1642 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1643 DESCRIPTOR_BYTE_2, sad->byte2);
1644 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1645 SUPPORTED_FREQUENCIES, sad->freq);
1646 max_channels = sad->channels;
1647 }
1648
1649 if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
1650 stereo_freqs |= sad->freq;
1651 else
1652 break;
1653 }
1654 }
1655
1656 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1657 SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
1658 WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
1659 }
1660
1661 kfree(sads);
1662}
1663
1664static void dce_v10_0_audio_enable(struct amdgpu_device *adev,
1665 struct amdgpu_audio_pin *pin,
1666 bool enable)
1667{
1668 if (!pin)
1669 return;
1670
1671 WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1672 enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1673}
1674
1675static const u32 pin_offsets[] =
1676{
1677 AUD0_REGISTER_OFFSET,
1678 AUD1_REGISTER_OFFSET,
1679 AUD2_REGISTER_OFFSET,
1680 AUD3_REGISTER_OFFSET,
1681 AUD4_REGISTER_OFFSET,
1682 AUD5_REGISTER_OFFSET,
1683 AUD6_REGISTER_OFFSET,
1684};
1685
1686static int dce_v10_0_audio_init(struct amdgpu_device *adev)
1687{
1688 int i;
1689
1690 if (!amdgpu_audio)
1691 return 0;
1692
1693 adev->mode_info.audio.enabled = true;
1694
1695 adev->mode_info.audio.num_pins = 7;
1696
1697 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1698 adev->mode_info.audio.pin[i].channels = -1;
1699 adev->mode_info.audio.pin[i].rate = -1;
1700 adev->mode_info.audio.pin[i].bits_per_sample = -1;
1701 adev->mode_info.audio.pin[i].status_bits = 0;
1702 adev->mode_info.audio.pin[i].category_code = 0;
1703 adev->mode_info.audio.pin[i].connected = false;
1704 adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1705 adev->mode_info.audio.pin[i].id = i;
1706 /* disable audio. it will be set up later */
1707 /* XXX remove once we switch to ip funcs */
1708 dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1709 }
1710
1711 return 0;
1712}
1713
1714static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
1715{
1716 int i;
1717
1718 if (!amdgpu_audio)
1719 return;
1720
1721 if (!adev->mode_info.audio.enabled)
1722 return;
1723
1724 for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1725 dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1726
1727 adev->mode_info.audio.enabled = false;
1728}
1729
1730/*
1731 * update the N and CTS parameters for a given pixel clock rate
1732 */
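/* HDMI spec note: the sink regenerates the audio clock from
 * 128*fs = f_TMDS * N / CTS, so amdgpu_afmt_acr() supplies matching
 * N/CTS pairs for the 32, 44.1 and 48 kHz base rates at this clock.
 */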
1733static void dce_v10_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
1734{
1735 struct drm_device *dev = encoder->dev;
1736 struct amdgpu_device *adev = dev->dev_private;
1737 struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1738 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1739 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1740 u32 tmp;
1741
1742 tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
1743 tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
1744 WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
1745 tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
1746 tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
1747 WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
1748
1749 tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
1750 tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
1751 WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
1752 tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
1753 tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
1754 WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
1755
1756 tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
1757 tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
1758 WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
1759 tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
1760 tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
1761 WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
1762
1763}
1764
1765/*
1766 * build an HDMI Video Info Frame
1767 */
1768static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1769 void *buffer, size_t size)
1770{
1771 struct drm_device *dev = encoder->dev;
1772 struct amdgpu_device *adev = dev->dev_private;
1773 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1774 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1775 uint8_t *frame = buffer + 3;
1776 uint8_t *header = buffer;
1777
1778 WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
1779 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1780 WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
1781 frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
1782 WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
1783 frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
1784 WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
1785 frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
1786}
1787
1788static void dce_v10_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1789{
1790 struct drm_device *dev = encoder->dev;
1791 struct amdgpu_device *adev = dev->dev_private;
1792 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1793 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1794 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1795 u32 dto_phase = 24 * 1000;
1796 u32 dto_modulo = clock;
1797 u32 tmp;
1798
1799 if (!dig || !dig->afmt)
1800 return;
1801
1802 /* XXX two dtos; generally use dto0 for hdmi */
1803	/* Express [24MHz / target pixel clock] as an exact rational
1804	 * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE is the
1805	 * numerator and DCCG_AUDIO_DTOx_MODULE is the denominator.
1806	 */
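	/* e.g. for a 148500 kHz (1080p60) pixel clock this programs
	 * PHASE = 24000 and MODULE = 148500, i.e. the audio reference runs
	 * at 24000/148500 of the pixel clock, a fixed 24 MHz */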
1807 tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
1808 tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL,
1809 amdgpu_crtc->crtc_id);
1810 WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
1811 WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
1812 WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
1813}
1814
1815/*
1816 * update the info frames with the data from the current display mode
1817 */
1818static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
1819 struct drm_display_mode *mode)
1820{
1821 struct drm_device *dev = encoder->dev;
1822 struct amdgpu_device *adev = dev->dev_private;
1823 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1824 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1825 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1826 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1827 struct hdmi_avi_infoframe frame;
1828 ssize_t err;
1829 u32 tmp;
1830 int bpc = 8;
1831
1832 if (!dig || !dig->afmt)
1833 return;
1834
1835 /* Silent, r600_hdmi_enable will raise WARN for us */
1836 if (!dig->afmt->enabled)
1837 return;
1838
1839 /* hdmi deep color mode general control packets setup, if bpc > 8 */
1840 if (encoder->crtc) {
1841 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1842 bpc = amdgpu_crtc->bpc;
1843 }
1844
1845 /* disable audio prior to setting up hw */
1846 dig->afmt->pin = dce_v10_0_audio_get_pin(adev);
1847 dce_v10_0_audio_enable(adev, dig->afmt->pin, false);
1848
1849 dce_v10_0_audio_set_dto(encoder, mode->clock);
1850
1851 tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
1852 tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
1853 WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */
1854
1855 WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000);
1856
1857 tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset);
1858 switch (bpc) {
1859 case 0:
1860 case 6:
1861 case 8:
1862 case 16:
1863 default:
1864 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0);
1865 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
1866 DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
1867 connector->name, bpc);
1868 break;
1869 case 10:
1870 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
1871 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1);
1872 DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
1873 connector->name);
1874 break;
1875 case 12:
1876 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
1877 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2);
1878 DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
1879 connector->name);
1880 break;
1881 }
1882 WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp);
1883
1884 tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
1885 tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */
1886 tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */
1887 tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */
1888 WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
1889
1890 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1891 /* enable audio info frames (frames won't be set until audio is enabled) */
1892 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
1893 /* required for audio info values to be updated */
1894 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
1895 WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1896
1897 tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
1898 /* required for audio info values to be updated */
1899 tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
1900 WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1901
1902 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1903 /* anything other than 0 */
1904 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2);
1905 WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1906
1907 WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */
1908
1909 tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1910 /* set the default audio delay */
1911 tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
1912	/* should be sufficient for all audio modes and small enough for all hblanks */
1913 tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
1914 WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1915
1916 tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1917 /* allow 60958 channel status fields to be updated */
1918 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
1919 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1920
1921 tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
1922 if (bpc > 8)
1923 /* clear SW CTS value */
1924 tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0);
1925 else
1926 /* select SW CTS value */
1927 tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1);
1928	/* allow hw to send ACR packets when required */
1929 tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
1930 WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
1931
1932 dce_v10_0_afmt_update_ACR(encoder, mode->clock);
1933
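	/* IEC 60958 channel status: channel numbers are assigned in order
	 * (L = 1, R = 2, then channels 2..7 get 3..8) so the sink can map
	 * subframes to speakers */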
1934 tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
1935 tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
1936 WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
1937
1938 tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
1939 tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
1940 WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
1941
1942 tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
1943 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
1944 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
1945 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
1946 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
1947 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
1948 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
1949 WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
1950
1951 dce_v10_0_audio_write_speaker_allocation(encoder);
1952
1953 WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset,
1954 (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
1955
1956 dce_v10_0_afmt_audio_select_pin(encoder);
1957 dce_v10_0_audio_write_sad_regs(encoder);
1958 dce_v10_0_audio_write_latency_fields(encoder, mode);
1959
1960 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
1961 if (err < 0) {
1962 DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1963 return;
1964 }
1965
1966 err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1967 if (err < 0) {
1968 DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1969 return;
1970 }
1971
1972 dce_v10_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
1973
1974 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1975 /* enable AVI info frames */
1976 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
1977	/* required for avi info values to be updated */
1978 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
1979 WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1980
1981 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1982 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
1983 WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1984
1985 tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1986 /* send audio packets */
1987 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1988 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1989
1990 WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF);
1991 WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF);
1992 WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001);
1993 WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001);
1994
1995	/* enable audio after setting up hw */
1996 dce_v10_0_audio_enable(adev, dig->afmt->pin, true);
1997}
1998
1999static void dce_v10_0_afmt_enable(struct drm_encoder *encoder, bool enable)
2000{
2001 struct drm_device *dev = encoder->dev;
2002 struct amdgpu_device *adev = dev->dev_private;
2003 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2004 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2005
2006 if (!dig || !dig->afmt)
2007 return;
2008
2009 /* Silent, r600_hdmi_enable will raise WARN for us */
2010 if (enable && dig->afmt->enabled)
2011 return;
2012 if (!enable && !dig->afmt->enabled)
2013 return;
2014
2015 if (!enable && dig->afmt->pin) {
2016 dce_v10_0_audio_enable(adev, dig->afmt->pin, false);
2017 dig->afmt->pin = NULL;
2018 }
2019
2020 dig->afmt->enabled = enable;
2021
2022 DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
2023 enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
2024}
2025
2026static int dce_v10_0_afmt_init(struct amdgpu_device *adev)
2027{
2028 int i;
2029
2030 for (i = 0; i < adev->mode_info.num_dig; i++)
2031 adev->mode_info.afmt[i] = NULL;
2032
2033 /* DCE10 has audio blocks tied to DIG encoders */
2034 for (i = 0; i < adev->mode_info.num_dig; i++) {
2035 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
2036 if (adev->mode_info.afmt[i]) {
2037 adev->mode_info.afmt[i]->offset = dig_offsets[i];
2038 adev->mode_info.afmt[i]->id = i;
2039 } else {
2040 int j;
2041 for (j = 0; j < i; j++) {
2042 kfree(adev->mode_info.afmt[j]);
2043 adev->mode_info.afmt[j] = NULL;
2044 }
2045 return -ENOMEM;
2046 }
2047 }
2048	return 0;
2049}
2050
2051static void dce_v10_0_afmt_fini(struct amdgpu_device *adev)
2052{
2053 int i;
2054
2055 for (i = 0; i < adev->mode_info.num_dig; i++) {
2056 kfree(adev->mode_info.afmt[i]);
2057 adev->mode_info.afmt[i] = NULL;
2058 }
2059}
2060
2061static const u32 vga_control_regs[6] =
2062{
2063 mmD1VGA_CONTROL,
2064 mmD2VGA_CONTROL,
2065 mmD3VGA_CONTROL,
2066 mmD4VGA_CONTROL,
2067 mmD5VGA_CONTROL,
2068 mmD6VGA_CONTROL,
2069};
2070
2071static void dce_v10_0_vga_enable(struct drm_crtc *crtc, bool enable)
2072{
2073 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2074 struct drm_device *dev = crtc->dev;
2075 struct amdgpu_device *adev = dev->dev_private;
2076 u32 vga_control;
2077
2078 vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
2079 if (enable)
2080 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
2081 else
2082 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
2083}
2084
2085static void dce_v10_0_grph_enable(struct drm_crtc *crtc, bool enable)
2086{
2087 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2088 struct drm_device *dev = crtc->dev;
2089 struct amdgpu_device *adev = dev->dev_private;
2090
2091 if (enable)
2092 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
2093 else
2094 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
2095}
2096
2097static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
2098 struct drm_framebuffer *fb,
2099 int x, int y, int atomic)
2100{
2101 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2102 struct drm_device *dev = crtc->dev;
2103 struct amdgpu_device *adev = dev->dev_private;
2104 struct amdgpu_framebuffer *amdgpu_fb;
2105 struct drm_framebuffer *target_fb;
2106 struct drm_gem_object *obj;
2107 struct amdgpu_bo *rbo;
2108 uint64_t fb_location, tiling_flags;
2109 uint32_t fb_format, fb_pitch_pixels;
2110	u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
2111	u32 pipe_config;
2112 u32 tmp, viewport_w, viewport_h;
2113 int r;
2114 bool bypass_lut = false;
2115	char *format_name;
2116
2117 /* no fb bound */
2118 if (!atomic && !crtc->primary->fb) {
2119 DRM_DEBUG_KMS("No FB bound\n");
2120 return 0;
2121 }
2122
2123 if (atomic) {
2124 amdgpu_fb = to_amdgpu_framebuffer(fb);
2125 target_fb = fb;
2126	} else {
2127 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2128 target_fb = crtc->primary->fb;
2129 }
2130
2131 /* If atomic, assume fb object is pinned & idle & fenced and
2132 * just update base pointers
2133 */
2134 obj = amdgpu_fb->obj;
2135 rbo = gem_to_amdgpu_bo(obj);
2136 r = amdgpu_bo_reserve(rbo, false);
2137 if (unlikely(r != 0))
2138 return r;
2139
2140	if (atomic) {
2141		fb_location = amdgpu_bo_gpu_offset(rbo);
2142	} else {
2143 r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
2144 if (unlikely(r != 0)) {
2145 amdgpu_bo_unreserve(rbo);
2146 return -EINVAL;
2147 }
2148 }
2149
2150 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
2151 amdgpu_bo_unreserve(rbo);
2152
2153 pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2154
2155 switch (target_fb->pixel_format) {
2156 case DRM_FORMAT_C8:
2157 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
2158 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
2159 break;
2160 case DRM_FORMAT_XRGB4444:
2161 case DRM_FORMAT_ARGB4444:
2162 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
2163 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2);
2164#ifdef __BIG_ENDIAN
2165 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2166 ENDIAN_8IN16);
2167#endif
2168 break;
2169 case DRM_FORMAT_XRGB1555:
2170 case DRM_FORMAT_ARGB1555:
2171 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
2172 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
2173#ifdef __BIG_ENDIAN
2174 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2175 ENDIAN_8IN16);
2176#endif
2177 break;
2178 case DRM_FORMAT_BGRX5551:
2179 case DRM_FORMAT_BGRA5551:
2180 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
2181 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5);
2182#ifdef __BIG_ENDIAN
2183 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2184 ENDIAN_8IN16);
2185#endif
2186 break;
2187 case DRM_FORMAT_RGB565:
2188 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
2189 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
2190#ifdef __BIG_ENDIAN
2191 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2192 ENDIAN_8IN16);
2193#endif
2194 break;
2195 case DRM_FORMAT_XRGB8888:
2196 case DRM_FORMAT_ARGB8888:
2197 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
2198 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
2199#ifdef __BIG_ENDIAN
2200 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2201 ENDIAN_8IN32);
2202#endif
2203 break;
2204 case DRM_FORMAT_XRGB2101010:
2205 case DRM_FORMAT_ARGB2101010:
2206 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
2207 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
2208#ifdef __BIG_ENDIAN
2209 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2210 ENDIAN_8IN32);
2211#endif
2212	/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
2213 bypass_lut = true;
2214 break;
2215 case DRM_FORMAT_BGRX1010102:
2216 case DRM_FORMAT_BGRA1010102:
2217 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
2218 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4);
2219#ifdef __BIG_ENDIAN
2220 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2221 ENDIAN_8IN32);
2222#endif
2223	/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
2224 bypass_lut = true;
2225 break;
2226 default:
2227 format_name = drm_get_format_name(target_fb->pixel_format);
2228 DRM_ERROR("Unsupported screen format %s\n", format_name);
2229 kfree(format_name);
2230 return -EINVAL;
2231 }
2232
2233 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
2234 unsigned bankw, bankh, mtaspect, tile_split, num_banks;
2235
2236 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2237 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2238 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2239 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
2240 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2241
2242 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks);
2243 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
2244 ARRAY_2D_TILED_THIN1);
2245 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT,
2246 tile_split);
2247 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw);
2248 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh);
2249 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT,
2250 mtaspect);
2251 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE,
2252 ADDR_SURF_MICRO_TILING_DISPLAY);
2253	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
2254 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
2255 ARRAY_1D_TILED_THIN1);
2256 }
2257
2258 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG,
2259 pipe_config);
2260
2261 dce_v10_0_vga_enable(crtc, false);
2262
2263 /* Make sure surface address is updated at vertical blank rather than
2264 * horizontal blank
2265 */
2266 tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
2267 tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
2268 GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
2269 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2270
2271 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2272 upper_32_bits(fb_location));
2273 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2274 upper_32_bits(fb_location));
2275 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2276 (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
2277 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2278 (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
2279 WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2280 WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
2281
2282 /*
2283 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
2284 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
2285 * retain the full precision throughout the pipeline.
2286 */
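	/* e.g. a DRM_FORMAT_XRGB2101010 fb has 10-bit components; the
	 * 256-entry LUT would otherwise truncate them to their 8 msbs */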
2287 tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset);
2288 if (bypass_lut)
2289 tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1);
2290 else
2291 tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0);
2292 WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp);
2293
2294 if (bypass_lut)
2295 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
2296
2297 WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
2298 WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
2299 WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
2300 WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
2301 WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
2302 WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
2303
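	/* pitches[] is in bytes; e.g. a 1920-wide XRGB8888 fb with a
	 * 7680-byte pitch yields 7680 / 4 = 1920 pixels */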
2304 fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
2305 WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
2306
2307 dce_v10_0_grph_enable(crtc, true);
2308
2309 WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2310 target_fb->height);
2311
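	/* align the viewport start: x down to a multiple of 4, y to an
	 * even line (presumably a hw alignment requirement) */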
2312 x &= ~3;
2313 y &= ~1;
2314 WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2315 (x << 16) | y);
2316 viewport_w = crtc->mode.hdisplay;
2317 viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2318 WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2319 (viewport_w << 16) | viewport_h);
2320
2321 /* set pageflip to happen anywhere in vblank interval */
2322 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
2323
2324 if (!atomic && fb && fb != crtc->primary->fb) {
2325 amdgpu_fb = to_amdgpu_framebuffer(fb);
2326 rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2327 r = amdgpu_bo_reserve(rbo, false);
2328 if (unlikely(r != 0))
2329 return r;
2330 amdgpu_bo_unpin(rbo);
2331 amdgpu_bo_unreserve(rbo);
2332 }
2333
2334 /* Bytes per pixel may have changed */
2335 dce_v10_0_bandwidth_update(adev);
2336
2337 return 0;
2338}
2339
2340static void dce_v10_0_set_interleave(struct drm_crtc *crtc,
2341 struct drm_display_mode *mode)
2342{
2343 struct drm_device *dev = crtc->dev;
2344 struct amdgpu_device *adev = dev->dev_private;
2345 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2346 u32 tmp;
2347
2348 tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset);
2349 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2350 tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1);
2351 else
2352 tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0);
2353 WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp);
2354}
2355
2356static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc)
2357{
2358 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2359 struct drm_device *dev = crtc->dev;
2360 struct amdgpu_device *adev = dev->dev_private;
2361 int i;
2362 u32 tmp;
2363
2364 DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2365
2366 tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
2367 tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0);
2368 tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_OVL_MODE, 0);
2369 WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2370
2371 tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset);
2372 tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
2373 WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2374
2375 tmp = RREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset);
2376 tmp = REG_SET_FIELD(tmp, PRESCALE_OVL_CONTROL, OVL_PRESCALE_BYPASS, 1);
2377 WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2378
2379 tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset);
2380 tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
2381 tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, OVL_INPUT_GAMMA_MODE, 0);
2382 WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2383
2384 WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2385
2386 WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2387 WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2388 WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2389
2390 WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2391 WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2392 WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2393
2394 WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2395 WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2396
2397 WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2398 for (i = 0; i < 256; i++) {
2399 WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2400 (amdgpu_crtc->lut_r[i] << 20) |
2401 (amdgpu_crtc->lut_g[i] << 10) |
2402 (amdgpu_crtc->lut_b[i] << 0));
2403 }
2404
2405 tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
2406 tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0);
2407 tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, OVL_DEGAMMA_MODE, 0);
2408 tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0);
2409 WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2410
2411 tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset);
2412 tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0);
2413 tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, OVL_GAMUT_REMAP_MODE, 0);
2414 WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2415
2416 tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
2417 tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0);
2418 tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, OVL_REGAMMA_MODE, 0);
2419 WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2420
2421 tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
2422 tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0);
2423 tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_OVL_MODE, 0);
2424 WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2425
2426 /* XXX match this to the depth of the crtc fmt block, move to modeset? */
2427 WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0);
2428 /* XXX this only needs to be programmed once per crtc at startup,
2429 * not sure where the best place for it is
2430 */
2431 tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset);
2432 tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1);
2433 WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2434}
2435
2436static int dce_v10_0_pick_dig_encoder(struct drm_encoder *encoder)
2437{
2438 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2439 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2440
2441 switch (amdgpu_encoder->encoder_id) {
2442 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2443 if (dig->linkb)
2444 return 1;
2445 else
2446 return 0;
2447 break;
2448 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2449 if (dig->linkb)
2450 return 3;
2451 else
2452 return 2;
2453 break;
2454 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2455 if (dig->linkb)
2456 return 5;
2457 else
2458 return 4;
2459 break;
2460 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2461 return 6;
2462 break;
2463 default:
2464 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2465 return 0;
2466 }
2467}
2468
2469/**
2470 * dce_v10_0_pick_pll - Allocate a PPLL for use by the crtc.
2471 *
2472 * @crtc: drm crtc
2473 *
2474 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
2475 * a single PPLL can be used for all DP crtcs/encoders. For non-DP
2476 * monitors a dedicated PPLL must be used. If a particular board has
2477 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2478 * as there is no need to program the PLL itself. If we are not able to
2479 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2480 * avoid messing up an existing monitor.
2481 *
2482 * Asic specific PLL information
2483 *
2484 * DCE 10.x
2485 * Tonga
2486 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2487 * CI
2488 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2489 *
2490 */
2491static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc)
2492{
2493 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2494 struct drm_device *dev = crtc->dev;
2495 struct amdgpu_device *adev = dev->dev_private;
2496 u32 pll_in_use;
2497 int pll;
2498
2499 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2500 if (adev->clock.dp_extclk)
2501 /* skip PPLL programming if using ext clock */
2502 return ATOM_PPLL_INVALID;
2503 else {
2504 /* use the same PPLL for all DP monitors */
2505 pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2506 if (pll != ATOM_PPLL_INVALID)
2507 return pll;
2508 }
2509 } else {
2510 /* use the same PPLL for all monitors with the same clock */
2511 pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2512 if (pll != ATOM_PPLL_INVALID)
2513 return pll;
2514 }
2515
2516 /* DCE10 has PPLL0, PPLL1, and PPLL2 */
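	/* pll_in_use is a bitmask of the ATOM_PPLLn ids already claimed by
	 * other crtcs; e.g. if only PPLL2 is taken it equals (1 << ATOM_PPLL2)
	 * and the checks below fall through to PPLL1 */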
2517 pll_in_use = amdgpu_pll_get_use_mask(crtc);
2518 if (!(pll_in_use & (1 << ATOM_PPLL2)))
2519 return ATOM_PPLL2;
2520 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2521 return ATOM_PPLL1;
2522 if (!(pll_in_use & (1 << ATOM_PPLL0)))
2523 return ATOM_PPLL0;
2524 DRM_ERROR("unable to allocate a PPLL\n");
2525 return ATOM_PPLL_INVALID;
2526}
2527
2528static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2529{
2530 struct amdgpu_device *adev = crtc->dev->dev_private;
2531 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2532 uint32_t cur_lock;
2533
2534 cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2535 if (lock)
2536 cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1);
2537 else
2538 cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0);
2539 WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2540}
2541
2542static void dce_v10_0_hide_cursor(struct drm_crtc *crtc)
2543{
2544 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2545 struct amdgpu_device *adev = crtc->dev->dev_private;
2546 u32 tmp;
2547
2548 tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2549 tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
2550 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2551}
2552
2553static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
2554{
2555 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2556 struct amdgpu_device *adev = crtc->dev->dev_private;
2557 u32 tmp;
2558
2559 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2560 upper_32_bits(amdgpu_crtc->cursor_addr));
2561 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2562 lower_32_bits(amdgpu_crtc->cursor_addr));
2563
2564 tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2565 tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
2566 tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
2567 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2568}
2569
2570static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
2571 int x, int y)
2572{
2573 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2574 struct amdgpu_device *adev = crtc->dev->dev_private;
2575 int xorigin = 0, yorigin = 0;
2576
2577	/* avivo cursors are offset into the total surface */
2578 x += crtc->x;
2579 y += crtc->y;
2580 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2581
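	/* a negative position is emulated via the hotspot: the cursor is
	 * parked at 0 and CUR_HOT_SPOT absorbs the overshoot, clamped to
	 * the max cursor size */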
2582 if (x < 0) {
2583 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2584 x = 0;
2585 }
2586 if (y < 0) {
2587 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2588 y = 0;
2589 }
2590
2591 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2592 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2593 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2594 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2595
2596 amdgpu_crtc->cursor_x = x;
2597 amdgpu_crtc->cursor_y = y;
2598
2599 return 0;
2600}
2601
2602static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
2603 int x, int y)
2604{
2605 int ret;
2606
2607 dce_v10_0_lock_cursor(crtc, true);
2608 ret = dce_v10_0_cursor_move_locked(crtc, x, y);
2609 dce_v10_0_lock_cursor(crtc, false);
2610
2611 return ret;
2612}
2613
2614static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
2615 struct drm_file *file_priv,
2616 uint32_t handle,
2617 uint32_t width,
2618 uint32_t height,
2619 int32_t hot_x,
2620 int32_t hot_y)
2621{
2622 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2623 struct drm_gem_object *obj;
2624	struct amdgpu_bo *aobj;
2625 int ret;
2626
2627 if (!handle) {
2628 /* turn off cursor */
2629 dce_v10_0_hide_cursor(crtc);
2630 obj = NULL;
2631 goto unpin;
2632 }
2633
2634 if ((width > amdgpu_crtc->max_cursor_width) ||
2635 (height > amdgpu_crtc->max_cursor_height)) {
2636 DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2637 return -EINVAL;
2638 }
2639
2640	obj = drm_gem_object_lookup(file_priv, handle);
2641 if (!obj) {
2642 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2643 return -ENOENT;
2644 }
2645
2646 aobj = gem_to_amdgpu_bo(obj);
2647 ret = amdgpu_bo_reserve(aobj, false);
2648 if (ret != 0) {
2649 drm_gem_object_unreference_unlocked(obj);
2650 return ret;
2651 }
2652
2653 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
2654 amdgpu_bo_unreserve(aobj);
2655 if (ret) {
2656 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2657 drm_gem_object_unreference_unlocked(obj);
2658 return ret;
2659 }
2660
2661 amdgpu_crtc->cursor_width = width;
2662 amdgpu_crtc->cursor_height = height;
2663
2664 dce_v10_0_lock_cursor(crtc, true);
2665
2666 if (hot_x != amdgpu_crtc->cursor_hot_x ||
2667 hot_y != amdgpu_crtc->cursor_hot_y) {
2668 int x, y;
2669
2670 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2671 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2672
2673 dce_v10_0_cursor_move_locked(crtc, x, y);
2674
2675 amdgpu_crtc->cursor_hot_x = hot_x;
2676 amdgpu_crtc->cursor_hot_y = hot_y;
2677 }
2678
2679 dce_v10_0_show_cursor(crtc);
2680 dce_v10_0_lock_cursor(crtc, false);
2681
2682unpin:
2683 if (amdgpu_crtc->cursor_bo) {
2684 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2685 ret = amdgpu_bo_reserve(aobj, false);
2686		if (likely(ret == 0)) {
2687 amdgpu_bo_unpin(aobj);
2688 amdgpu_bo_unreserve(aobj);
2689 }
2690 drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
2691 }
2692
2693 amdgpu_crtc->cursor_bo = obj;
2694 return 0;
2695}
2696
2697static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
2698{
2699 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2700
2701 if (amdgpu_crtc->cursor_bo) {
2702 dce_v10_0_lock_cursor(crtc, true);
2703
2704 dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2705 amdgpu_crtc->cursor_y);
2706
2707		dce_v10_0_show_cursor(crtc);
2708
2709 dce_v10_0_lock_cursor(crtc, false);
2710 }
2711}
2712
2713static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2714 u16 *blue, uint32_t size)
2715{
2716 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2717	int i;
2718
2719 /* userspace palettes are always correct as is */
2720	for (i = 0; i < size; i++) {
2721 amdgpu_crtc->lut_r[i] = red[i] >> 6;
2722 amdgpu_crtc->lut_g[i] = green[i] >> 6;
2723 amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2724 }
2725 dce_v10_0_crtc_load_lut(crtc);
2726
2727 return 0;
2728}
2729
2730static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
2731{
2732 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2733
2734 drm_crtc_cleanup(crtc);
2735 kfree(amdgpu_crtc);
2736}
2737
2738static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
2739	.cursor_set2 = dce_v10_0_crtc_cursor_set2,
2740 .cursor_move = dce_v10_0_crtc_cursor_move,
2741 .gamma_set = dce_v10_0_crtc_gamma_set,
2742 .set_config = amdgpu_crtc_set_config,
2743 .destroy = dce_v10_0_crtc_destroy,
2744	.page_flip_target = amdgpu_crtc_page_flip_target,
2745};
2746
2747static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2748{
2749 struct drm_device *dev = crtc->dev;
2750 struct amdgpu_device *adev = dev->dev_private;
2751 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2752	unsigned type;
2753
2754 switch (mode) {
2755 case DRM_MODE_DPMS_ON:
2756 amdgpu_crtc->enabled = true;
2757 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2758 dce_v10_0_vga_enable(crtc, true);
2759 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2760 dce_v10_0_vga_enable(crtc, false);
2761		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2762 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2763 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2764		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2765		drm_crtc_vblank_on(crtc);
2766 dce_v10_0_crtc_load_lut(crtc);
2767 break;
2768 case DRM_MODE_DPMS_STANDBY:
2769 case DRM_MODE_DPMS_SUSPEND:
2770 case DRM_MODE_DPMS_OFF:
2771		drm_crtc_vblank_off(crtc);
2772 if (amdgpu_crtc->enabled) {
2773 dce_v10_0_vga_enable(crtc, true);
2774 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2775 dce_v10_0_vga_enable(crtc, false);
2776 }
2777 amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2778 amdgpu_crtc->enabled = false;
2779 break;
2780 }
2781 /* adjust pm to dpms */
2782 amdgpu_pm_compute_clocks(adev);
2783}
2784
2785static void dce_v10_0_crtc_prepare(struct drm_crtc *crtc)
2786{
2787 /* disable crtc pair power gating before programming */
2788 amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2789 amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2790 dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2791}
2792
2793static void dce_v10_0_crtc_commit(struct drm_crtc *crtc)
2794{
2795 dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2796 amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2797}
2798
2799static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
2800{
2801 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2802 struct drm_device *dev = crtc->dev;
2803 struct amdgpu_device *adev = dev->dev_private;
2804 struct amdgpu_atom_ss ss;
2805 int i;
2806
2807 dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2808 if (crtc->primary->fb) {
2809 int r;
2810 struct amdgpu_framebuffer *amdgpu_fb;
2811 struct amdgpu_bo *rbo;
2812
2813 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2814 rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2815 r = amdgpu_bo_reserve(rbo, false);
2816 if (unlikely(r))
2817 DRM_ERROR("failed to reserve rbo before unpin\n");
2818 else {
2819 amdgpu_bo_unpin(rbo);
2820 amdgpu_bo_unreserve(rbo);
2821 }
2822 }
2823 /* disable the GRPH */
2824 dce_v10_0_grph_enable(crtc, false);
2825
2826 amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2827
2828 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2829 if (adev->mode_info.crtcs[i] &&
2830 adev->mode_info.crtcs[i]->enabled &&
2831 i != amdgpu_crtc->crtc_id &&
2832 amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2833			/* another crtc is using this pll; don't turn
2834			 * it off
2835			 */
2836 goto done;
2837 }
2838 }
2839
2840 switch (amdgpu_crtc->pll_id) {
2841 case ATOM_PPLL0:
2842 case ATOM_PPLL1:
2843 case ATOM_PPLL2:
2844 /* disable the ppll */
2845 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2846 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2847 break;
2848 default:
2849 break;
2850 }
2851done:
2852 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2853 amdgpu_crtc->adjusted_clock = 0;
2854 amdgpu_crtc->encoder = NULL;
2855 amdgpu_crtc->connector = NULL;
2856}
2857
2858static int dce_v10_0_crtc_mode_set(struct drm_crtc *crtc,
2859 struct drm_display_mode *mode,
2860 struct drm_display_mode *adjusted_mode,
2861 int x, int y, struct drm_framebuffer *old_fb)
2862{
2863 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2864
2865 if (!amdgpu_crtc->adjusted_clock)
2866 return -EINVAL;
2867
2868 amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2869 amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2870 dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2871 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2872 amdgpu_atombios_crtc_scaler_setup(crtc);
2873	dce_v10_0_cursor_reset(crtc);
2874	/* update the hw mode for dpm */
2875 amdgpu_crtc->hw_mode = *adjusted_mode;
2876
2877 return 0;
2878}
2879
2880static bool dce_v10_0_crtc_mode_fixup(struct drm_crtc *crtc,
2881 const struct drm_display_mode *mode,
2882 struct drm_display_mode *adjusted_mode)
2883{
2884 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2885 struct drm_device *dev = crtc->dev;
2886 struct drm_encoder *encoder;
2887
2888 /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2889 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2890 if (encoder->crtc == crtc) {
2891 amdgpu_crtc->encoder = encoder;
2892 amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2893 break;
2894 }
2895 }
2896 if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2897 amdgpu_crtc->encoder = NULL;
2898 amdgpu_crtc->connector = NULL;
2899 return false;
2900 }
2901 if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2902 return false;
2903 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2904 return false;
2905 /* pick pll */
2906 amdgpu_crtc->pll_id = dce_v10_0_pick_pll(crtc);
2907 /* if we can't get a PPLL for a non-DP encoder, fail */
2908 if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2909 !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2910 return false;
2911
2912 return true;
2913}
2914
2915static int dce_v10_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2916 struct drm_framebuffer *old_fb)
2917{
2918 return dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2919}
2920
2921static int dce_v10_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2922 struct drm_framebuffer *fb,
2923 int x, int y, enum mode_set_atomic state)
2924{
2925 return dce_v10_0_crtc_do_set_base(crtc, fb, x, y, 1);
2926}
2927
2928static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = {
2929 .dpms = dce_v10_0_crtc_dpms,
2930 .mode_fixup = dce_v10_0_crtc_mode_fixup,
2931 .mode_set = dce_v10_0_crtc_mode_set,
2932 .mode_set_base = dce_v10_0_crtc_set_base,
2933 .mode_set_base_atomic = dce_v10_0_crtc_set_base_atomic,
2934 .prepare = dce_v10_0_crtc_prepare,
2935 .commit = dce_v10_0_crtc_commit,
2936 .load_lut = dce_v10_0_crtc_load_lut,
2937 .disable = dce_v10_0_crtc_disable,
2938};
2939
2940static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
2941{
2942 struct amdgpu_crtc *amdgpu_crtc;
2943 int i;
2944
2945 amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2946 (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2947 if (amdgpu_crtc == NULL)
2948 return -ENOMEM;
2949
2950 drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v10_0_crtc_funcs);
2951
2952 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2953 amdgpu_crtc->crtc_id = index;
2954 adev->mode_info.crtcs[index] = amdgpu_crtc;
2955
2956 amdgpu_crtc->max_cursor_width = 128;
2957 amdgpu_crtc->max_cursor_height = 128;
2958 adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2959 adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2960
2961 for (i = 0; i < 256; i++) {
2962 amdgpu_crtc->lut_r[i] = i << 2;
2963 amdgpu_crtc->lut_g[i] = i << 2;
2964 amdgpu_crtc->lut_b[i] = i << 2;
2965 }
2966
2967 switch (amdgpu_crtc->crtc_id) {
2968 case 0:
2969 default:
2970 amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET;
2971 break;
2972 case 1:
2973 amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET;
2974 break;
2975 case 2:
2976 amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET;
2977 break;
2978 case 3:
2979 amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET;
2980 break;
2981 case 4:
2982 amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET;
2983 break;
2984 case 5:
2985 amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET;
2986 break;
2987 }
2988
2989 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2990 amdgpu_crtc->adjusted_clock = 0;
2991 amdgpu_crtc->encoder = NULL;
2992 amdgpu_crtc->connector = NULL;
2993 drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v10_0_crtc_helper_funcs);
2994
2995 return 0;
2996}
2997
2998static int dce_v10_0_early_init(void *handle)
2999{
3000	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3001
3002 adev->audio_endpt_rreg = &dce_v10_0_audio_endpt_rreg;
3003 adev->audio_endpt_wreg = &dce_v10_0_audio_endpt_wreg;
3004
3005 dce_v10_0_set_display_funcs(adev);
3006 dce_v10_0_set_irq_funcs(adev);
3007
3008 adev->mode_info.num_crtc = dce_v10_0_get_num_crtc(adev);
3009
3010	switch (adev->asic_type) {
3011	case CHIP_FIJI:
3012	case CHIP_TONGA:
3013 adev->mode_info.num_hpd = 6;
3014 adev->mode_info.num_dig = 7;
3015 break;
3016 default:
3017 /* FIXME: not supported yet */
3018 return -EINVAL;
3019 }
3020
3021 return 0;
3022}
3023
3024static int dce_v10_0_sw_init(void *handle)
aaa36a97
AD
3025{
3026 int r, i;
3027	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3028
3029 for (i = 0; i < adev->mode_info.num_crtc; i++) {
3030 r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
3031 if (r)
3032 return r;
3033 }
3034
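	/* the page-flip sources occupy every other src id (8, 10, ..., 18),
	 * one per crtc */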
3035 for (i = 8; i < 20; i += 2) {
3036 r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
3037 if (r)
3038 return r;
3039 }
3040
3041 /* HPD hotplug */
3042 r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
3043 if (r)
3044 return r;
3045
3046 adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
3047
3048 adev->ddev->mode_config.async_page_flip = true;
3049
3050 adev->ddev->mode_config.max_width = 16384;
3051 adev->ddev->mode_config.max_height = 16384;
3052
3053 adev->ddev->mode_config.preferred_depth = 24;
3054 adev->ddev->mode_config.prefer_shadow = 1;
3055
3056 adev->ddev->mode_config.fb_base = adev->mc.aper_base;
3057
3058 r = amdgpu_modeset_create_props(adev);
3059 if (r)
3060 return r;
3061
3065 /* allocate crtcs */
3066 for (i = 0; i < adev->mode_info.num_crtc; i++) {
3067 r = dce_v10_0_crtc_init(adev, i);
3068 if (r)
3069 return r;
3070 }
3071
3072 if (amdgpu_atombios_get_connector_info_from_object_table(adev))
3073 amdgpu_print_display_setup(adev->ddev);
3074 else
3075 return -EINVAL;
3076
3077 /* setup afmt */
3078 r = dce_v10_0_afmt_init(adev);
3079 if (r)
3080 return r;
3081
3082 r = dce_v10_0_audio_init(adev);
3083 if (r)
3084 return r;
3085
3086 drm_kms_helper_poll_init(adev->ddev);
3087
3088 adev->mode_info.mode_config_initialized = true;
3089 return 0;
3090}
3091
3092static int dce_v10_0_sw_fini(void *handle)
3093{
3094	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3095
3096 kfree(adev->mode_info.bios_hardcoded_edid);
3097
3098 drm_kms_helper_poll_fini(adev->ddev);
3099
3100 dce_v10_0_audio_fini(adev);
3101
3102 dce_v10_0_afmt_fini(adev);
3103
3104 drm_mode_config_cleanup(adev->ddev);
3105 adev->mode_info.mode_config_initialized = false;
3106
3107 return 0;
3108}
3109
3110static int dce_v10_0_hw_init(void *handle)
3111{
3112 int i;
3113	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3114
3115 dce_v10_0_init_golden_registers(adev);
3116
3117 /* init dig PHYs, disp eng pll */
3118 amdgpu_atombios_encoder_init_dig(adev);
3119 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
3120
3121 /* initialize hpd */
3122 dce_v10_0_hpd_init(adev);
3123
3124 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
3125 dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
3126 }
3127
3128 dce_v10_0_pageflip_interrupt_init(adev);
3129
3130 return 0;
3131}
3132
3133static int dce_v10_0_hw_fini(void *handle)
3134{
3135 int i;
3136	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3137
3138 dce_v10_0_hpd_fini(adev);
3139
3140 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
3141 dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
3142 }
3143
3144 dce_v10_0_pageflip_interrupt_fini(adev);
3145
3146 return 0;
3147}
3148
3149static int dce_v10_0_suspend(void *handle)
3150{
3151	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3152
3153 amdgpu_atombios_scratch_regs_save(adev);
3154
3155	return dce_v10_0_hw_fini(handle);
3156}
3157
3158static int dce_v10_0_resume(void *handle)
3159{
3160	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3161	int ret;
3162
3163	ret = dce_v10_0_hw_init(handle);
3164
3165 amdgpu_atombios_scratch_regs_restore(adev);
3166
3167 /* turn on the BL */
3168 if (adev->mode_info.bl_encoder) {
3169 u8 bl_level = amdgpu_display_backlight_get_level(adev,
3170 adev->mode_info.bl_encoder);
3171 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
3172 bl_level);
3173 }
3174
3175	return ret;
3176}
3177
3178static bool dce_v10_0_is_idle(void *handle)
3179{
3180 return true;
3181}
3182
3183static int dce_v10_0_wait_for_idle(void *handle)
3184{
3185 return 0;
3186}
3187
3188static int dce_v10_0_check_soft_reset(void *handle)
3189{
3190 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3191
3192 if (dce_v10_0_is_display_hung(adev))
3193 adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = true;
3194 else
3195 adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = false;
3196
3197 return 0;
3198}
3199
3200static int dce_v10_0_soft_reset(void *handle)
3201{
3202 u32 srbm_soft_reset = 0, tmp;
3203	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3204
3205 if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang)
3206 return 0;
3207
3208 if (dce_v10_0_is_display_hung(adev))
3209 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
3210
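	/* typical SRBM reset sequence: assert the DC reset bit, read back
	 * to post the write, hold ~50 us, deassert, then let things settle */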
3211 if (srbm_soft_reset) {
3212 tmp = RREG32(mmSRBM_SOFT_RESET);
3213 tmp |= srbm_soft_reset;
3214 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
3215 WREG32(mmSRBM_SOFT_RESET, tmp);
3216 tmp = RREG32(mmSRBM_SOFT_RESET);
3217
3218 udelay(50);
3219
3220 tmp &= ~srbm_soft_reset;
3221 WREG32(mmSRBM_SOFT_RESET, tmp);
3222 tmp = RREG32(mmSRBM_SOFT_RESET);
3223
3224 /* Wait a little for things to settle down */
3225 udelay(50);
3226 }
3227 return 0;
3228}
3229
3230static void dce_v10_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
3231 int crtc,
3232 enum amdgpu_interrupt_state state)
3233{
3234 u32 lb_interrupt_mask;
3235
3236 if (crtc >= adev->mode_info.num_crtc) {
3237 DRM_DEBUG("invalid crtc %d\n", crtc);
3238 return;
3239 }
3240
3241 switch (state) {
3242 case AMDGPU_IRQ_STATE_DISABLE:
3243 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3244 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3245 VBLANK_INTERRUPT_MASK, 0);
3246 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3247 break;
3248 case AMDGPU_IRQ_STATE_ENABLE:
3249 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3250 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3251 VBLANK_INTERRUPT_MASK, 1);
3252 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3253 break;
3254 default:
3255 break;
3256 }
3257}
3258
static void dce_v10_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
						     int crtc,
						     enum amdgpu_interrupt_state state)
{
	u32 lb_interrupt_mask;

	if (crtc >= adev->mode_info.num_crtc) {
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
		lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
						  VLINE_INTERRUPT_MASK, 0);
		WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
		lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
						  VLINE_INTERRUPT_MASK, 1);
		WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
		break;
	default:
		break;
	}
}

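/*
 * dce_v10_0_set_hpd_irq_state - enable or disable one hot-plug-detect
 * interrupt by toggling DC_HPD_INT_EN in DC_HPD_INT_CONTROL.
 */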
static int dce_v10_0_set_hpd_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned hpd,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp;

	if (hpd >= adev->mode_info.num_hpd) {
		DRM_DEBUG("invalid hpd %d\n", hpd);
		return 0;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
		WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1);
		WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
		break;
	default:
		break;
	}

	return 0;
}

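/*
 * dce_v10_0_set_crtc_irq_state - dispatch a crtc interrupt type to the
 * matching vblank/vline helper; each AMDGPU_CRTC_IRQ_* value encodes
 * both the interrupt kind and the crtc index.
 */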
static int dce_v10_0_set_crtc_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CRTC_IRQ_VBLANK1:
		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 0, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK2:
		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 1, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK3:
		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 2, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK4:
		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 3, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK5:
		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 4, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK6:
		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 5, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE1:
		dce_v10_0_set_crtc_vline_interrupt_state(adev, 0, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE2:
		dce_v10_0_set_crtc_vline_interrupt_state(adev, 1, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE3:
		dce_v10_0_set_crtc_vline_interrupt_state(adev, 2, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE4:
		dce_v10_0_set_crtc_vline_interrupt_state(adev, 3, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE5:
		dce_v10_0_set_crtc_vline_interrupt_state(adev, 4, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE6:
		dce_v10_0_set_crtc_vline_interrupt_state(adev, 5, state);
		break;
	default:
		break;
	}
	return 0;
}

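/*
 * dce_v10_0_set_pageflip_irq_state - mask or unmask the page-flip
 * interrupt in GRPH_INTERRUPT_CONTROL; "type" is the crtc index here.
 */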
static int dce_v10_0_set_pageflip_irq_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	u32 reg;

	if (type >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", type);
		return -EINVAL;
	}

	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
	if (state == AMDGPU_IRQ_STATE_DISABLE)
		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
	else
		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);

	return 0;
}

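/*
 * dce_v10_0_pageflip_irq - page-flip completion handler.
 *
 * Acks the pending flip interrupt and, if a flip was actually submitted,
 * marks it complete under the event lock, sends the vblank event to
 * userspace, drops the vblank reference and queues the unpin work.
 * Page-flip IV source ids appear to be allocated two apart per crtc,
 * hence the (src_id - 8) >> 1 mapping back to a crtc index.
 */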
static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	unsigned long flags;
	unsigned crtc_id;
	struct amdgpu_crtc *amdgpu_crtc;
	struct amdgpu_flip_work *works;

	crtc_id = (entry->src_id - 8) >> 1;

	/* validate the crtc index before using it to index the crtc array */
	if (crtc_id >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
		return -EINVAL;
	}
	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);

	/* an IRQ can fire during early init, before the crtcs are set up */
	if (amdgpu_crtc == NULL)
		return 0;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	works = amdgpu_crtc->pflip_works;
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
				 "AMDGPU_FLIP_SUBMITTED(%d)\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return 0;
	}

	/* page flip completed, clean up */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	amdgpu_crtc->pflip_works = NULL;

	/* wake up userspace */
	if (works->event)
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
	schedule_work(&works->unpin_work);

	return 0;
}

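/*
 * The three ack helpers below clear a pending HPD, vblank or vline
 * interrupt by writing the corresponding ACK bit for the given pin/crtc.
 */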
static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev,
				  int hpd)
{
	u32 tmp;

	if (hpd >= adev->mode_info.num_hpd) {
		DRM_DEBUG("invalid hpd %d\n", hpd);
		return;
	}

	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
	tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1);
	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
}

static void dce_v10_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
					  int crtc)
{
	u32 tmp;

	if (crtc >= adev->mode_info.num_crtc) {
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]);
	tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1);
	WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp);
}

static void dce_v10_0_crtc_vline_int_ack(struct amdgpu_device *adev,
					 int crtc)
{
	u32 tmp;

	if (crtc >= adev->mode_info.num_crtc) {
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]);
	tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1);
	WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp);
}

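/*
 * dce_v10_0_crtc_irq - vblank/vline interrupt handler.
 *
 * src_data selects vblank (0) or vline (1) and src_id carries the crtc.
 * Pending interrupts are acked, and vblanks are forwarded to the DRM
 * core via drm_handle_vblank() while the source is enabled.
 */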
static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
			      struct amdgpu_irq_src *source,
			      struct amdgpu_iv_entry *entry)
{
	unsigned crtc = entry->src_id - 1;
	uint32_t disp_int;
	unsigned irq_type;

	/* guard the interrupt_status_offsets[] lookup below */
	if (crtc >= adev->mode_info.num_crtc) {
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return 0;
	}

	disp_int = RREG32(interrupt_status_offsets[crtc].reg);
	irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);

	switch (entry->src_data) {
	case 0: /* vblank */
		if (disp_int & interrupt_status_offsets[crtc].vblank)
			dce_v10_0_crtc_vblank_int_ack(adev, crtc);
		else
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		if (amdgpu_irq_enabled(adev, source, irq_type)) {
			drm_handle_vblank(adev->ddev, crtc);
		}
		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);

		break;
	case 1: /* vline */
		if (disp_int & interrupt_status_offsets[crtc].vline)
			dce_v10_0_crtc_vline_int_ack(adev, crtc);
		else
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		DRM_DEBUG("IH: D%d vline\n", crtc + 1);

		break;
	default:
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
		break;
	}

	return 0;
}

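/*
 * dce_v10_0_hpd_irq - hot-plug-detect interrupt handler.
 *
 * Acks the HPD interrupt and schedules the shared hotplug work, which
 * handles connector re-detection outside of interrupt context.
 */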
static int dce_v10_0_hpd_irq(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *source,
			     struct amdgpu_iv_entry *entry)
{
	uint32_t disp_int, mask;
	unsigned hpd;

	if (entry->src_data >= adev->mode_info.num_hpd) {
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
		return 0;
	}

	hpd = entry->src_data;
	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
	mask = interrupt_status_offsets[hpd].hpd;

	if (disp_int & mask) {
		dce_v10_0_hpd_int_ack(adev, hpd);
		schedule_work(&adev->hotplug_work);
		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
	}

	return 0;
}

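/*
 * Clock- and power-gating are not toggled for DCE v10 through these
 * hooks; both callbacks simply report success.
 */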
static int dce_v10_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	return 0;
}

static int dce_v10_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

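/* IP-block level entry points for the DCE v10 display engine. */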
const struct amd_ip_funcs dce_v10_0_ip_funcs = {
	.name = "dce_v10_0",
	.early_init = dce_v10_0_early_init,
	.late_init = NULL,
	.sw_init = dce_v10_0_sw_init,
	.sw_fini = dce_v10_0_sw_fini,
	.hw_init = dce_v10_0_hw_init,
	.hw_fini = dce_v10_0_hw_fini,
	.suspend = dce_v10_0_suspend,
	.resume = dce_v10_0_resume,
	.is_idle = dce_v10_0_is_idle,
	.wait_for_idle = dce_v10_0_wait_for_idle,
	.check_soft_reset = dce_v10_0_check_soft_reset,
	.soft_reset = dce_v10_0_soft_reset,
	.set_clockgating_state = dce_v10_0_set_clockgating_state,
	.set_powergating_state = dce_v10_0_set_powergating_state,
};

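/*
 * dce_v10_0_encoder_mode_set - encoder mode-set callback.
 *
 * Caches the adjusted pixel clock, forces the encoder off while crtc
 * info is still available, restores the interleave setting and, for
 * HDMI, enables and programs the AFMT block.
 */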
static void
dce_v10_0_encoder_mode_set(struct drm_encoder *encoder,
			   struct drm_display_mode *mode,
			   struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->pixel_clock = adjusted_mode->clock;

	/* need to call this here rather than in prepare() since we need some crtc info */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	/* setting the scaler clears this on some chips */
	dce_v10_0_set_interleave(encoder->crtc, mode);

	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
		dce_v10_0_afmt_enable(encoder, true);
		dce_v10_0_afmt_setmode(encoder, adjusted_mode);
	}
}

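/*
 * dce_v10_0_encoder_prepare - pre-mode-set setup.
 *
 * Assigns a DIG encoder (and AFMT block) for digital outputs, locks the
 * BIOS scratch registers, selects the i2c router port and powers up an
 * eDP panel where applicable, then programs the crtc source and the FMT
 * blocks.
 */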
static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);

	if ((amdgpu_encoder->active_device &
	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
	     ENCODER_OBJECT_ID_NONE)) {
		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
		if (dig) {
			dig->dig_encoder = dce_v10_0_pick_dig_encoder(encoder);
			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
		}
	}

	amdgpu_atombios_scratch_regs_lock(adev, true);

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		/* select the clock/data port if it uses a router */
		if (amdgpu_connector->router.cd_valid)
			amdgpu_i2c_router_select_cd_port(amdgpu_connector);

		/* turn eDP panel on for mode set */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			amdgpu_atombios_encoder_set_edp_panel_power(connector,
								    ATOM_TRANSMITTER_ACTION_POWER_ON);
	}

	/* this is needed for the pll/ss setup to work correctly in some cases */
	amdgpu_atombios_encoder_set_crtc_source(encoder);
	/* set up the FMT blocks */
	dce_v10_0_program_fmt(encoder);
}

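/*
 * dce_v10_0_encoder_commit - post-mode-set enable: turn the encoder
 * back on and release the scratch-register lock taken in prepare().
 */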
static void dce_v10_0_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;

	/* need to call this here as we need the crtc set up */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
	amdgpu_atombios_scratch_regs_lock(adev, false);
}

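/*
 * dce_v10_0_encoder_disable - turn the encoder off, disabling AFMT for
 * HDMI and releasing the DIG encoder assignment for digital outputs.
 */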
static void dce_v10_0_encoder_disable(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig;

	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	if (amdgpu_atombios_encoder_is_digital(encoder)) {
		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
			dce_v10_0_afmt_enable(encoder, false);
		dig = amdgpu_encoder->enc_priv;
		dig->dig_encoder = -1;
	}
	amdgpu_encoder->active_device = 0;
}

/* these are handled by the primary encoders */
static void dce_v10_0_ext_prepare(struct drm_encoder *encoder)
{

}

static void dce_v10_0_ext_commit(struct drm_encoder *encoder)
{

}

static void
dce_v10_0_ext_mode_set(struct drm_encoder *encoder,
		       struct drm_display_mode *mode,
		       struct drm_display_mode *adjusted_mode)
{

}

static void dce_v10_0_ext_disable(struct drm_encoder *encoder)
{

}

static void
dce_v10_0_ext_dpms(struct drm_encoder *encoder, int mode)
{

}

static const struct drm_encoder_helper_funcs dce_v10_0_ext_helper_funcs = {
	.dpms = dce_v10_0_ext_dpms,
	.prepare = dce_v10_0_ext_prepare,
	.mode_set = dce_v10_0_ext_mode_set,
	.commit = dce_v10_0_ext_commit,
	.disable = dce_v10_0_ext_disable,
	/* no detect for TMDS/LVDS yet */
};

static const struct drm_encoder_helper_funcs dce_v10_0_dig_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v10_0_encoder_prepare,
	.mode_set = dce_v10_0_encoder_mode_set,
	.commit = dce_v10_0_encoder_commit,
	.disable = dce_v10_0_encoder_disable,
	.detect = amdgpu_atombios_encoder_dig_detect,
};

static const struct drm_encoder_helper_funcs dce_v10_0_dac_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v10_0_encoder_prepare,
	.mode_set = dce_v10_0_encoder_mode_set,
	.commit = dce_v10_0_encoder_commit,
	.detect = amdgpu_atombios_encoder_dac_detect,
};

static void dce_v10_0_encoder_destroy(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
	kfree(amdgpu_encoder->enc_priv);
	drm_encoder_cleanup(encoder);
	kfree(amdgpu_encoder);
}

static const struct drm_encoder_funcs dce_v10_0_encoder_funcs = {
	.destroy = dce_v10_0_encoder_destroy,
};

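/*
 * dce_v10_0_encoder_add - register an encoder from the BIOS object
 * tables.
 *
 * If the encoder enum already exists, only its supported-device mask is
 * extended. Otherwise a new amdgpu_encoder is allocated, possible_crtcs
 * is derived from the crtc count, and the DRM encoder type and helper
 * vtable are chosen from the encoder object id.
 */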
static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
				  uint32_t encoder_enum,
				  uint32_t supported_device,
				  u16 caps)
{
	struct drm_device *dev = adev->ddev;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	/* see if we already added it */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		if (amdgpu_encoder->encoder_enum == encoder_enum) {
			amdgpu_encoder->devices |= supported_device;
			return;
		}
	}

	/* add a new one */
	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
	if (!amdgpu_encoder)
		return;

	encoder = &amdgpu_encoder->base;
	switch (adev->mode_info.num_crtc) {
	case 1:
		encoder->possible_crtcs = 0x1;
		break;
	case 2:
	default:
		encoder->possible_crtcs = 0x3;
		break;
	case 4:
		encoder->possible_crtcs = 0xf;
		break;
	case 6:
		encoder->possible_crtcs = 0x3f;
		break;
	}

	amdgpu_encoder->enc_priv = NULL;

	amdgpu_encoder->encoder_enum = encoder_enum;
	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	amdgpu_encoder->devices = supported_device;
	amdgpu_encoder->rmx_type = RMX_OFF;
	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
	amdgpu_encoder->is_ext_encoder = false;
	amdgpu_encoder->caps = caps;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
				 DRM_MODE_ENCODER_DAC, NULL);
		drm_encoder_helper_add(encoder, &dce_v10_0_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			amdgpu_encoder->rmx_type = RMX_FULL;
			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		} else {
			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		}
		drm_encoder_helper_add(encoder, &dce_v10_0_dig_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_SI170B:
	case ENCODER_OBJECT_ID_CH7303:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
	case ENCODER_OBJECT_ID_TITFP513:
	case ENCODER_OBJECT_ID_VT1623:
	case ENCODER_OBJECT_ID_HDMI_SI1930:
	case ENCODER_OBJECT_ID_TRAVIS:
	case ENCODER_OBJECT_ID_NUTMEG:
		/* these are handled by the primary encoders */
		amdgpu_encoder->is_ext_encoder = true;
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
		else
			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
		drm_encoder_helper_add(encoder, &dce_v10_0_ext_helper_funcs);
		break;
	}
}

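/* Display callbacks exported to the rest of the driver via mode_info. */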
static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
	.set_vga_render_state = &dce_v10_0_set_vga_render_state,
	.bandwidth_update = &dce_v10_0_bandwidth_update,
	.vblank_get_counter = &dce_v10_0_vblank_get_counter,
	.vblank_wait = &dce_v10_0_vblank_wait,
	.is_display_hung = &dce_v10_0_is_display_hung,
	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
	.hpd_sense = &dce_v10_0_hpd_sense,
	.hpd_set_polarity = &dce_v10_0_hpd_set_polarity,
	.hpd_get_gpio_reg = &dce_v10_0_hpd_get_gpio_reg,
	.page_flip = &dce_v10_0_page_flip,
	.page_flip_get_scanoutpos = &dce_v10_0_crtc_get_scanoutpos,
	.add_encoder = &dce_v10_0_encoder_add,
	.add_connector = &amdgpu_connector_add,
	.stop_mc_access = &dce_v10_0_stop_mc_access,
	.resume_mc_access = &dce_v10_0_resume_mc_access,
};

static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev)
{
	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dce_v10_0_display_funcs;
}

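/*
 * Interrupt source vtables: each pairs a .set callback used to
 * enable/disable the source with a .process handler invoked from the
 * interrupt ring.
 */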
static const struct amdgpu_irq_src_funcs dce_v10_0_crtc_irq_funcs = {
	.set = dce_v10_0_set_crtc_irq_state,
	.process = dce_v10_0_crtc_irq,
};

static const struct amdgpu_irq_src_funcs dce_v10_0_pageflip_irq_funcs = {
	.set = dce_v10_0_set_pageflip_irq_state,
	.process = dce_v10_0_pageflip_irq,
};

static const struct amdgpu_irq_src_funcs dce_v10_0_hpd_irq_funcs = {
	.set = dce_v10_0_set_hpd_irq_state,
	.process = dce_v10_0_hpd_irq,
};

static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
	adev->crtc_irq.funcs = &dce_v10_0_crtc_irq_funcs;

	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
	adev->pageflip_irq.funcs = &dce_v10_0_pageflip_irq_funcs;

	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
	adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs;
}