drm/radeon/kms: update power table parsing for SI
drivers/gpu/drm/radeon/si.c
/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "sid.h"
#include "atom.h"

/* watermark setup */
static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
                                   struct radeon_crtc *radeon_crtc,
                                   struct drm_display_mode *mode,
                                   struct drm_display_mode *other_mode)
{
        u32 tmp;
        /*
         * Line Buffer Setup
         * There are 3 line buffers, each one shared by 2 display controllers.
         * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
         * the display controllers.  The partitioning is done via one of four
         * preset allocations specified in bits 21:20:
         *  0 - half lb
         *  2 - whole lb, other crtc must be disabled
         */
        /* this can get tricky if we have two large displays on a paired group
         * of crtcs.  Ideally for multiple large displays we'd assign them to
         * non-linked crtcs for maximum line buffer allocation.
         */
        if (radeon_crtc->base.enabled && mode) {
                if (other_mode)
                        tmp = 0; /* 1/2 */
                else
                        tmp = 2; /* whole */
        } else
                tmp = 0;

        WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
               DC_LB_MEMORY_CONFIG(tmp));

        if (radeon_crtc->base.enabled && mode) {
                switch (tmp) {
                case 0:
                default:
                        return 4096 * 2;
                case 2:
                        return 8192 * 2;
                }
        }

        /* controller not enabled, so no lb used */
        return 0;
}

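/* Note: the value returned by dce6_line_buffer_adjust() is the line buffer
 * allocation for this crtc, in the same units as the viewport width.  When
 * both crtcs of a pair are active each gets half of the shared buffer
 * (4096 * 2); a lone active crtc gets the whole buffer (8192 * 2).
 */
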
static u32 dce6_get_number_of_dram_channels(struct radeon_device *rdev)
{
        u32 tmp = RREG32(MC_SHARED_CHMAP);

        switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
        case 0:
        default:
                return 1;
        case 1:
                return 2;
        case 2:
                return 4;
        case 3:
                return 8;
        case 4:
                return 3;
        case 5:
                return 6;
        case 6:
                return 10;
        case 7:
                return 12;
        case 8:
                return 16;
        }
}

struct dce6_wm_params {
        u32 dram_channels; /* number of dram channels */
        u32 yclk;          /* bandwidth per dram data pin in kHz */
        u32 sclk;          /* engine clock in kHz */
        u32 disp_clk;      /* display clock in kHz */
        u32 src_width;     /* viewport width */
        u32 active_time;   /* active display time in ns */
        u32 blank_time;    /* blank time in ns */
        bool interlaced;   /* mode is interlaced */
        fixed20_12 vsc;    /* vertical scale ratio */
        u32 num_heads;     /* number of active crtcs */
        u32 bytes_per_pixel; /* bytes per pixel display + overlay */
        u32 lb_size;       /* line buffer allocated to pipe */
        u32 vtaps;         /* vertical scaler taps */
};

static u32 dce6_dram_bandwidth(struct dce6_wm_params *wm)
{
        /* Calculate raw DRAM Bandwidth */
        fixed20_12 dram_efficiency; /* 0.7 */
        fixed20_12 yclk, dram_channels, bandwidth;
        fixed20_12 a;

        a.full = dfixed_const(1000);
        yclk.full = dfixed_const(wm->yclk);
        yclk.full = dfixed_div(yclk, a);
        dram_channels.full = dfixed_const(wm->dram_channels * 4);
        a.full = dfixed_const(10);
        dram_efficiency.full = dfixed_const(7);
        dram_efficiency.full = dfixed_div(dram_efficiency, a);
        bandwidth.full = dfixed_mul(dram_channels, yclk);
        bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

        return dfixed_trunc(bandwidth);
}

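/* Worked example (illustrative numbers): the function above computes
 *   bandwidth = (yclk / 1000) * (dram_channels * 4) * 0.7
 * i.e. the memory clock in MHz times 4 bytes per channel, derated by a
 * 0.7 DRAM efficiency factor.  With yclk = 800000 kHz and 2 channels
 * that is 800 * 8 * 0.7 = 4480 (roughly MB/s).
 */
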
static u32 dce6_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
        /* Calculate DRAM Bandwidth and the part allocated to display. */
        fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
        fixed20_12 yclk, dram_channels, bandwidth;
        fixed20_12 a;

        a.full = dfixed_const(1000);
        yclk.full = dfixed_const(wm->yclk);
        yclk.full = dfixed_div(yclk, a);
        dram_channels.full = dfixed_const(wm->dram_channels * 4);
        a.full = dfixed_const(10);
        disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
        disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
        bandwidth.full = dfixed_mul(dram_channels, yclk);
        bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

        return dfixed_trunc(bandwidth);
}

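/* Worked example (illustrative numbers): same formula as
 * dce6_dram_bandwidth(), but derated by the 0.3 worst-case display
 * allocation instead of the 0.7 efficiency factor, so the 800 MHz /
 * 2 channel case above leaves 800 * 8 * 0.3 = 1920 for the display.
 */
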
static u32 dce6_data_return_bandwidth(struct dce6_wm_params *wm)
{
        /* Calculate the display Data return Bandwidth */
        fixed20_12 return_efficiency; /* 0.8 */
        fixed20_12 sclk, bandwidth;
        fixed20_12 a;

        a.full = dfixed_const(1000);
        sclk.full = dfixed_const(wm->sclk);
        sclk.full = dfixed_div(sclk, a);
        a.full = dfixed_const(10);
        return_efficiency.full = dfixed_const(8);
        return_efficiency.full = dfixed_div(return_efficiency, a);
        a.full = dfixed_const(32);
        bandwidth.full = dfixed_mul(a, sclk);
        bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

        return dfixed_trunc(bandwidth);
}

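/* Worked example (illustrative numbers): data return bandwidth is
 *   (sclk / 1000) * 32 * 0.8
 * i.e. 32 bytes per engine clock cycle derated by a 0.8 return
 * efficiency; with sclk = 1000000 kHz (1 GHz) that is
 * 1000 * 32 * 0.8 = 25600.
 */
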
static u32 dce6_get_dmif_bytes_per_request(struct dce6_wm_params *wm)
{
        return 32;
}

static u32 dce6_dmif_request_bandwidth(struct dce6_wm_params *wm)
{
        /* Calculate the DMIF Request Bandwidth */
        fixed20_12 disp_clk_request_efficiency; /* 0.8 */
        fixed20_12 disp_clk, sclk, bandwidth;
        fixed20_12 a, b1, b2;
        u32 min_bandwidth;

        a.full = dfixed_const(1000);
        disp_clk.full = dfixed_const(wm->disp_clk);
        disp_clk.full = dfixed_div(disp_clk, a);
        a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm) / 2);
        b1.full = dfixed_mul(a, disp_clk);

        a.full = dfixed_const(1000);
        sclk.full = dfixed_const(wm->sclk);
        sclk.full = dfixed_div(sclk, a);
        a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm));
        b2.full = dfixed_mul(a, sclk);

        a.full = dfixed_const(10);
        disp_clk_request_efficiency.full = dfixed_const(8);
        disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

        min_bandwidth = min(dfixed_trunc(b1), dfixed_trunc(b2));

        a.full = dfixed_const(min_bandwidth);
        bandwidth.full = dfixed_mul(a, disp_clk_request_efficiency);

        return dfixed_trunc(bandwidth);
}

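/* Worked example: with 32 bytes per DMIF request the code above reduces to
 *   min(disp_clk/1000 * 16, sclk/1000 * 32) * 0.8
 * i.e. the slower of the display clock and engine clock request paths,
 * derated by a 0.8 request efficiency.
 */
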
static u32 dce6_available_bandwidth(struct dce6_wm_params *wm)
{
        /* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
        u32 dram_bandwidth = dce6_dram_bandwidth(wm);
        u32 data_return_bandwidth = dce6_data_return_bandwidth(wm);
        u32 dmif_req_bandwidth = dce6_dmif_request_bandwidth(wm);

        return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

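/* Note: the available bandwidth is the short-term burst limit (minimum of
 * raw DRAM, data return and DMIF request bandwidth), while
 * dce6_average_bandwidth() below gives the sustained demand of one head;
 * the two are compared per head in the helpers further down.
 */
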
static u32 dce6_average_bandwidth(struct dce6_wm_params *wm)
{
        /* Calculate the display mode Average Bandwidth
         * DisplayMode should contain the source and destination dimensions,
         * timing, etc.
         */
        fixed20_12 bpp;
        fixed20_12 line_time;
        fixed20_12 src_width;
        fixed20_12 bandwidth;
        fixed20_12 a;

        a.full = dfixed_const(1000);
        line_time.full = dfixed_const(wm->active_time + wm->blank_time);
        line_time.full = dfixed_div(line_time, a);
        bpp.full = dfixed_const(wm->bytes_per_pixel);
        src_width.full = dfixed_const(wm->src_width);
        bandwidth.full = dfixed_mul(src_width, bpp);
        bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
        bandwidth.full = dfixed_div(bandwidth, line_time);

        return dfixed_trunc(bandwidth);
}

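/* Worked example (illustrative numbers): average bandwidth is
 *   src_width * bytes_per_pixel * vsc / ((active_time + blank_time) / 1000)
 * i.e. bytes per source line divided by the line time in us.  A 1920 pixel
 * wide viewport at 4 bytes per pixel with vsc = 1 and a 12.5 us line time
 * needs 1920 * 4 / 12.5 = 614 (roughly MB/s).
 */
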
static u32 dce6_latency_watermark(struct dce6_wm_params *wm)
{
        /* First calculate the latency in ns */
        u32 mc_latency = 2000; /* 2000 ns. */
        u32 available_bandwidth = dce6_available_bandwidth(wm);
        u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
        u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
        u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
        u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
                (wm->num_heads * cursor_line_pair_return_time);
        u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
        u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
        u32 tmp, dmif_size = 12288;
        fixed20_12 a, b, c;

        if (wm->num_heads == 0)
                return 0;

        a.full = dfixed_const(2);
        b.full = dfixed_const(1);
        if ((wm->vsc.full > a.full) ||
            ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
            (wm->vtaps >= 5) ||
            ((wm->vsc.full >= a.full) && wm->interlaced))
                max_src_lines_per_dst_line = 4;
        else
                max_src_lines_per_dst_line = 2;

        a.full = dfixed_const(available_bandwidth);
        b.full = dfixed_const(wm->num_heads);
        a.full = dfixed_div(a, b);

        b.full = dfixed_const(mc_latency + 512);
        c.full = dfixed_const(wm->disp_clk);
        b.full = dfixed_div(b, c);

        c.full = dfixed_const(dmif_size);
        b.full = dfixed_div(c, b);

        tmp = min(dfixed_trunc(a), dfixed_trunc(b));

        b.full = dfixed_const(1000);
        c.full = dfixed_const(wm->disp_clk);
        b.full = dfixed_div(c, b);
        c.full = dfixed_const(wm->bytes_per_pixel);
        b.full = dfixed_mul(b, c);

        lb_fill_bw = min(tmp, dfixed_trunc(b));

        a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
        b.full = dfixed_const(1000);
        c.full = dfixed_const(lb_fill_bw);
        b.full = dfixed_div(c, b);
        a.full = dfixed_div(a, b);
        line_fill_time = dfixed_trunc(a);

        if (line_fill_time < wm->active_time)
                return latency;
        else
                return latency + (line_fill_time - wm->active_time);
}

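/* Note: the watermark above is the worst-case latency, in ns, that the line
 * buffer has to hide: memory controller latency plus dc pipe latency plus
 * the time the other heads may occupy the return path, extended by any
 * shortfall when the line buffer cannot be refilled within one active line
 * (line_fill_time > active_time).
 */
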
static bool dce6_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
        if (dce6_average_bandwidth(wm) <=
            (dce6_dram_bandwidth_for_display(wm) / wm->num_heads))
                return true;
        else
                return false;
}

static bool dce6_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
{
        if (dce6_average_bandwidth(wm) <=
            (dce6_available_bandwidth(wm) / wm->num_heads))
                return true;
        else
                return false;
}

static bool dce6_check_latency_hiding(struct dce6_wm_params *wm)
{
        u32 lb_partitions = wm->lb_size / wm->src_width;
        u32 line_time = wm->active_time + wm->blank_time;
        u32 latency_tolerant_lines;
        u32 latency_hiding;
        fixed20_12 a;

        a.full = dfixed_const(1);
        if (wm->vsc.full > a.full)
                latency_tolerant_lines = 1;
        else {
                if (lb_partitions <= (wm->vtaps + 1))
                        latency_tolerant_lines = 1;
                else
                        latency_tolerant_lines = 2;
        }

        latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

        if (dce6_latency_watermark(wm) <= latency_hiding)
                return true;
        else
                return false;
}

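/* Note: latency hiding is the number of latency tolerant lines (1 or 2,
 * depending on downscaling and on how many line buffer partitions fit the
 * viewport) times the line time, plus the blank time; the latency watermark
 * must fit inside that window for the mode to be safe.
 */
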
static void dce6_program_watermarks(struct radeon_device *rdev,
                                    struct radeon_crtc *radeon_crtc,
                                    u32 lb_size, u32 num_heads)
{
        struct drm_display_mode *mode = &radeon_crtc->base.mode;
        struct dce6_wm_params wm;
        u32 pixel_period;
        u32 line_time = 0;
        u32 latency_watermark_a = 0, latency_watermark_b = 0;
        u32 priority_a_mark = 0, priority_b_mark = 0;
        u32 priority_a_cnt = PRIORITY_OFF;
        u32 priority_b_cnt = PRIORITY_OFF;
        u32 tmp, arb_control3;
        fixed20_12 a, b, c;

        if (radeon_crtc->base.enabled && num_heads && mode) {
                pixel_period = 1000000 / (u32)mode->clock;
                line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
                priority_a_cnt = 0;
                priority_b_cnt = 0;

                wm.yclk = rdev->pm.current_mclk * 10;
                wm.sclk = rdev->pm.current_sclk * 10;
                wm.disp_clk = mode->clock;
                wm.src_width = mode->crtc_hdisplay;
                wm.active_time = mode->crtc_hdisplay * pixel_period;
                wm.blank_time = line_time - wm.active_time;
                wm.interlaced = false;
                if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                        wm.interlaced = true;
                wm.vsc = radeon_crtc->vsc;
                wm.vtaps = 1;
                if (radeon_crtc->rmx_type != RMX_OFF)
                        wm.vtaps = 2;
                wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
                wm.lb_size = lb_size;
                wm.dram_channels = dce6_get_number_of_dram_channels(rdev);
                wm.num_heads = num_heads;

                /* set for high clocks */
                latency_watermark_a = min(dce6_latency_watermark(&wm), (u32)65535);
                /* set for low clocks */
                /* wm.yclk = low clk; wm.sclk = low clk */
                latency_watermark_b = min(dce6_latency_watermark(&wm), (u32)65535);

                /* possibly force display priority to high */
                /* should really do this at mode validation time... */
                if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
                    !dce6_average_bandwidth_vs_available_bandwidth(&wm) ||
                    !dce6_check_latency_hiding(&wm) ||
                    (rdev->disp_priority == 2)) {
                        DRM_DEBUG_KMS("force priority to high\n");
                        priority_a_cnt |= PRIORITY_ALWAYS_ON;
                        priority_b_cnt |= PRIORITY_ALWAYS_ON;
                }

                a.full = dfixed_const(1000);
                b.full = dfixed_const(mode->clock);
                b.full = dfixed_div(b, a);
                c.full = dfixed_const(latency_watermark_a);
                c.full = dfixed_mul(c, b);
                c.full = dfixed_mul(c, radeon_crtc->hsc);
                c.full = dfixed_div(c, a);
                a.full = dfixed_const(16);
                c.full = dfixed_div(c, a);
                priority_a_mark = dfixed_trunc(c);
                priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

                a.full = dfixed_const(1000);
                b.full = dfixed_const(mode->clock);
                b.full = dfixed_div(b, a);
                c.full = dfixed_const(latency_watermark_b);
                c.full = dfixed_mul(c, b);
                c.full = dfixed_mul(c, radeon_crtc->hsc);
                c.full = dfixed_div(c, a);
                a.full = dfixed_const(16);
                c.full = dfixed_div(c, a);
                priority_b_mark = dfixed_trunc(c);
                priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
        }

        /* select wm A */
        arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
        tmp = arb_control3;
        tmp &= ~LATENCY_WATERMARK_MASK(3);
        tmp |= LATENCY_WATERMARK_MASK(1);
        WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
        WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
               (LATENCY_LOW_WATERMARK(latency_watermark_a) |
                LATENCY_HIGH_WATERMARK(line_time)));
        /* select wm B */
        tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
        tmp &= ~LATENCY_WATERMARK_MASK(3);
        tmp |= LATENCY_WATERMARK_MASK(2);
        WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
        WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
               (LATENCY_LOW_WATERMARK(latency_watermark_b) |
                LATENCY_HIGH_WATERMARK(line_time)));
        /* restore original selection */
        WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3);

        /* write the priority marks */
        WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
        WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
}

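/* Note: watermark A is programmed with the high clock values and watermark B
 * is meant for the low clock values; since the low clocks are not plugged in
 * yet (see the placeholder comment above), A and B are currently computed
 * from the same parameters and end up identical.
 */
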
void dce6_bandwidth_update(struct radeon_device *rdev)
{
        struct drm_display_mode *mode0 = NULL;
        struct drm_display_mode *mode1 = NULL;
        u32 num_heads = 0, lb_size;
        int i;

        radeon_update_display_priority(rdev);

        for (i = 0; i < rdev->num_crtc; i++) {
                if (rdev->mode_info.crtcs[i]->base.enabled)
                        num_heads++;
        }
        for (i = 0; i < rdev->num_crtc; i += 2) {
                mode0 = &rdev->mode_info.crtcs[i]->base.mode;
                mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
                lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
                dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
                lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
                dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
        }
}
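
/* Note: crtcs are walked in pairs because each pair shares one line buffer;
 * dce6_line_buffer_adjust() splits the buffer first and the watermarks for
 * both crtcs of the pair are then programmed with their respective share.
 */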