Commit | Line | Data |
---|---|---|
fade7ad5 BS |
1 | /* |
2 | * Copyright 2010 Red Hat Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | * Authors: Ben Skeggs | |
23 | */ | |
24 | ||
25 | #include "drmP.h" | |
26 | #include "nouveau_drv.h" | |
02a841d4 | 27 | #include <nouveau_bios.h> |
fade7ad5 BS |
28 | #include "nouveau_pm.h" |
29 | ||
ca94a71f | 30 | static u32 read_clk(struct drm_device *, int, bool); |
cec2a270 | 31 | static u32 read_pll(struct drm_device *, int, u32); |
3b0582d3 BS |
32 | |
33 | static u32 | |
ca94a71f BS |
34 | read_vco(struct drm_device *dev, int clk) |
35 | { | |
36 | u32 sctl = nv_rd32(dev, 0x4120 + (clk * 4)); | |
37 | if ((sctl & 0x00000030) != 0x00000030) | |
cec2a270 BS |
38 | return read_pll(dev, 0x41, 0x00e820); |
39 | return read_pll(dev, 0x42, 0x00e8a0); | |
ca94a71f BS |
40 | } |
41 | ||
42 | static u32 | |
43 | read_clk(struct drm_device *dev, int clk, bool ignore_en) | |
3b0582d3 | 44 | { |
64e740bb | 45 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
3b0582d3 BS |
46 | u32 sctl, sdiv, sclk; |
47 | ||
64e740bb | 48 | /* refclk for the 0xe8xx plls is a fixed frequency */ |
378f85ed BS |
49 | if (clk >= 0x40) { |
50 | if (dev_priv->chipset == 0xaf) { | |
51 | /* no joke.. seriously.. sigh.. */ | |
52 | return nv_rd32(dev, 0x00471c) * 1000; | |
53 | } | |
54 | ||
64e740bb | 55 | return dev_priv->crystal; |
378f85ed | 56 | } |
3b0582d3 BS |
57 | |
58 | sctl = nv_rd32(dev, 0x4120 + (clk * 4)); | |
ca94a71f BS |
59 | if (!ignore_en && !(sctl & 0x00000100)) |
60 | return 0; | |
61 | ||
62 | switch (sctl & 0x00003000) { | |
63 | case 0x00000000: | |
64e740bb | 64 | return dev_priv->crystal; |
ca94a71f | 65 | case 0x00002000: |
3b0582d3 BS |
66 | if (sctl & 0x00000040) |
67 | return 108000; | |
68 | return 100000; | |
ca94a71f BS |
69 | case 0x00003000: |
70 | sclk = read_vco(dev, clk); | |
3b0582d3 | 71 | sdiv = ((sctl & 0x003f0000) >> 16) + 2; |
3b0582d3 BS |
72 | return (sclk * 2) / sdiv; |
73 | default: | |
74 | return 0; | |
75 | } | |
76 | } | |
77 | ||
78 | static u32 | |
cec2a270 | 79 | read_pll(struct drm_device *dev, int clk, u32 pll) |
3b0582d3 BS |
80 | { |
81 | u32 ctrl = nv_rd32(dev, pll + 0); | |
93e692dc | 82 | u32 sclk = 0, P = 1, N = 1, M = 1; |
3b0582d3 BS |
83 | |
84 | if (!(ctrl & 0x00000008)) { | |
93e692dc BS |
85 | if (ctrl & 0x00000001) { |
86 | u32 coef = nv_rd32(dev, pll + 4); | |
87 | M = (coef & 0x000000ff) >> 0; | |
88 | N = (coef & 0x0000ff00) >> 8; | |
89 | P = (coef & 0x003f0000) >> 16; | |
cec2a270 | 90 | |
93e692dc BS |
91 | /* no post-divider on these.. */ |
92 | if ((pll & 0x00ff00) == 0x00e800) | |
93 | P = 1; | |
3b0582d3 | 94 | |
93e692dc BS |
95 | sclk = read_clk(dev, 0x00 + clk, false); |
96 | } | |
3b0582d3 | 97 | } else { |
ca94a71f | 98 | sclk = read_clk(dev, 0x10 + clk, false); |
3b0582d3 BS |
99 | } |
100 | ||
074e747a BS |
101 | if (M * P) |
102 | return sclk * N / (M * P); | |
103 | return 0; | |
3b0582d3 | 104 | } |
fade7ad5 | 105 | |
ca94a71f BS |
/* Pre-calculated settings for one clock domain: <clk> is the value for
 * the domain's 0x41xx source-control register, <pll> packs the PLL
 * coefficients as (P << 16) | (N << 8) | M (0 when the divider path is
 * used instead of the PLL).
 */
struct creg {
	u32 clk;
	u32 pll;
};
110 | ||
215f902e | 111 | static int |
cec2a270 | 112 | calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg) |
215f902e | 113 | { |
ca94a71f BS |
114 | struct pll_lims limits; |
115 | u32 oclk, sclk, sdiv; | |
116 | int P, N, M, diff; | |
117 | int ret; | |
118 | ||
119 | reg->pll = 0; | |
120 | reg->clk = 0; | |
cec2a270 BS |
121 | if (!khz) { |
122 | NV_DEBUG(dev, "no clock for 0x%04x/0x%02x\n", pll, clk); | |
123 | return 0; | |
124 | } | |
ca94a71f BS |
125 | |
126 | switch (khz) { | |
127 | case 27000: | |
128 | reg->clk = 0x00000100; | |
129 | return khz; | |
130 | case 100000: | |
131 | reg->clk = 0x00002100; | |
132 | return khz; | |
133 | case 108000: | |
134 | reg->clk = 0x00002140; | |
135 | return khz; | |
136 | default: | |
137 | sclk = read_vco(dev, clk); | |
138 | sdiv = min((sclk * 2) / (khz - 2999), (u32)65); | |
cec2a270 BS |
139 | /* if the clock has a PLL attached, and we can get a within |
140 | * [-2, 3) MHz of a divider, we'll disable the PLL and use | |
141 | * the divider instead. | |
142 | * | |
143 | * divider can go as low as 2, limited here because NVIDIA | |
144 | * and the VBIOS on my NVA8 seem to prefer using the PLL | |
145 | * for 810MHz - is there a good reason? | |
146 | */ | |
ca94a71f BS |
147 | if (sdiv > 4) { |
148 | oclk = (sclk * 2) / sdiv; | |
149 | diff = khz - oclk; | |
150 | if (!pll || (diff >= -2000 && diff < 3000)) { | |
151 | reg->clk = (((sdiv - 2) << 16) | 0x00003100); | |
152 | return oclk; | |
153 | } | |
154 | } | |
cec2a270 BS |
155 | |
156 | if (!pll) { | |
157 | NV_ERROR(dev, "bad freq %02x: %d %d\n", clk, khz, sclk); | |
158 | return -ERANGE; | |
159 | } | |
160 | ||
ca94a71f | 161 | break; |
215f902e BS |
162 | } |
163 | ||
ca94a71f BS |
164 | ret = get_pll_limits(dev, pll, &limits); |
165 | if (ret) | |
166 | return ret; | |
167 | ||
168 | limits.refclk = read_clk(dev, clk - 0x10, true); | |
169 | if (!limits.refclk) | |
170 | return -EINVAL; | |
171 | ||
172 | ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P); | |
173 | if (ret >= 0) { | |
174 | reg->clk = nv_rd32(dev, 0x4120 + (clk * 4)); | |
175 | reg->pll = (P << 16) | (N << 8) | M; | |
176 | } | |
177 | return ret; | |
215f902e BS |
178 | } |
179 | ||
cec2a270 BS |
/*
 * Program clock domain <clk> from the settings pre-calculated in *reg.
 * When reg->pll is non-zero the PLL at <pll> is reprogrammed and
 * selected; otherwise the plain divider/source path is used.  The
 * write/poll ordering is deliberate — do not reorder.
 */
static void
prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg)
{
	const u32 src0 = 0x004120 + (clk * 4);	/* primary source control */
	const u32 src1 = 0x004160 + (clk * 4);	/* alternate source control */
	const u32 ctrl = pll + 0;
	const u32 coef = pll + 4;

	if (!reg->clk && !reg->pll) {
		NV_DEBUG(dev, "no clock for %02x\n", clk);
		return;
	}

	if (reg->pll) {
		/* enable primary source while the PLL is reprogrammed */
		nv_mask(dev, src0, 0x00000101, 0x00000101);
		nv_wr32(dev, coef, reg->pll);
		nv_mask(dev, ctrl, 0x00000015, 0x00000015);
		nv_mask(dev, ctrl, 0x00000010, 0x00000000);
		/* poll bit 17 — presumably PLL lock — before re-asserting */
		nv_wait(dev, ctrl, 0x00020000, 0x00020000);
		nv_mask(dev, ctrl, 0x00000010, 0x00000010);
		nv_mask(dev, ctrl, 0x00000008, 0x00000000);
		/* the alternate source is no longer needed */
		nv_mask(dev, src1, 0x00000100, 0x00000000);
		nv_mask(dev, src1, 0x00000001, 0x00000000);
	} else {
		/* select the divider on the alternate source ... */
		nv_mask(dev, src1, 0x003f3141, 0x00000101 | reg->clk);
		nv_mask(dev, ctrl, 0x00000018, 0x00000018);
		udelay(20);
		/* ... then stop the PLL and the primary source */
		nv_mask(dev, ctrl, 0x00000001, 0x00000000);
		nv_mask(dev, src0, 0x00000100, 0x00000000);
		nv_mask(dev, src0, 0x00000001, 0x00000000);
	}
}
212 | ||
213 | static void | |
214 | prog_clk(struct drm_device *dev, int clk, struct creg *reg) | |
215 | { | |
216 | if (!reg->clk) { | |
217 | NV_DEBUG(dev, "no clock for %02x\n", clk); | |
218 | return; | |
219 | } | |
220 | ||
221 | nv_mask(dev, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | reg->clk); | |
222 | } | |
223 | ||
fade7ad5 | 224 | int |
ca94a71f | 225 | nva3_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) |
fade7ad5 | 226 | { |
cec2a270 BS |
227 | perflvl->core = read_pll(dev, 0x00, 0x4200); |
228 | perflvl->shader = read_pll(dev, 0x01, 0x4220); | |
229 | perflvl->memory = read_pll(dev, 0x02, 0x4000); | |
4fd2847e BS |
230 | perflvl->unka0 = read_clk(dev, 0x20, false); |
231 | perflvl->vdec = read_clk(dev, 0x21, false); | |
9698b9a6 BS |
232 | perflvl->daemon = read_clk(dev, 0x25, false); |
233 | perflvl->copy = perflvl->core; | |
ca94a71f | 234 | return 0; |
fade7ad5 BS |
235 | } |
236 | ||
/* State carried from nva3_pm_clocks_pre() to nva3_pm_clocks_set(). */
struct nva3_pm_state {
	struct nouveau_pm_level *perflvl;	/* target performance level */

	struct creg nclk;	/* core clock (PLL 0x4200) */
	struct creg sclk;	/* shader clock (PLL 0x4220) */
	struct creg vdec;	/* video decoder clock (domain 0x21) */
	struct creg unka0;	/* unknown domain 0x20 */

	struct creg mclk;	/* memory clock (PLL 0x4000) */
	u8 *rammap;		/* VBIOS rammap entry, NULL if unusable */
	u8 rammap_ver;
	u8 rammap_len;
	u8 *ramcfg;		/* VBIOS ramcfg entry, NULL if unusable */
	u8 ramcfg_len;
	u32 r004018;		/* value OR'd into writes of reg 0x004018 */
	u32 r100760;		/* value masked into regs 0x1007{6,a,e}0 */
};
254 | ||
fade7ad5 | 255 | void * |
ca94a71f | 256 | nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) |
fade7ad5 | 257 | { |
ca94a71f | 258 | struct nva3_pm_state *info; |
001a3990 | 259 | u8 ramcfg_cnt; |
ca94a71f | 260 | int ret; |
fade7ad5 | 261 | |
ca94a71f BS |
262 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
263 | if (!info) | |
264 | return ERR_PTR(-ENOMEM); | |
265 | ||
cec2a270 | 266 | ret = calc_clk(dev, 0x10, 0x4200, perflvl->core, &info->nclk); |
dac55b58 | 267 | if (ret < 0) |
ca94a71f | 268 | goto out; |
dac55b58 | 269 | |
cec2a270 | 270 | ret = calc_clk(dev, 0x11, 0x4220, perflvl->shader, &info->sclk); |
ca94a71f BS |
271 | if (ret < 0) |
272 | goto out; | |
dac55b58 | 273 | |
cec2a270 | 274 | ret = calc_clk(dev, 0x12, 0x4000, perflvl->memory, &info->mclk); |
ca94a71f BS |
275 | if (ret < 0) |
276 | goto out; | |
dac55b58 | 277 | |
cec2a270 | 278 | ret = calc_clk(dev, 0x20, 0x0000, perflvl->unka0, &info->unka0); |
4fd2847e BS |
279 | if (ret < 0) |
280 | goto out; | |
281 | ||
cec2a270 | 282 | ret = calc_clk(dev, 0x21, 0x0000, perflvl->vdec, &info->vdec); |
4fd2847e BS |
283 | if (ret < 0) |
284 | goto out; | |
285 | ||
001a3990 BS |
286 | info->rammap = nouveau_perf_rammap(dev, perflvl->memory, |
287 | &info->rammap_ver, | |
288 | &info->rammap_len, | |
289 | &ramcfg_cnt, &info->ramcfg_len); | |
290 | if (info->rammap_ver != 0x10 || info->rammap_len < 5) | |
291 | info->rammap = NULL; | |
292 | ||
293 | info->ramcfg = nouveau_perf_ramcfg(dev, perflvl->memory, | |
294 | &info->rammap_ver, | |
295 | &info->ramcfg_len); | |
296 | if (info->rammap_ver != 0x10) | |
297 | info->ramcfg = NULL; | |
298 | ||
65115bb0 | 299 | info->perflvl = perflvl; |
ca94a71f BS |
300 | out: |
301 | if (ret < 0) { | |
302 | kfree(info); | |
303 | info = ERR_PTR(ret); | |
fade7ad5 | 304 | } |
ca94a71f BS |
305 | return info; |
306 | } | |
fade7ad5 | 307 | |
d0f67a48 BS |
308 | static bool |
309 | nva3_pm_grcp_idle(void *data) | |
310 | { | |
311 | struct drm_device *dev = data; | |
312 | ||
313 | if (!(nv_rd32(dev, 0x400304) & 0x00000001)) | |
314 | return true; | |
315 | if (nv_rd32(dev, 0x400308) == 0x0050001c) | |
316 | return true; | |
317 | return false; | |
318 | } | |
319 | ||
65115bb0 BS |
/* mem_exec hook: issue a DRAM precharge via register 0x1002d4. */
static void
mclk_precharge(struct nouveau_mem_exec_func *exec)
{
	nv_wr32(exec->dev, 0x1002d4, 0x00000001);
}
325 | ||
/* mem_exec hook: issue a manual DRAM refresh via register 0x1002d0. */
static void
mclk_refresh(struct nouveau_mem_exec_func *exec)
{
	nv_wr32(exec->dev, 0x1002d0, 0x00000001);
}
331 | ||
332 | static void | |
333 | mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable) | |
334 | { | |
335 | nv_wr32(exec->dev, 0x100210, enable ? 0x80000000 : 0x00000000); | |
336 | } | |
337 | ||
338 | static void | |
339 | mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable) | |
340 | { | |
341 | nv_wr32(exec->dev, 0x1002dc, enable ? 0x00000001 : 0x00000000); | |
342 | } | |
343 | ||
/* mem_exec hook: delay for at least <nsec> nanoseconds.  The dummy
 * volatile read presumably flushes posted register writes before the
 * delay starts — confirm against nv_rd32 semantics. */
static void
mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
{
	volatile u32 post = nv_rd32(exec->dev, 0); (void)post;
	udelay((nsec + 500) / 1000);	/* round ns up to whole us */
}
350 | ||
351 | static u32 | |
352 | mclk_mrg(struct nouveau_mem_exec_func *exec, int mr) | |
353 | { | |
354 | if (mr <= 1) | |
355 | return nv_rd32(exec->dev, 0x1002c0 + ((mr - 0) * 4)); | |
356 | if (mr <= 3) | |
357 | return nv_rd32(exec->dev, 0x1002e0 + ((mr - 2) * 4)); | |
358 | return 0; | |
359 | } | |
360 | ||
361 | static void | |
362 | mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data) | |
363 | { | |
364 | struct drm_nouveau_private *dev_priv = exec->dev->dev_private; | |
365 | ||
366 | if (mr <= 1) { | |
367 | if (dev_priv->vram_rank_B) | |
368 | nv_wr32(exec->dev, 0x1002c8 + ((mr - 0) * 4), data); | |
369 | nv_wr32(exec->dev, 0x1002c0 + ((mr - 0) * 4), data); | |
370 | } else | |
371 | if (mr <= 3) { | |
372 | if (dev_priv->vram_rank_B) | |
373 | nv_wr32(exec->dev, 0x1002e8 + ((mr - 2) * 4), data); | |
374 | nv_wr32(exec->dev, 0x1002e0 + ((mr - 2) * 4), data); | |
375 | } | |
376 | } | |
377 | ||
/*
 * mem_exec hook: switch the memory clock to the pre-calculated setting.
 * Called by nouveau_mem_exec() while the memory controller is quiesced.
 * NOTE(review): bit 0x00000008 of reg 0x004000 appears to act as a PLL
 * bypass — confirm before changing the ordering below.
 */
static void
mclk_clock_set(struct nouveau_mem_exec_func *exec)
{
	struct drm_device *dev = exec->dev;
	struct nva3_pm_state *info = exec->priv;
	u32 ctrl;

	ctrl = nv_rd32(dev, 0x004000);
	if (!(ctrl & 0x00000008) && info->mclk.pll) {
		/* PLL in use and staying in use: bypass it, load the new
		 * coefficients, then restart */
		nv_wr32(dev, 0x004000, (ctrl |= 0x00000008));
		nv_mask(dev, 0x1110e0, 0x00088000, 0x00088000);
		nv_wr32(dev, 0x004018, 0x00001000);
		nv_wr32(dev, 0x004000, (ctrl &= ~0x00000001));
		nv_wr32(dev, 0x004004, info->mclk.pll);
		nv_wr32(dev, 0x004000, (ctrl |= 0x00000001));
		udelay(64);
		nv_wr32(dev, 0x004018, 0x00005000 | info->r004018);
		udelay(20);
	} else
	if (!info->mclk.pll) {
		/* new setting uses the divider path instead of the PLL */
		nv_mask(dev, 0x004168, 0x003f3040, info->mclk.clk);
		nv_wr32(dev, 0x004000, (ctrl |= 0x00000008));
		nv_mask(dev, 0x1110e0, 0x00088000, 0x00088000);
		nv_wr32(dev, 0x004018, 0x0000d000 | info->r004018);
	}

	/* apply VBIOS rammap/ramcfg derived tweaks; the register meanings
	 * are unknown, the values come straight from the BIOS tables */
	if (info->rammap) {
		if (info->ramcfg && (info->rammap[4] & 0x08)) {
			u32 unk5a0 = (ROM16(info->ramcfg[5]) << 8) |
				     info->ramcfg[5];
			u32 unk5a4 = ROM16(info->ramcfg[7]);
			u32 unk804 = (info->ramcfg[9] & 0xf0) << 16 |
				     (info->ramcfg[3] & 0x0f) << 16 |
				     (info->ramcfg[9] & 0x0f) |
				     0x80000000;
			nv_wr32(dev, 0x1005a0, unk5a0);
			nv_wr32(dev, 0x1005a4, unk5a4);
			nv_wr32(dev, 0x10f804, unk804);
			nv_mask(dev, 0x10053c, 0x00001000, 0x00000000);
		} else {
			nv_mask(dev, 0x10053c, 0x00001000, 0x00001000);
			nv_mask(dev, 0x10f804, 0x80000000, 0x00000000);
			nv_mask(dev, 0x100760, 0x22222222, info->r100760);
			nv_mask(dev, 0x1007a0, 0x22222222, info->r100760);
			nv_mask(dev, 0x1007e0, 0x22222222, info->r100760);
		}
	}

	/* un-bypass the PLL now that the new coefficients are active */
	if (info->mclk.pll) {
		nv_mask(dev, 0x1110e0, 0x00088000, 0x00011000);
		nv_wr32(dev, 0x004000, (ctrl &= ~0x00000008));
	}
}
431 | ||
/*
 * mem_exec hook: write the new memory timing set and apply ramcfg
 * dependent tweaks while the memory controller is quiesced.
 */
static void
mclk_timing_set(struct nouveau_mem_exec_func *exec)
{
	struct drm_device *dev = exec->dev;
	struct nva3_pm_state *info = exec->priv;
	struct nouveau_pm_level *perflvl = info->perflvl;
	int i;

	/* nine timing registers starting at 0x100220 */
	for (i = 0; i < 9; i++)
		nv_wr32(dev, 0x100220 + (i * 4), perflvl->timing.reg[i]);

	if (info->ramcfg) {
		u32 data = (info->ramcfg[2] & 0x08) ? 0x00000000 : 0x00001000;
		nv_mask(dev, 0x100200, 0x00001000, data);
	}

	if (info->ramcfg) {
		/* exact meaning of these bits is unknown; the values
		 * mirror what the VBIOS ramcfg entry requests */
		u32 unk714 = nv_rd32(dev, 0x100714) & ~0xf0000010;
		u32 unk718 = nv_rd32(dev, 0x100718) & ~0x00000100;
		u32 unk71c = nv_rd32(dev, 0x10071c) & ~0x00000100;
		if ( (info->ramcfg[2] & 0x20))
			unk714 |= 0xf0000000;
		if (!(info->ramcfg[2] & 0x04))
			unk714 |= 0x00000010;
		nv_wr32(dev, 0x100714, unk714);

		if (info->ramcfg[2] & 0x01)
			unk71c |= 0x00000100;
		nv_wr32(dev, 0x10071c, unk71c);

		if (info->ramcfg[2] & 0x02)
			unk718 |= 0x00000100;
		nv_wr32(dev, 0x100718, unk718);

		if (info->ramcfg[2] & 0x10)
			nv_wr32(dev, 0x111100, 0x48000000); /*XXX*/
	}
}
470 | ||
471 | static void | |
472 | prog_mem(struct drm_device *dev, struct nva3_pm_state *info) | |
473 | { | |
474 | struct nouveau_mem_exec_func exec = { | |
475 | .dev = dev, | |
476 | .precharge = mclk_precharge, | |
477 | .refresh = mclk_refresh, | |
478 | .refresh_auto = mclk_refresh_auto, | |
479 | .refresh_self = mclk_refresh_self, | |
480 | .wait = mclk_wait, | |
481 | .mrg = mclk_mrg, | |
482 | .mrs = mclk_mrs, | |
483 | .clock_set = mclk_clock_set, | |
484 | .timing_set = mclk_timing_set, | |
485 | .priv = info | |
486 | }; | |
4719b55b BS |
487 | u32 ctrl; |
488 | ||
19a1e477 BS |
489 | /* XXX: where the fuck does 750MHz come from? */ |
490 | if (info->perflvl->memory <= 750000) { | |
491 | info->r004018 = 0x10000000; | |
492 | info->r100760 = 0x22222222; | |
493 | } | |
494 | ||
4719b55b BS |
495 | ctrl = nv_rd32(dev, 0x004000); |
496 | if (ctrl & 0x00000008) { | |
497 | if (info->mclk.pll) { | |
498 | nv_mask(dev, 0x004128, 0x00000101, 0x00000101); | |
499 | nv_wr32(dev, 0x004004, info->mclk.pll); | |
500 | nv_wr32(dev, 0x004000, (ctrl |= 0x00000001)); | |
501 | nv_wr32(dev, 0x004000, (ctrl &= 0xffffffef)); | |
502 | nv_wait(dev, 0x004000, 0x00020000, 0x00020000); | |
503 | nv_wr32(dev, 0x004000, (ctrl |= 0x00000010)); | |
19a1e477 | 504 | nv_wr32(dev, 0x004018, 0x00005000 | info->r004018); |
4719b55b BS |
505 | nv_wr32(dev, 0x004000, (ctrl |= 0x00000004)); |
506 | } | |
507 | } else { | |
5f54d29e BS |
508 | u32 ssel = 0x00000101; |
509 | if (info->mclk.clk) | |
510 | ssel |= info->mclk.clk; | |
511 | else | |
512 | ssel |= 0x00080000; /* 324MHz, shouldn't matter... */ | |
513 | nv_mask(dev, 0x004168, 0x003f3141, ctrl); | |
4719b55b | 514 | } |
65115bb0 | 515 | |
2b20fd0a BS |
516 | if (info->ramcfg) { |
517 | if (info->ramcfg[2] & 0x10) { | |
518 | nv_mask(dev, 0x111104, 0x00000600, 0x00000000); | |
519 | } else { | |
520 | nv_mask(dev, 0x111100, 0x40000000, 0x40000000); | |
521 | nv_mask(dev, 0x111104, 0x00000180, 0x00000000); | |
522 | } | |
523 | } | |
001a3990 BS |
524 | if (info->rammap && !(info->rammap[4] & 0x02)) |
525 | nv_mask(dev, 0x100200, 0x00000800, 0x00000000); | |
65115bb0 | 526 | nv_wr32(dev, 0x611200, 0x00003300); |
2b20fd0a BS |
527 | if (!(info->ramcfg[2] & 0x10)) |
528 | nv_wr32(dev, 0x111100, 0x4c020000); /*XXX*/ | |
001a3990 | 529 | |
65115bb0 | 530 | nouveau_mem_exec(&exec, info->perflvl); |
001a3990 | 531 | |
65115bb0 | 532 | nv_wr32(dev, 0x611200, 0x00003330); |
001a3990 BS |
533 | if (info->rammap && (info->rammap[4] & 0x02)) |
534 | nv_mask(dev, 0x100200, 0x00000800, 0x00000800); | |
2b20fd0a BS |
535 | if (info->ramcfg) { |
536 | if (info->ramcfg[2] & 0x10) { | |
537 | nv_mask(dev, 0x111104, 0x00000180, 0x00000180); | |
538 | nv_mask(dev, 0x111100, 0x40000000, 0x00000000); | |
539 | } else { | |
540 | nv_mask(dev, 0x111104, 0x00000600, 0x00000600); | |
541 | } | |
542 | } | |
4719b55b BS |
543 | |
544 | if (info->mclk.pll) { | |
545 | nv_mask(dev, 0x004168, 0x00000001, 0x00000000); | |
546 | nv_mask(dev, 0x004168, 0x00000100, 0x00000000); | |
547 | } else { | |
548 | nv_mask(dev, 0x004000, 0x00000001, 0x00000000); | |
549 | nv_mask(dev, 0x004128, 0x00000001, 0x00000000); | |
550 | nv_mask(dev, 0x004128, 0x00000100, 0x00000000); | |
551 | } | |
65115bb0 BS |
552 | } |
553 | ||
/*
 * Apply the clock settings pre-calculated by nva3_pm_clocks_pre().
 * Graphics context switches and PFIFO are frozen around the switch so
 * nothing touches the engines while their clocks change.  Always frees
 * <pre_state>.  Returns 0 on success, -EAGAIN if the engines could not
 * be idled.
 */
int
nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nva3_pm_state *info = pre_state;
	unsigned long flags;
	int ret = -EAGAIN;

	/* prevent any new grctx switches from starting */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_wr32(dev, 0x400324, 0x00000000);
	nv_wr32(dev, 0x400328, 0x0050001c); /* wait flag 0x1c */
	/* wait for any pending grctx switches to complete */
	if (!nv_wait_cb(dev, nva3_pm_grcp_idle, dev)) {
		NV_ERROR(dev, "pm: ctxprog didn't go idle\n");
		goto cleanup;
	}
	/* freeze PFIFO */
	nv_mask(dev, 0x002504, 0x00000001, 0x00000001);
	if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010)) {
		NV_ERROR(dev, "pm: fifo didn't go idle\n");
		goto cleanup;
	}

	/* engines are quiet: program the new clocks */
	prog_pll(dev, 0x00, 0x004200, &info->nclk);
	prog_pll(dev, 0x01, 0x004220, &info->sclk);
	prog_clk(dev, 0x20, &info->unka0);
	prog_clk(dev, 0x21, &info->vdec);

	/* memory reclock only when a new setting was calculated */
	if (info->mclk.clk || info->mclk.pll)
		prog_mem(dev, info);

	ret = 0;

cleanup:
	/* unfreeze PFIFO */
	nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
	/* restore ctxprog to normal */
	nv_wr32(dev, 0x400324, 0x00000000);
	nv_wr32(dev, 0x400328, 0x0070009c); /* set flag 0x1c */
	/* unblock it if necessary */
	if (nv_rd32(dev, 0x400308) == 0x0050001c)
		nv_mask(dev, 0x400824, 0x10000000, 0x10000000);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
	kfree(info);
	return ret;
}