Merge branch 'drm-next-3.18' of git://people.freedesktop.org/~agd5f/linux into drm...
[linux-2.6-block.git] / drivers / gpu / drm / radeon / radeon_device.c
CommitLineData
771fe6b9
JG
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/console.h>
5a0e3ad6 29#include <linux/slab.h>
771fe6b9
JG
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
28d52043 33#include <linux/vgaarb.h>
6a9ee8af 34#include <linux/vga_switcheroo.h>
bcc65fd8 35#include <linux/efi.h>
771fe6b9
JG
36#include "radeon_reg.h"
37#include "radeon.h"
771fe6b9
JG
38#include "atom.h"
39
/* Human-readable chip family names, indexed by enum radeon_family.
 * Must stay in sync with the radeon_family enum; "LAST" is a sentinel.
 */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"LAST",
};
105
4807c5a8
AD
106#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
107#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
108
109struct radeon_px_quirk {
110 u32 chip_vendor;
111 u32 chip_device;
112 u32 subsys_vendor;
113 u32 subsys_device;
114 u32 px_quirk_flags;
115};
116
117static struct radeon_px_quirk radeon_px_quirk_list[] = {
118 /* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
119 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
120 */
121 { PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
122 /* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
123 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
124 */
125 { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
126 /* macbook pro 8.2 */
127 { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
128 { 0, 0, 0, 0, 0 },
129};
130
90c4cde9
AD
131bool radeon_is_px(struct drm_device *dev)
132{
133 struct radeon_device *rdev = dev->dev_private;
134
135 if (rdev->flags & RADEON_IS_PX)
136 return true;
137 return false;
138}
10ebc0bc 139
4807c5a8
AD
140static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
141{
142 struct radeon_px_quirk *p = radeon_px_quirk_list;
143
144 /* Apply PX quirks */
145 while (p && p->chip_device != 0) {
146 if (rdev->pdev->vendor == p->chip_vendor &&
147 rdev->pdev->device == p->chip_device &&
148 rdev->pdev->subsystem_vendor == p->subsys_vendor &&
149 rdev->pdev->subsystem_device == p->subsys_device) {
150 rdev->px_quirk_flags = p->px_quirk_flags;
151 break;
152 }
153 ++p;
154 }
155
156 if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
157 rdev->flags &= ~RADEON_IS_PX;
158}
159
2e1b65f9
AD
160/**
161 * radeon_program_register_sequence - program an array of registers.
162 *
163 * @rdev: radeon_device pointer
164 * @registers: pointer to the register array
165 * @array_size: size of the register array
166 *
167 * Programs an array or registers with and and or masks.
168 * This is a helper for setting golden registers.
169 */
170void radeon_program_register_sequence(struct radeon_device *rdev,
171 const u32 *registers,
172 const u32 array_size)
173{
174 u32 tmp, reg, and_mask, or_mask;
175 int i;
176
177 if (array_size % 3)
178 return;
179
180 for (i = 0; i < array_size; i +=3) {
181 reg = registers[i + 0];
182 and_mask = registers[i + 1];
183 or_mask = registers[i + 2];
184
185 if (and_mask == 0xffffffff) {
186 tmp = or_mask;
187 } else {
188 tmp = RREG32(reg);
189 tmp &= ~and_mask;
190 tmp |= or_mask;
191 }
192 WREG32(reg, tmp);
193 }
194}
195
1a0041b8
AD
196void radeon_pci_config_reset(struct radeon_device *rdev)
197{
198 pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
199}
200
0c195119
AD
201/**
202 * radeon_surface_init - Clear GPU surface registers.
203 *
204 * @rdev: radeon_device pointer
205 *
206 * Clear GPU surface registers (r1xx-r5xx).
b1e3a6d1 207 */
3ce0a23d 208void radeon_surface_init(struct radeon_device *rdev)
b1e3a6d1
MD
209{
210 /* FIXME: check this out */
211 if (rdev->family < CHIP_R600) {
212 int i;
213
550e2d92
DA
214 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
215 if (rdev->surface_regs[i].bo)
216 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
217 else
218 radeon_clear_surface_reg(rdev, i);
b1e3a6d1 219 }
e024e110
DA
220 /* enable surfaces */
221 WREG32(RADEON_SURFACE_CNTL, 0);
b1e3a6d1
MD
222 }
223}
224
771fe6b9
JG
225/*
226 * GPU scratch registers helpers function.
227 */
0c195119
AD
228/**
229 * radeon_scratch_init - Init scratch register driver information.
230 *
231 * @rdev: radeon_device pointer
232 *
233 * Init CP scratch register driver information (r1xx-r5xx)
234 */
3ce0a23d 235void radeon_scratch_init(struct radeon_device *rdev)
771fe6b9
JG
236{
237 int i;
238
239 /* FIXME: check this out */
240 if (rdev->family < CHIP_R300) {
241 rdev->scratch.num_reg = 5;
242 } else {
243 rdev->scratch.num_reg = 7;
244 }
724c80e1 245 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
771fe6b9
JG
246 for (i = 0; i < rdev->scratch.num_reg; i++) {
247 rdev->scratch.free[i] = true;
724c80e1 248 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
771fe6b9
JG
249 }
250}
251
0c195119
AD
252/**
253 * radeon_scratch_get - Allocate a scratch register
254 *
255 * @rdev: radeon_device pointer
256 * @reg: scratch register mmio offset
257 *
258 * Allocate a CP scratch register for use by the driver (all asics).
259 * Returns 0 on success or -EINVAL on failure.
260 */
771fe6b9
JG
261int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
262{
263 int i;
264
265 for (i = 0; i < rdev->scratch.num_reg; i++) {
266 if (rdev->scratch.free[i]) {
267 rdev->scratch.free[i] = false;
268 *reg = rdev->scratch.reg[i];
269 return 0;
270 }
271 }
272 return -EINVAL;
273}
274
0c195119
AD
275/**
276 * radeon_scratch_free - Free a scratch register
277 *
278 * @rdev: radeon_device pointer
279 * @reg: scratch register mmio offset
280 *
281 * Free a CP scratch register allocated for use by the driver (all asics)
282 */
771fe6b9
JG
283void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
284{
285 int i;
286
287 for (i = 0; i < rdev->scratch.num_reg; i++) {
288 if (rdev->scratch.reg[i] == reg) {
289 rdev->scratch.free[i] = true;
290 return;
291 }
292 }
293}
294
75efdee1
AD
295/*
296 * GPU doorbell aperture helpers function.
297 */
298/**
299 * radeon_doorbell_init - Init doorbell driver information.
300 *
301 * @rdev: radeon_device pointer
302 *
303 * Init doorbell driver information (CIK)
304 * Returns 0 on success, error on failure.
305 */
28f5a6cd 306static int radeon_doorbell_init(struct radeon_device *rdev)
75efdee1 307{
75efdee1
AD
308 /* doorbell bar mapping */
309 rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
310 rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
311
d5754ab8
AL
312 rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
313 if (rdev->doorbell.num_doorbells == 0)
314 return -EINVAL;
75efdee1 315
d5754ab8 316 rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
75efdee1
AD
317 if (rdev->doorbell.ptr == NULL) {
318 return -ENOMEM;
319 }
320 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
321 DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
322
d5754ab8 323 memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
75efdee1 324
75efdee1
AD
325 return 0;
326}
327
328/**
329 * radeon_doorbell_fini - Tear down doorbell driver information.
330 *
331 * @rdev: radeon_device pointer
332 *
333 * Tear down doorbell driver information (CIK)
334 */
28f5a6cd 335static void radeon_doorbell_fini(struct radeon_device *rdev)
75efdee1
AD
336{
337 iounmap(rdev->doorbell.ptr);
338 rdev->doorbell.ptr = NULL;
339}
340
341/**
d5754ab8 342 * radeon_doorbell_get - Allocate a doorbell entry
75efdee1
AD
343 *
344 * @rdev: radeon_device pointer
d5754ab8 345 * @doorbell: doorbell index
75efdee1 346 *
d5754ab8 347 * Allocate a doorbell for use by the driver (all asics).
75efdee1
AD
348 * Returns 0 on success or -EINVAL on failure.
349 */
350int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
351{
d5754ab8
AL
352 unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
353 if (offset < rdev->doorbell.num_doorbells) {
354 __set_bit(offset, rdev->doorbell.used);
355 *doorbell = offset;
356 return 0;
357 } else {
358 return -EINVAL;
75efdee1 359 }
75efdee1
AD
360}
361
362/**
d5754ab8 363 * radeon_doorbell_free - Free a doorbell entry
75efdee1
AD
364 *
365 * @rdev: radeon_device pointer
d5754ab8 366 * @doorbell: doorbell index
75efdee1 367 *
d5754ab8 368 * Free a doorbell allocated for use by the driver (all asics)
75efdee1
AD
369 */
370void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
371{
d5754ab8
AL
372 if (doorbell < rdev->doorbell.num_doorbells)
373 __clear_bit(doorbell, rdev->doorbell.used);
75efdee1
AD
374}
375
0c195119
AD
376/*
377 * radeon_wb_*()
378 * Writeback is the the method by which the the GPU updates special pages
379 * in memory with the status of certain GPU events (fences, ring pointers,
380 * etc.).
381 */
382
383/**
384 * radeon_wb_disable - Disable Writeback
385 *
386 * @rdev: radeon_device pointer
387 *
388 * Disables Writeback (all asics). Used for suspend.
389 */
724c80e1
AD
390void radeon_wb_disable(struct radeon_device *rdev)
391{
724c80e1
AD
392 rdev->wb.enabled = false;
393}
394
0c195119
AD
395/**
396 * radeon_wb_fini - Disable Writeback and free memory
397 *
398 * @rdev: radeon_device pointer
399 *
400 * Disables Writeback and frees the Writeback memory (all asics).
401 * Used at driver shutdown.
402 */
724c80e1
AD
403void radeon_wb_fini(struct radeon_device *rdev)
404{
405 radeon_wb_disable(rdev);
406 if (rdev->wb.wb_obj) {
089920f2
JG
407 if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
408 radeon_bo_kunmap(rdev->wb.wb_obj);
409 radeon_bo_unpin(rdev->wb.wb_obj);
410 radeon_bo_unreserve(rdev->wb.wb_obj);
411 }
724c80e1
AD
412 radeon_bo_unref(&rdev->wb.wb_obj);
413 rdev->wb.wb = NULL;
414 rdev->wb.wb_obj = NULL;
415 }
416}
417
0c195119
AD
418/**
419 * radeon_wb_init- Init Writeback driver info and allocate memory
420 *
421 * @rdev: radeon_device pointer
422 *
423 * Disables Writeback and frees the Writeback memory (all asics).
424 * Used at driver startup.
425 * Returns 0 on success or an -error on failure.
426 */
724c80e1
AD
427int radeon_wb_init(struct radeon_device *rdev)
428{
429 int r;
430
431 if (rdev->wb.wb_obj == NULL) {
441921d5 432 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
02376d82
MD
433 RADEON_GEM_DOMAIN_GTT, 0, NULL,
434 &rdev->wb.wb_obj);
724c80e1
AD
435 if (r) {
436 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
437 return r;
438 }
089920f2
JG
439 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
440 if (unlikely(r != 0)) {
441 radeon_wb_fini(rdev);
442 return r;
443 }
444 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
445 &rdev->wb.gpu_addr);
446 if (r) {
447 radeon_bo_unreserve(rdev->wb.wb_obj);
448 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
449 radeon_wb_fini(rdev);
450 return r;
451 }
452 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
724c80e1 453 radeon_bo_unreserve(rdev->wb.wb_obj);
089920f2
JG
454 if (r) {
455 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
456 radeon_wb_fini(rdev);
457 return r;
458 }
724c80e1
AD
459 }
460
e6ba7599
AD
461 /* clear wb memory */
462 memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
d0f8a854
AD
463 /* disable event_write fences */
464 rdev->wb.use_event = false;
724c80e1 465 /* disabled via module param */
3b7a2b24 466 if (radeon_no_wb == 1) {
724c80e1 467 rdev->wb.enabled = false;
3b7a2b24 468 } else {
724c80e1 469 if (rdev->flags & RADEON_IS_AGP) {
28eebb70
AD
470 /* often unreliable on AGP */
471 rdev->wb.enabled = false;
472 } else if (rdev->family < CHIP_R300) {
473 /* often unreliable on pre-r300 */
724c80e1 474 rdev->wb.enabled = false;
d0f8a854 475 } else {
724c80e1 476 rdev->wb.enabled = true;
d0f8a854 477 /* event_write fences are only available on r600+ */
3b7a2b24 478 if (rdev->family >= CHIP_R600) {
d0f8a854 479 rdev->wb.use_event = true;
3b7a2b24 480 }
d0f8a854 481 }
724c80e1 482 }
c994ead6
AD
483 /* always use writeback/events on NI, APUs */
484 if (rdev->family >= CHIP_PALM) {
7d52785d
AD
485 rdev->wb.enabled = true;
486 rdev->wb.use_event = true;
487 }
724c80e1
AD
488
489 dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
490
491 return 0;
492}
493
d594e46a
JG
494/**
495 * radeon_vram_location - try to find VRAM location
496 * @rdev: radeon device structure holding all necessary informations
497 * @mc: memory controller structure holding memory informations
498 * @base: base address at which to put VRAM
499 *
500 * Function will place try to place VRAM at base address provided
501 * as parameter (which is so far either PCI aperture address or
502 * for IGP TOM base address).
503 *
504 * If there is not enough space to fit the unvisible VRAM in the 32bits
505 * address space then we limit the VRAM size to the aperture.
506 *
507 * If we are using AGP and if the AGP aperture doesn't allow us to have
508 * room for all the VRAM than we restrict the VRAM to the PCI aperture
509 * size and print a warning.
510 *
511 * This function will never fails, worst case are limiting VRAM.
512 *
513 * Note: GTT start, end, size should be initialized before calling this
514 * function on AGP platform.
515 *
25985edc 516 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
d594e46a
JG
517 * this shouldn't be a problem as we are using the PCI aperture as a reference.
518 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
519 * not IGP.
520 *
521 * Note: we use mc_vram_size as on some board we need to program the mc to
522 * cover the whole aperture even if VRAM size is inferior to aperture size
523 * Novell bug 204882 + along with lots of ubuntu ones
524 *
525 * Note: when limiting vram it's safe to overwritte real_vram_size because
526 * we are not in case where real_vram_size is inferior to mc_vram_size (ie
527 * note afected by bogus hw of Novell bug 204882 + along with lots of ubuntu
528 * ones)
529 *
530 * Note: IGP TOM addr should be the same as the aperture addr, we don't
531 * explicitly check for that thought.
532 *
533 * FIXME: when reducing VRAM size align new size on power of 2.
771fe6b9 534 */
d594e46a 535void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
771fe6b9 536{
1bcb04f7
CK
537 uint64_t limit = (uint64_t)radeon_vram_limit << 20;
538
d594e46a 539 mc->vram_start = base;
9ed8b1f9 540 if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
d594e46a
JG
541 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
542 mc->real_vram_size = mc->aper_size;
543 mc->mc_vram_size = mc->aper_size;
544 }
545 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
2cbeb4ef 546 if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
d594e46a
JG
547 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
548 mc->real_vram_size = mc->aper_size;
549 mc->mc_vram_size = mc->aper_size;
550 }
551 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
1bcb04f7
CK
552 if (limit && limit < mc->real_vram_size)
553 mc->real_vram_size = limit;
dd7cc55a 554 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
d594e46a
JG
555 mc->mc_vram_size >> 20, mc->vram_start,
556 mc->vram_end, mc->real_vram_size >> 20);
557}
771fe6b9 558
d594e46a
JG
559/**
560 * radeon_gtt_location - try to find GTT location
561 * @rdev: radeon device structure holding all necessary informations
562 * @mc: memory controller structure holding memory informations
563 *
564 * Function will place try to place GTT before or after VRAM.
565 *
566 * If GTT size is bigger than space left then we ajust GTT size.
567 * Thus function will never fails.
568 *
569 * FIXME: when reducing GTT size align new size on power of 2.
570 */
571void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
572{
573 u64 size_af, size_bf;
574
9ed8b1f9 575 size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
8d369bb1 576 size_bf = mc->vram_start & ~mc->gtt_base_align;
d594e46a
JG
577 if (size_bf > size_af) {
578 if (mc->gtt_size > size_bf) {
579 dev_warn(rdev->dev, "limiting GTT\n");
580 mc->gtt_size = size_bf;
771fe6b9 581 }
8d369bb1 582 mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
771fe6b9 583 } else {
d594e46a
JG
584 if (mc->gtt_size > size_af) {
585 dev_warn(rdev->dev, "limiting GTT\n");
586 mc->gtt_size = size_af;
587 }
8d369bb1 588 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
771fe6b9 589 }
d594e46a 590 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
dd7cc55a 591 dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
d594e46a 592 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
771fe6b9
JG
593}
594
771fe6b9
JG
595/*
596 * GPU helpers function.
597 */
0c195119
AD
598/**
599 * radeon_card_posted - check if the hw has already been initialized
600 *
601 * @rdev: radeon_device pointer
602 *
603 * Check if the asic has been initialized (all asics).
604 * Used at driver startup.
605 * Returns true if initialized or false if not.
606 */
9f022ddf 607bool radeon_card_posted(struct radeon_device *rdev)
771fe6b9
JG
608{
609 uint32_t reg;
610
50a583f6 611 /* required for EFI mode on macbook2,1 which uses an r5xx asic */
83e68189 612 if (efi_enabled(EFI_BOOT) &&
50a583f6
AD
613 (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
614 (rdev->family < CHIP_R600))
bcc65fd8
MG
615 return false;
616
2cf3a4fc
AD
617 if (ASIC_IS_NODCE(rdev))
618 goto check_memsize;
619
771fe6b9 620 /* first check CRTCs */
09fb8bd1 621 if (ASIC_IS_DCE4(rdev)) {
18007401
AD
622 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
623 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
09fb8bd1
AD
624 if (rdev->num_crtc >= 4) {
625 reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
626 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
627 }
628 if (rdev->num_crtc >= 6) {
629 reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
630 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
631 }
bcc1c2a1
AD
632 if (reg & EVERGREEN_CRTC_MASTER_EN)
633 return true;
634 } else if (ASIC_IS_AVIVO(rdev)) {
771fe6b9
JG
635 reg = RREG32(AVIVO_D1CRTC_CONTROL) |
636 RREG32(AVIVO_D2CRTC_CONTROL);
637 if (reg & AVIVO_CRTC_EN) {
638 return true;
639 }
640 } else {
641 reg = RREG32(RADEON_CRTC_GEN_CNTL) |
642 RREG32(RADEON_CRTC2_GEN_CNTL);
643 if (reg & RADEON_CRTC_EN) {
644 return true;
645 }
646 }
647
2cf3a4fc 648check_memsize:
771fe6b9
JG
649 /* then check MEM_SIZE, in case the crtcs are off */
650 if (rdev->family >= CHIP_R600)
651 reg = RREG32(R600_CONFIG_MEMSIZE);
652 else
653 reg = RREG32(RADEON_CONFIG_MEMSIZE);
654
655 if (reg)
656 return true;
657
658 return false;
659
660}
661
0c195119
AD
662/**
663 * radeon_update_bandwidth_info - update display bandwidth params
664 *
665 * @rdev: radeon_device pointer
666 *
667 * Used when sclk/mclk are switched or display modes are set.
668 * params are used to calculate display watermarks (all asics)
669 */
f47299c5
AD
670void radeon_update_bandwidth_info(struct radeon_device *rdev)
671{
672 fixed20_12 a;
8807286e
AD
673 u32 sclk = rdev->pm.current_sclk;
674 u32 mclk = rdev->pm.current_mclk;
f47299c5 675
8807286e
AD
676 /* sclk/mclk in Mhz */
677 a.full = dfixed_const(100);
678 rdev->pm.sclk.full = dfixed_const(sclk);
679 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
680 rdev->pm.mclk.full = dfixed_const(mclk);
681 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
f47299c5 682
8807286e 683 if (rdev->flags & RADEON_IS_IGP) {
68adac5e 684 a.full = dfixed_const(16);
f47299c5 685 /* core_bandwidth = sclk(Mhz) * 16 */
68adac5e 686 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
f47299c5
AD
687 }
688}
689
0c195119
AD
690/**
691 * radeon_boot_test_post_card - check and possibly initialize the hw
692 *
693 * @rdev: radeon_device pointer
694 *
695 * Check if the asic is initialized and if not, attempt to initialize
696 * it (all asics).
697 * Returns true if initialized or false if not.
698 */
72542d77
DA
699bool radeon_boot_test_post_card(struct radeon_device *rdev)
700{
701 if (radeon_card_posted(rdev))
702 return true;
703
704 if (rdev->bios) {
705 DRM_INFO("GPU not posted. posting now...\n");
706 if (rdev->is_atom_bios)
707 atom_asic_init(rdev->mode_info.atom_context);
708 else
709 radeon_combios_asic_init(rdev->ddev);
710 return true;
711 } else {
712 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
713 return false;
714 }
715}
716
0c195119
AD
717/**
718 * radeon_dummy_page_init - init dummy page used by the driver
719 *
720 * @rdev: radeon_device pointer
721 *
722 * Allocate the dummy page used by the driver (all asics).
723 * This dummy page is used by the driver as a filler for gart entries
724 * when pages are taken out of the GART
725 * Returns 0 on sucess, -ENOMEM on failure.
726 */
3ce0a23d
JG
727int radeon_dummy_page_init(struct radeon_device *rdev)
728{
82568565
DA
729 if (rdev->dummy_page.page)
730 return 0;
3ce0a23d
JG
731 rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
732 if (rdev->dummy_page.page == NULL)
733 return -ENOMEM;
734 rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
735 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
a30f6fb7
BH
736 if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
737 dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
3ce0a23d
JG
738 __free_page(rdev->dummy_page.page);
739 rdev->dummy_page.page = NULL;
740 return -ENOMEM;
741 }
742 return 0;
743}
744
0c195119
AD
745/**
746 * radeon_dummy_page_fini - free dummy page used by the driver
747 *
748 * @rdev: radeon_device pointer
749 *
750 * Frees the dummy page used by the driver (all asics).
751 */
3ce0a23d
JG
752void radeon_dummy_page_fini(struct radeon_device *rdev)
753{
754 if (rdev->dummy_page.page == NULL)
755 return;
756 pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
757 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
758 __free_page(rdev->dummy_page.page);
759 rdev->dummy_page.page = NULL;
760}
761
771fe6b9 762
771fe6b9 763/* ATOM accessor methods */
0c195119
AD
764/*
765 * ATOM is an interpreted byte code stored in tables in the vbios. The
766 * driver registers callbacks to access registers and the interpreter
767 * in the driver parses the tables and executes then to program specific
768 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
769 * atombios.h, and atom.c
770 */
771
772/**
773 * cail_pll_read - read PLL register
774 *
775 * @info: atom card_info pointer
776 * @reg: PLL register offset
777 *
778 * Provides a PLL register accessor for the atom interpreter (r4xx+).
779 * Returns the value of the PLL register.
780 */
771fe6b9
JG
781static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
782{
783 struct radeon_device *rdev = info->dev->dev_private;
784 uint32_t r;
785
786 r = rdev->pll_rreg(rdev, reg);
787 return r;
788}
789
0c195119
AD
790/**
791 * cail_pll_write - write PLL register
792 *
793 * @info: atom card_info pointer
794 * @reg: PLL register offset
795 * @val: value to write to the pll register
796 *
797 * Provides a PLL register accessor for the atom interpreter (r4xx+).
798 */
771fe6b9
JG
799static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
800{
801 struct radeon_device *rdev = info->dev->dev_private;
802
803 rdev->pll_wreg(rdev, reg, val);
804}
805
0c195119
AD
806/**
807 * cail_mc_read - read MC (Memory Controller) register
808 *
809 * @info: atom card_info pointer
810 * @reg: MC register offset
811 *
812 * Provides an MC register accessor for the atom interpreter (r4xx+).
813 * Returns the value of the MC register.
814 */
771fe6b9
JG
815static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
816{
817 struct radeon_device *rdev = info->dev->dev_private;
818 uint32_t r;
819
820 r = rdev->mc_rreg(rdev, reg);
821 return r;
822}
823
0c195119
AD
824/**
825 * cail_mc_write - write MC (Memory Controller) register
826 *
827 * @info: atom card_info pointer
828 * @reg: MC register offset
829 * @val: value to write to the pll register
830 *
831 * Provides a MC register accessor for the atom interpreter (r4xx+).
832 */
771fe6b9
JG
833static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
834{
835 struct radeon_device *rdev = info->dev->dev_private;
836
837 rdev->mc_wreg(rdev, reg, val);
838}
839
0c195119
AD
840/**
841 * cail_reg_write - write MMIO register
842 *
843 * @info: atom card_info pointer
844 * @reg: MMIO register offset
845 * @val: value to write to the pll register
846 *
847 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
848 */
771fe6b9
JG
849static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
850{
851 struct radeon_device *rdev = info->dev->dev_private;
852
853 WREG32(reg*4, val);
854}
855
0c195119
AD
856/**
857 * cail_reg_read - read MMIO register
858 *
859 * @info: atom card_info pointer
860 * @reg: MMIO register offset
861 *
862 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
863 * Returns the value of the MMIO register.
864 */
771fe6b9
JG
865static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
866{
867 struct radeon_device *rdev = info->dev->dev_private;
868 uint32_t r;
869
870 r = RREG32(reg*4);
871 return r;
872}
873
0c195119
AD
874/**
875 * cail_ioreg_write - write IO register
876 *
877 * @info: atom card_info pointer
878 * @reg: IO register offset
879 * @val: value to write to the pll register
880 *
881 * Provides a IO register accessor for the atom interpreter (r4xx+).
882 */
351a52a2
AD
883static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
884{
885 struct radeon_device *rdev = info->dev->dev_private;
886
887 WREG32_IO(reg*4, val);
888}
889
0c195119
AD
890/**
891 * cail_ioreg_read - read IO register
892 *
893 * @info: atom card_info pointer
894 * @reg: IO register offset
895 *
896 * Provides an IO register accessor for the atom interpreter (r4xx+).
897 * Returns the value of the IO register.
898 */
351a52a2
AD
899static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
900{
901 struct radeon_device *rdev = info->dev->dev_private;
902 uint32_t r;
903
904 r = RREG32_IO(reg*4);
905 return r;
906}
907
0c195119
AD
908/**
909 * radeon_atombios_init - init the driver info and callbacks for atombios
910 *
911 * @rdev: radeon_device pointer
912 *
913 * Initializes the driver info and register access callbacks for the
914 * ATOM interpreter (r4xx+).
915 * Returns 0 on sucess, -ENOMEM on failure.
916 * Called at driver startup.
917 */
771fe6b9
JG
918int radeon_atombios_init(struct radeon_device *rdev)
919{
61c4b24b
MF
920 struct card_info *atom_card_info =
921 kzalloc(sizeof(struct card_info), GFP_KERNEL);
922
923 if (!atom_card_info)
924 return -ENOMEM;
925
926 rdev->mode_info.atom_card_info = atom_card_info;
927 atom_card_info->dev = rdev->ddev;
928 atom_card_info->reg_read = cail_reg_read;
929 atom_card_info->reg_write = cail_reg_write;
351a52a2
AD
930 /* needed for iio ops */
931 if (rdev->rio_mem) {
932 atom_card_info->ioreg_read = cail_ioreg_read;
933 atom_card_info->ioreg_write = cail_ioreg_write;
934 } else {
935 DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
936 atom_card_info->ioreg_read = cail_reg_read;
937 atom_card_info->ioreg_write = cail_reg_write;
938 }
61c4b24b
MF
939 atom_card_info->mc_read = cail_mc_read;
940 atom_card_info->mc_write = cail_mc_write;
941 atom_card_info->pll_read = cail_pll_read;
942 atom_card_info->pll_write = cail_pll_write;
943
944 rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
0e34d094
TG
945 if (!rdev->mode_info.atom_context) {
946 radeon_atombios_fini(rdev);
947 return -ENOMEM;
948 }
949
c31ad97f 950 mutex_init(&rdev->mode_info.atom_context->mutex);
771fe6b9 951 radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
d904ef9b 952 atom_allocate_fb_scratch(rdev->mode_info.atom_context);
771fe6b9
JG
953 return 0;
954}
955
0c195119
AD
956/**
957 * radeon_atombios_fini - free the driver info and callbacks for atombios
958 *
959 * @rdev: radeon_device pointer
960 *
961 * Frees the driver info and register access callbacks for the ATOM
962 * interpreter (r4xx+).
963 * Called at driver shutdown.
964 */
771fe6b9
JG
965void radeon_atombios_fini(struct radeon_device *rdev)
966{
4a04a844
JG
967 if (rdev->mode_info.atom_context) {
968 kfree(rdev->mode_info.atom_context->scratch);
4a04a844 969 }
0e34d094
TG
970 kfree(rdev->mode_info.atom_context);
971 rdev->mode_info.atom_context = NULL;
61c4b24b 972 kfree(rdev->mode_info.atom_card_info);
0e34d094 973 rdev->mode_info.atom_card_info = NULL;
771fe6b9
JG
974}
975
0c195119
AD
976/* COMBIOS */
977/*
978 * COMBIOS is the bios format prior to ATOM. It provides
979 * command tables similar to ATOM, but doesn't have a unified
980 * parser. See radeon_combios.c
981 */
982
983/**
984 * radeon_combios_init - init the driver info for combios
985 *
986 * @rdev: radeon_device pointer
987 *
988 * Initializes the driver info for combios (r1xx-r3xx).
989 * Returns 0 on sucess.
990 * Called at driver startup.
991 */
771fe6b9
JG
992int radeon_combios_init(struct radeon_device *rdev)
993{
994 radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
995 return 0;
996}
997
/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx); nothing to do since
 * combios init allocates nothing.
 * Called at driver shutdown.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
}
1009
0c195119
AD
1010/* if we get transitioned to only one device, take VGA back */
1011/**
1012 * radeon_vga_set_decode - enable/disable vga decode
1013 *
1014 * @cookie: radeon_device pointer
1015 * @state: enable/disable vga decode
1016 *
1017 * Enable/disable vga decode (all asics).
1018 * Returns VGA resource flags.
1019 */
28d52043
DA
1020static unsigned int radeon_vga_set_decode(void *cookie, bool state)
1021{
1022 struct radeon_device *rdev = cookie;
28d52043
DA
1023 radeon_vga_set_state(rdev, state);
1024 if (state)
1025 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1026 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1027 else
1028 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1029}
c1176d6f 1030
1bcb04f7
CK
/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Note that zero passes the check as well; callers use 0/unset as a
 * "disabled" value.
 * Returns true if argument is valid.
 */
static bool radeon_check_pot_argument(int arg)
{
	/* a power of two has exactly one bit set, so arg & (arg - 1) == 0 */
	return !(arg & (arg - 1));
}
1043
0c195119
AD
1044/**
1045 * radeon_check_arguments - validate module params
1046 *
1047 * @rdev: radeon_device pointer
1048 *
1049 * Validates certain module parameters and updates
1050 * the associated values used by the driver (all asics).
1051 */
1109ca09 1052static void radeon_check_arguments(struct radeon_device *rdev)
36421338
JG
1053{
1054 /* vramlimit must be a power of two */
1bcb04f7 1055 if (!radeon_check_pot_argument(radeon_vram_limit)) {
36421338
JG
1056 dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
1057 radeon_vram_limit);
1058 radeon_vram_limit = 0;
36421338 1059 }
1bcb04f7 1060
edcd26e8
AD
1061 if (radeon_gart_size == -1) {
1062 /* default to a larger gart size on newer asics */
1063 if (rdev->family >= CHIP_RV770)
1064 radeon_gart_size = 1024;
1065 else
1066 radeon_gart_size = 512;
1067 }
36421338 1068 /* gtt size must be power of two and greater or equal to 32M */
1bcb04f7 1069 if (radeon_gart_size < 32) {
edcd26e8 1070 dev_warn(rdev->dev, "gart size (%d) too small\n",
36421338 1071 radeon_gart_size);
edcd26e8
AD
1072 if (rdev->family >= CHIP_RV770)
1073 radeon_gart_size = 1024;
1074 else
1075 radeon_gart_size = 512;
1bcb04f7 1076 } else if (!radeon_check_pot_argument(radeon_gart_size)) {
36421338
JG
1077 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1078 radeon_gart_size);
edcd26e8
AD
1079 if (rdev->family >= CHIP_RV770)
1080 radeon_gart_size = 1024;
1081 else
1082 radeon_gart_size = 512;
36421338 1083 }
1bcb04f7
CK
1084 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1085
36421338
JG
1086 /* AGP mode can only be -1, 1, 2, 4, 8 */
1087 switch (radeon_agpmode) {
1088 case -1:
1089 case 0:
1090 case 1:
1091 case 2:
1092 case 4:
1093 case 8:
1094 break;
1095 default:
1096 dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
1097 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
1098 radeon_agpmode = 0;
1099 break;
1100 }
c1c44132
CK
1101
1102 if (!radeon_check_pot_argument(radeon_vm_size)) {
1103 dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
1104 radeon_vm_size);
20b2656d 1105 radeon_vm_size = 4;
c1c44132
CK
1106 }
1107
20b2656d
CK
1108 if (radeon_vm_size < 1) {
1109 dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n",
c1c44132 1110 radeon_vm_size);
20b2656d 1111 radeon_vm_size = 4;
c1c44132
CK
1112 }
1113
1114 /*
1115 * Max GPUVM size for Cayman, SI and CI are 40 bits.
1116 */
20b2656d
CK
1117 if (radeon_vm_size > 1024) {
1118 dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
c1c44132 1119 radeon_vm_size);
20b2656d 1120 radeon_vm_size = 4;
c1c44132 1121 }
4510fb98
CK
1122
1123 /* defines number of bits in page table versus page directory,
1124 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1125 * page table and the remaining bits are in the page directory */
dfc230f9
CK
1126 if (radeon_vm_block_size == -1) {
1127
1128 /* Total bits covered by PD + PTs */
1129 unsigned bits = ilog2(radeon_vm_size) + 17;
1130
1131 /* Make sure the PD is 4K in size up to 8GB address space.
1132 Above that split equal between PD and PTs */
1133 if (radeon_vm_size <= 8)
1134 radeon_vm_block_size = bits - 9;
1135 else
1136 radeon_vm_block_size = (bits + 3) / 2;
1137
1138 } else if (radeon_vm_block_size < 9) {
20b2656d 1139 dev_warn(rdev->dev, "VM page table size (%d) too small\n",
4510fb98
CK
1140 radeon_vm_block_size);
1141 radeon_vm_block_size = 9;
1142 }
1143
1144 if (radeon_vm_block_size > 24 ||
20b2656d
CK
1145 (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
1146 dev_warn(rdev->dev, "VM page table size (%d) too large\n",
4510fb98
CK
1147 radeon_vm_block_size);
1148 radeon_vm_block_size = 9;
1149 }
36421338
JG
1150}
1151
0c195119
AD
/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct radeon_device *rdev = dev->dev_private;

	/* PX (PowerXpress) systems are powered off via runtime PM instead */
	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		/* remember the original D3 delay so it can be restored below */
		unsigned d3_delay = dev->pdev->d3_delay;

		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		/* quirk: some boards need a longer delay to wake up reliably */
		if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP))
			dev->pdev->d3_delay = 20;

		radeon_resume_kms(dev, true, true);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		/* stop output polling before the hw goes away */
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
1193
0c195119
AD
/**
 * radeon_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Checks whether the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}
1214
26ec685f
TI
/* vga_switcheroo callbacks; no reprobe hook is needed for radeon */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
6a9ee8af 1220
0c195119
AD
/**
 * radeon_device_init - initialize the driver
 *
 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;
	bool runtime = false;

	/* stash the pci/drm handles and decode the family from the flags */
	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = 512 * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		pdev->subsystem_vendor, pdev->subsystem_device);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	mutex_init(&rdev->mn_lock);
	hash_init(rdev->mn_hash);
	r = radeon_gem_init(rdev);
	if (r)
		return r;

	radeon_check_arguments(rdev);
	/* Adjust VM size here.
	 * Max GPUVM size for cayman+ is 40 bits.
	 */
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		/* fall back to 32 bits if the wider mask is rejected */
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&rdev->mmio_idx_lock);
	spin_lock_init(&rdev->smc_idx_lock);
	spin_lock_init(&rdev->pll_idx_lock);
	spin_lock_init(&rdev->mc_idx_lock);
	spin_lock_init(&rdev->pcie_idx_lock);
	spin_lock_init(&rdev->pciep_idx_lock);
	spin_lock_init(&rdev->pif_idx_lock);
	spin_lock_init(&rdev->cg_idx_lock);
	spin_lock_init(&rdev->uvd_idx_lock);
	spin_lock_init(&rdev->rcu_idx_lock);
	spin_lock_init(&rdev->didt_idx_lock);
	spin_lock_init(&rdev->end_idx_lock);
	/* BONAIRE+ expose the register BAR at index 5, older parts at 2 */
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* doorbell bar mapping */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping: map the first I/O BAR found */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	if (rdev->flags & RADEON_IS_PX)
		radeon_device_handle_px_quirks(rdev);

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);

	if (rdev->flags & RADEON_IS_PX)
		runtime = true;
	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);

	r = radeon_init(rdev);
	if (r)
		return r;

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	r = radeon_gem_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
	}

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}

	/* radeon_testing bit 0: buffer-move tests, bit 1: fence sync tests */
	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;
}
1441
4d8bf9ae
CK
1442static void radeon_debugfs_remove_files(struct radeon_device *rdev);
1443
0c195119
AD
/**
 * radeon_device_fini - tear down the driver
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	/* signal shutdown so other paths stop touching the hw */
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	/* unregister from switcheroo/vgaarb before unmapping the BARs */
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	/* doorbell BAR only exists on BONAIRE+ (see radeon_device_init) */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
	radeon_debugfs_remove_files(rdev);
}
1470
1471
1472/*
1473 * Suspend & resume.
1474 */
0c195119
AD
/**
 * radeon_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: true to also put the pci device into D3hot
 * @fbcon: true to suspend the fbdev console as well
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;
	bool force_completion = false;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	rdev = dev->dev_private;

	/* nothing to do if switcheroo already powered us down */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			force_completion = true;
		}
	}
	if (force_completion) {
		radeon_fence_driver_force_completion(rdev);
	}

	radeon_save_bios_scratch_regs(rdev);

	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	/* console_lock serializes against fbcon users while suspending it */
	if (fbcon) {
		console_lock();
		radeon_fbdev_set_suspend(rdev, 1);
		console_unlock();
	}
	return 0;
}
1565
0c195119
AD
/**
 * radeon_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: true to also bring the pci device back from D3
 * @fbcon: true to resume the fbdev console as well
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	int r;

	/* nothing to do if switcheroo has powered us down */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon) {
		console_lock();
	}
	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		if (pci_enable_device(dev->pdev)) {
			if (fbcon)
				console_unlock();
			return -1;
		}
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
	}

	drm_kms_helper_poll_enable(dev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (fbcon) {
		radeon_fbdev_set_suspend(rdev, 0);
		console_unlock();
	}

	return 0;
}
1654
0c195119
AD
/**
 * radeon_gpu_reset - reset the asic
 *
 * @rdev: radeon device pointer
 *
 * Attempt the reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int radeon_gpu_reset(struct radeon_device *rdev)
{
	/* saved, unexecuted command words per ring, restored after reset */
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	/* exclusive_lock keeps all other users out during the reset */
	down_write(&rdev->exclusive_lock);

	if (!rdev->needs_reset) {
		up_write(&rdev->exclusive_lock);
		return 0;
	}

	rdev->needs_reset = false;

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);

	/* back up the unexecuted parts of the rings before the reset */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

retry:
	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	if (!r) {
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			/* radeon_ring_restore also frees ring_data[i] */
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
			ring_sizes[i] = 0;
			ring_data[i] = NULL;
		}

		r = radeon_ib_ring_tests(rdev);
		if (r) {
			dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
			if (saved) {
				/* retry once more without restoring the rings */
				saved = false;
				radeon_suspend(rdev);
				goto retry;
			}
		}
	} else {
		/* reset failed: complete fences and drop the saved commands */
		radeon_fence_driver_force_completion(rdev);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			kfree(ring_data[i]);
		}
	}

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);

	drm_helper_resume_force_mode(rdev->ddev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	up_write(&rdev->exclusive_lock);
	return r;
}
1773
771fe6b9
JG
1774
1775/*
1776 * Debugfs
1777 */
771fe6b9
JG
/**
 * radeon_debugfs_add_files - register a set of debugfs files
 *
 * @rdev: radeon_device pointer
 * @files: array of debugfs file descriptions
 * @nfiles: number of entries in @files
 *
 * Records @files in the per-device bookkeeping table (skipping arrays
 * that are already registered) and, when debugfs is built in, creates
 * the files under both the control and primary DRM minors.
 * Returns 0 on success, -EINVAL when the component table is full.
 */
int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		if (rdev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	/* i is the new component count after this registration */
	i = rdev->debugfs_count + 1;
	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			"RADEON_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	rdev->debugfs[rdev->debugfs_count].files = files;
	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
	rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}
1811
4d8bf9ae
CK
/**
 * radeon_debugfs_remove_files - unregister all debugfs files
 *
 * @rdev: radeon_device pointer
 *
 * Removes every debugfs file previously registered through
 * radeon_debugfs_add_files() from both the control and primary minors.
 * No-op when debugfs is not built in.
 */
static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->control);
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->primary);
	}
#endif
}
1827
771fe6b9
JG
#if defined(CONFIG_DEBUG_FS)
/* DRM minor-level debugfs init hook; the per-component files are added
 * later via radeon_debugfs_add_files(), so nothing to do here. */
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

/* DRM minor-level debugfs teardown hook; files are removed in
 * radeon_debugfs_remove_files(), so nothing to do here. */
void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif