drm/radeon: fix card_posted check for newer asics
[linux-block.git] drivers/gpu/drm/radeon/radeon_device.c
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/console.h>
 29#include <linux/slab.h>
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
 33#include <linux/vgaarb.h>
 34#include <linux/vga_switcheroo.h>
 35#include <linux/efi.h>
36#include "radeon_reg.h"
37#include "radeon.h"
38#include "atom.h"
39
40static const char radeon_family_name[][16] = {
41 "R100",
42 "RV100",
43 "RS100",
44 "RV200",
45 "RS200",
46 "R200",
47 "RV250",
48 "RS300",
49 "RV280",
50 "R300",
51 "R350",
52 "RV350",
53 "RV380",
54 "R420",
55 "R423",
56 "RV410",
57 "RS400",
58 "RS480",
59 "RS600",
60 "RS690",
61 "RS740",
62 "RV515",
63 "R520",
64 "RV530",
65 "RV560",
66 "RV570",
67 "R580",
68 "R600",
69 "RV610",
70 "RV630",
71 "RV670",
72 "RV620",
73 "RV635",
74 "RS780",
75 "RS880",
76 "RV770",
77 "RV730",
78 "RV710",
79 "RV740",
80 "CEDAR",
81 "REDWOOD",
82 "JUNIPER",
83 "CYPRESS",
84 "HEMLOCK",
 85 "PALM",
 86 "SUMO",
 87 "SUMO2",
88 "BARTS",
89 "TURKS",
90 "CAICOS",
 91 "CAYMAN",
 92 "ARUBA",
93 "TAHITI",
94 "PITCAIRN",
95 "VERDE",
 96 "OLAND",
 97 "HAINAN",
98 "LAST",
99};
100
101/**
102 * radeon_program_register_sequence - program an array of registers.
103 *
104 * @rdev: radeon_device pointer
105 * @registers: pointer to the register array
106 * @array_size: size of the register array
107 *
 108 * Programs an array of registers with AND and OR masks.
109 * This is a helper for setting golden registers.
110 */
111void radeon_program_register_sequence(struct radeon_device *rdev,
112 const u32 *registers,
113 const u32 array_size)
114{
115 u32 tmp, reg, and_mask, or_mask;
116 int i;
117
118 if (array_size % 3)
119 return;
120
 121 for (i = 0; i < array_size; i += 3) {
122 reg = registers[i + 0];
123 and_mask = registers[i + 1];
124 or_mask = registers[i + 2];
125
126 if (and_mask == 0xffffffff) {
127 tmp = or_mask;
128 } else {
129 tmp = RREG32(reg);
130 tmp &= ~and_mask;
131 tmp |= or_mask;
132 }
133 WREG32(reg, tmp);
134 }
135}
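
/*
 * Illustrative sketch (not taken from this file; the register offsets and
 * values below are hypothetical): a golden register table is a flat array
 * of {reg, and_mask, or_mask} triplets, so array_size must be a multiple
 * of 3.
 *
 *	static const u32 example_golden_regs[] = {
 *		0x9150, 0xffffffff, 0x6e944040,	// and_mask == ~0: plain write
 *		0x3f90, 0xffff0000, 0xff000098,	// read-modify-write of masked bits
 *	};
 *	radeon_program_register_sequence(rdev, example_golden_regs,
 *					 (u32)ARRAY_SIZE(example_golden_regs));
 */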
136
137/**
138 * radeon_surface_init - Clear GPU surface registers.
139 *
140 * @rdev: radeon_device pointer
141 *
142 * Clear GPU surface registers (r1xx-r5xx).
 143 */
 144void radeon_surface_init(struct radeon_device *rdev)
145{
146 /* FIXME: check this out */
147 if (rdev->family < CHIP_R600) {
148 int i;
149
150 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
151 if (rdev->surface_regs[i].bo)
152 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
153 else
154 radeon_clear_surface_reg(rdev, i);
 155 }
156 /* enable surfaces */
157 WREG32(RADEON_SURFACE_CNTL, 0);
158 }
159}
160
161/*
162 * GPU scratch registers helpers function.
163 */
164/**
165 * radeon_scratch_init - Init scratch register driver information.
166 *
167 * @rdev: radeon_device pointer
168 *
169 * Init CP scratch register driver information (r1xx-r5xx)
170 */
 171void radeon_scratch_init(struct radeon_device *rdev)
172{
173 int i;
174
175 /* FIXME: check this out */
176 if (rdev->family < CHIP_R300) {
177 rdev->scratch.num_reg = 5;
178 } else {
179 rdev->scratch.num_reg = 7;
180 }
 181 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
182 for (i = 0; i < rdev->scratch.num_reg; i++) {
183 rdev->scratch.free[i] = true;
 184 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
185 }
186}
187
188/**
189 * radeon_scratch_get - Allocate a scratch register
190 *
191 * @rdev: radeon_device pointer
192 * @reg: scratch register mmio offset
193 *
194 * Allocate a CP scratch register for use by the driver (all asics).
195 * Returns 0 on success or -EINVAL on failure.
196 */
197int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
198{
199 int i;
200
201 for (i = 0; i < rdev->scratch.num_reg; i++) {
202 if (rdev->scratch.free[i]) {
203 rdev->scratch.free[i] = false;
204 *reg = rdev->scratch.reg[i];
205 return 0;
206 }
207 }
208 return -EINVAL;
209}
210
211/**
212 * radeon_scratch_free - Free a scratch register
213 *
214 * @rdev: radeon_device pointer
215 * @reg: scratch register mmio offset
216 *
217 * Free a CP scratch register allocated for use by the driver (all asics)
218 */
219void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
220{
221 int i;
222
223 for (i = 0; i < rdev->scratch.num_reg; i++) {
224 if (rdev->scratch.reg[i] == reg) {
225 rdev->scratch.free[i] = true;
226 return;
227 }
228 }
229}
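
/*
 * Usage sketch (assumption: the typical get/use/free pattern, e.g. as used
 * by the ring tests; not code from this file):
 *
 *	uint32_t scratch;
 *
 *	if (radeon_scratch_get(rdev, &scratch))
 *		return -EINVAL;		// no scratch register free
 *	WREG32(scratch, 0xCAFEDEAD);	// CP packets can now write it back
 *	...
 *	radeon_scratch_free(rdev, scratch);
 */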
230
231/*
232 * radeon_wb_*()
 233 * Writeback is the method by which the GPU updates special pages
234 * in memory with the status of certain GPU events (fences, ring pointers,
235 * etc.).
236 */
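/*
 * Reader-side sketch (assumption: mirrors how the ring code consumes the
 * writeback page; ring->rptr_offs is a byte offset into that page and is
 * used here only for illustration): rdev->wb.wb is the CPU mapping of the
 * GPU-written page, so values are read as little-endian dwords, e.g.
 *
 *	u32 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs / 4]);
 */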
237
238/**
239 * radeon_wb_disable - Disable Writeback
240 *
241 * @rdev: radeon_device pointer
242 *
243 * Disables Writeback (all asics). Used for suspend.
244 */
245void radeon_wb_disable(struct radeon_device *rdev)
246{
247 int r;
248
249 if (rdev->wb.wb_obj) {
250 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
251 if (unlikely(r != 0))
252 return;
253 radeon_bo_kunmap(rdev->wb.wb_obj);
254 radeon_bo_unpin(rdev->wb.wb_obj);
255 radeon_bo_unreserve(rdev->wb.wb_obj);
256 }
257 rdev->wb.enabled = false;
258}
259
260/**
261 * radeon_wb_fini - Disable Writeback and free memory
262 *
263 * @rdev: radeon_device pointer
264 *
265 * Disables Writeback and frees the Writeback memory (all asics).
266 * Used at driver shutdown.
267 */
268void radeon_wb_fini(struct radeon_device *rdev)
269{
270 radeon_wb_disable(rdev);
271 if (rdev->wb.wb_obj) {
272 radeon_bo_unref(&rdev->wb.wb_obj);
273 rdev->wb.wb = NULL;
274 rdev->wb.wb_obj = NULL;
275 }
276}
277
 278/**
 279 * radeon_wb_init - Init Writeback driver info and allocate memory
 280 *
 281 * @rdev: radeon_device pointer
 282 *
 283 * Initializes writeback and allocates the writeback memory (all asics).
 284 * Used at driver startup.
 285 * Returns 0 on success or a negative error code on failure.
 286 */
287int radeon_wb_init(struct radeon_device *rdev)
288{
289 int r;
290
291 if (rdev->wb.wb_obj == NULL) {
 292 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
 293 RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
294 if (r) {
295 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
296 return r;
297 }
298 }
299 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
300 if (unlikely(r != 0)) {
301 radeon_wb_fini(rdev);
302 return r;
303 }
304 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
305 &rdev->wb.gpu_addr);
306 if (r) {
307 radeon_bo_unreserve(rdev->wb.wb_obj);
308 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
309 radeon_wb_fini(rdev);
310 return r;
311 }
312 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
313 radeon_bo_unreserve(rdev->wb.wb_obj);
314 if (r) {
315 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
316 radeon_wb_fini(rdev);
317 return r;
318 }
319
320 /* clear wb memory */
321 memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
322 /* disable event_write fences */
323 rdev->wb.use_event = false;
 324 /* disabled via module param */
 325 if (radeon_no_wb == 1) {
 326 rdev->wb.enabled = false;
 327 } else {
 328 if (rdev->flags & RADEON_IS_AGP) {
329 /* often unreliable on AGP */
330 rdev->wb.enabled = false;
331 } else if (rdev->family < CHIP_R300) {
332 /* often unreliable on pre-r300 */
 333 rdev->wb.enabled = false;
 334 } else {
 335 rdev->wb.enabled = true;
 336 /* event_write fences are only available on r600+ */
 337 if (rdev->family >= CHIP_R600) {
 338 rdev->wb.use_event = true;
 339 }
 340 }
 341 }
342 /* always use writeback/events on NI, APUs */
343 if (rdev->family >= CHIP_PALM) {
344 rdev->wb.enabled = true;
345 rdev->wb.use_event = true;
346 }
347
348 dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
349
350 return 0;
351}
352
353/**
354 * radeon_vram_location - try to find VRAM location
355 * @rdev: radeon device structure holding all necessary informations
356 * @mc: memory controller structure holding memory informations
357 * @base: base address at which to put VRAM
358 *
 359 * Function will try to place VRAM at the base address provided
 360 * as parameter (which is so far either the PCI aperture address or,
 361 * for IGP, the TOM base address).
 362 *
 363 * If there is not enough space to fit the invisible VRAM in the 32-bit
 364 * address space then we limit the VRAM size to the aperture.
 365 *
 366 * If we are using AGP and if the AGP aperture doesn't allow us to have
 367 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 368 * size and print a warning.
 369 *
 370 * This function will never fail; the worst case is limiting VRAM.
 371 *
 372 * Note: GTT start, end and size should be initialized before calling this
 373 * function on AGP platforms.
 374 *
 375 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 376 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 377 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 378 * not IGP.
 379 *
 380 * Note: we use mc_vram_size since on some boards we need to program the MC to
 381 * cover the whole aperture even if VRAM size is smaller than the aperture size
 382 * (Novell bug 204882, along with lots of Ubuntu ones).
 383 *
 384 * Note: when limiting VRAM it is safe to overwrite real_vram_size because
 385 * we are not in the case where real_vram_size is smaller than mc_vram_size (i.e.
 386 * not affected by the bogus hw of Novell bug 204882 and the related Ubuntu
 387 * ones).
 388 *
 389 * Note: the IGP TOM addr should be the same as the aperture addr, but we don't
 390 * explicitly check for that.
 391 *
 392 * FIXME: when reducing VRAM size, align the new size on a power of 2.
 393 */
 394void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
 395{
396 uint64_t limit = (uint64_t)radeon_vram_limit << 20;
397
 398 mc->vram_start = base;
 399 if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
400 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
401 mc->real_vram_size = mc->aper_size;
402 mc->mc_vram_size = mc->aper_size;
403 }
404 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
 405 if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
406 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
407 mc->real_vram_size = mc->aper_size;
408 mc->mc_vram_size = mc->aper_size;
409 }
410 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
411 if (limit && limit < mc->real_vram_size)
412 mc->real_vram_size = limit;
 413 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
414 mc->mc_vram_size >> 20, mc->vram_start,
415 mc->vram_end, mc->real_vram_size >> 20);
416}
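
/*
 * Worked example (hypothetical numbers): with base = 0xe0000000, a 256MB
 * aperture and mc_vram_size = 1GB on a 32-bit MC (mc_mask = 0xffffffff),
 * only 512MB fit above the base, so real_vram_size and mc_vram_size are
 * both clamped to the 256MB aperture and vram_end ends up at 0xefffffff.
 */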
 417
418/**
419 * radeon_gtt_location - try to find GTT location
420 * @rdev: radeon device structure holding all necessary informations
421 * @mc: memory controller structure holding memory informations
422 *
 423 * Function will try to place GTT before or after VRAM.
 424 *
 425 * If GTT size is bigger than the space left then we adjust GTT size.
 426 * Thus this function will never fail.
427 *
428 * FIXME: when reducing GTT size align new size on power of 2.
429 */
430void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
431{
432 u64 size_af, size_bf;
433
 434 size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
 435 size_bf = mc->vram_start & ~mc->gtt_base_align;
436 if (size_bf > size_af) {
437 if (mc->gtt_size > size_bf) {
438 dev_warn(rdev->dev, "limiting GTT\n");
439 mc->gtt_size = size_bf;
 440 }
 441 mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
 442 } else {
443 if (mc->gtt_size > size_af) {
444 dev_warn(rdev->dev, "limiting GTT\n");
445 mc->gtt_size = size_af;
446 }
 447 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
 448 }
 449 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
 450 dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
 451 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
452}
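
/*
 * Placement sketch (hypothetical numbers): with VRAM at
 * 0xe0000000-0xefffffff on a 32-bit MC, size_bf (space below VRAM, 3584MB)
 * beats size_af (space above, 256MB), so a 256MB GTT is placed just below
 * VRAM at 0xd0000000-0xdfffffff; a GTT bigger than the winning gap would
 * first be limited to fit.
 */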
453
454/*
455 * GPU helpers function.
456 */
457/**
458 * radeon_card_posted - check if the hw has already been initialized
459 *
460 * @rdev: radeon_device pointer
461 *
462 * Check if the asic has been initialized (all asics).
463 * Used at driver startup.
464 * Returns true if initialized or false if not.
465 */
 466bool radeon_card_posted(struct radeon_device *rdev)
467{
468 uint32_t reg;
469
470 if (efi_enabled(EFI_BOOT) &&
471 rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
472 return false;
473
 474 /* first check CRTCs */
 475 if (ASIC_IS_DCE4(rdev)) {
476 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
477 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
478 if (rdev->num_crtc >= 4) {
479 reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
480 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
481 }
482 if (rdev->num_crtc >= 6) {
483 reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
484 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
485 }
486 if (reg & EVERGREEN_CRTC_MASTER_EN)
487 return true;
488 } else if (ASIC_IS_AVIVO(rdev)) {
489 reg = RREG32(AVIVO_D1CRTC_CONTROL) |
490 RREG32(AVIVO_D2CRTC_CONTROL);
491 if (reg & AVIVO_CRTC_EN) {
492 return true;
493 }
494 } else {
495 reg = RREG32(RADEON_CRTC_GEN_CNTL) |
496 RREG32(RADEON_CRTC2_GEN_CNTL);
497 if (reg & RADEON_CRTC_EN) {
498 return true;
499 }
500 }
501
502 /* then check MEM_SIZE, in case the crtcs are off */
503 if (rdev->family >= CHIP_R600)
504 reg = RREG32(R600_CONFIG_MEMSIZE);
505 else
506 reg = RREG32(RADEON_CONFIG_MEMSIZE);
507
508 if (reg)
509 return true;
510
511 return false;
512
513}
514
515/**
516 * radeon_update_bandwidth_info - update display bandwidth params
517 *
518 * @rdev: radeon_device pointer
519 *
520 * Used when sclk/mclk are switched or display modes are set.
 521 * Params are used to calculate display watermarks (all asics).
522 */
523void radeon_update_bandwidth_info(struct radeon_device *rdev)
524{
525 fixed20_12 a;
526 u32 sclk = rdev->pm.current_sclk;
527 u32 mclk = rdev->pm.current_mclk;
 528
 529 /* sclk/mclk in MHz */
530 a.full = dfixed_const(100);
531 rdev->pm.sclk.full = dfixed_const(sclk);
532 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
533 rdev->pm.mclk.full = dfixed_const(mclk);
534 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
 535
 536 if (rdev->flags & RADEON_IS_IGP) {
 537 a.full = dfixed_const(16);
 538 /* core_bandwidth = sclk(MHz) * 16 */
 539 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
540 }
541}
542
543/**
544 * radeon_boot_test_post_card - check and possibly initialize the hw
545 *
546 * @rdev: radeon_device pointer
547 *
548 * Check if the asic is initialized and if not, attempt to initialize
549 * it (all asics).
550 * Returns true if initialized or false if not.
551 */
552bool radeon_boot_test_post_card(struct radeon_device *rdev)
553{
554 if (radeon_card_posted(rdev))
555 return true;
556
557 if (rdev->bios) {
558 DRM_INFO("GPU not posted. posting now...\n");
559 if (rdev->is_atom_bios)
560 atom_asic_init(rdev->mode_info.atom_context);
561 else
562 radeon_combios_asic_init(rdev->ddev);
563 return true;
564 } else {
565 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
566 return false;
567 }
568}
569
570/**
571 * radeon_dummy_page_init - init dummy page used by the driver
572 *
573 * @rdev: radeon_device pointer
574 *
575 * Allocate the dummy page used by the driver (all asics).
576 * This dummy page is used by the driver as a filler for gart entries
 577 * when pages are taken out of the GART.
 578 * Returns 0 on success, -ENOMEM on failure.
579 */
580int radeon_dummy_page_init(struct radeon_device *rdev)
581{
582 if (rdev->dummy_page.page)
583 return 0;
584 rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
585 if (rdev->dummy_page.page == NULL)
586 return -ENOMEM;
587 rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
588 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
589 if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
590 dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
591 __free_page(rdev->dummy_page.page);
592 rdev->dummy_page.page = NULL;
593 return -ENOMEM;
594 }
595 return 0;
596}
597
598/**
599 * radeon_dummy_page_fini - free dummy page used by the driver
600 *
601 * @rdev: radeon_device pointer
602 *
603 * Frees the dummy page used by the driver (all asics).
604 */
605void radeon_dummy_page_fini(struct radeon_device *rdev)
606{
607 if (rdev->dummy_page.page == NULL)
608 return;
609 pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
610 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
611 __free_page(rdev->dummy_page.page);
612 rdev->dummy_page.page = NULL;
613}
614
 615
 616/* ATOM accessor methods */
617/*
618 * ATOM is an interpreted byte code stored in tables in the vbios. The
619 * driver registers callbacks to access registers and the interpreter
 620 * in the driver parses the tables and executes them to program specific
621 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
622 * atombios.h, and atom.c
623 */
624
625/**
626 * cail_pll_read - read PLL register
627 *
628 * @info: atom card_info pointer
629 * @reg: PLL register offset
630 *
631 * Provides a PLL register accessor for the atom interpreter (r4xx+).
632 * Returns the value of the PLL register.
633 */
634static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
635{
636 struct radeon_device *rdev = info->dev->dev_private;
637 uint32_t r;
638
639 r = rdev->pll_rreg(rdev, reg);
640 return r;
641}
642
643/**
644 * cail_pll_write - write PLL register
645 *
646 * @info: atom card_info pointer
647 * @reg: PLL register offset
648 * @val: value to write to the pll register
649 *
650 * Provides a PLL register accessor for the atom interpreter (r4xx+).
651 */
652static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
653{
654 struct radeon_device *rdev = info->dev->dev_private;
655
656 rdev->pll_wreg(rdev, reg, val);
657}
658
659/**
660 * cail_mc_read - read MC (Memory Controller) register
661 *
662 * @info: atom card_info pointer
663 * @reg: MC register offset
664 *
665 * Provides an MC register accessor for the atom interpreter (r4xx+).
666 * Returns the value of the MC register.
667 */
668static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
669{
670 struct radeon_device *rdev = info->dev->dev_private;
671 uint32_t r;
672
673 r = rdev->mc_rreg(rdev, reg);
674 return r;
675}
676
677/**
678 * cail_mc_write - write MC (Memory Controller) register
679 *
680 * @info: atom card_info pointer
681 * @reg: MC register offset
 682 * @val: value to write to the MC register
683 *
684 * Provides a MC register accessor for the atom interpreter (r4xx+).
685 */
686static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
687{
688 struct radeon_device *rdev = info->dev->dev_private;
689
690 rdev->mc_wreg(rdev, reg, val);
691}
692
693/**
694 * cail_reg_write - write MMIO register
695 *
696 * @info: atom card_info pointer
697 * @reg: MMIO register offset
 698 * @val: value to write to the register
 699 *
 700 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
701 */
702static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
703{
704 struct radeon_device *rdev = info->dev->dev_private;
705
706 WREG32(reg*4, val);
707}
708
709/**
710 * cail_reg_read - read MMIO register
711 *
712 * @info: atom card_info pointer
713 * @reg: MMIO register offset
714 *
715 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
716 * Returns the value of the MMIO register.
717 */
718static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
719{
720 struct radeon_device *rdev = info->dev->dev_private;
721 uint32_t r;
722
723 r = RREG32(reg*4);
724 return r;
725}
726
727/**
728 * cail_ioreg_write - write IO register
729 *
730 * @info: atom card_info pointer
731 * @reg: IO register offset
 732 * @val: value to write to the register
 733 *
 734 * Provides an IO register accessor for the atom interpreter (r4xx+).
735 */
736static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
737{
738 struct radeon_device *rdev = info->dev->dev_private;
739
740 WREG32_IO(reg*4, val);
741}
742
743/**
744 * cail_ioreg_read - read IO register
745 *
746 * @info: atom card_info pointer
747 * @reg: IO register offset
748 *
749 * Provides an IO register accessor for the atom interpreter (r4xx+).
750 * Returns the value of the IO register.
751 */
752static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
753{
754 struct radeon_device *rdev = info->dev->dev_private;
755 uint32_t r;
756
757 r = RREG32_IO(reg*4);
758 return r;
759}
760
761/**
762 * radeon_atombios_init - init the driver info and callbacks for atombios
763 *
764 * @rdev: radeon_device pointer
765 *
766 * Initializes the driver info and register access callbacks for the
767 * ATOM interpreter (r4xx+).
 768 * Returns 0 on success, -ENOMEM on failure.
769 * Called at driver startup.
770 */
771int radeon_atombios_init(struct radeon_device *rdev)
772{
773 struct card_info *atom_card_info =
774 kzalloc(sizeof(struct card_info), GFP_KERNEL);
775
776 if (!atom_card_info)
777 return -ENOMEM;
778
779 rdev->mode_info.atom_card_info = atom_card_info;
780 atom_card_info->dev = rdev->ddev;
781 atom_card_info->reg_read = cail_reg_read;
782 atom_card_info->reg_write = cail_reg_write;
783 /* needed for iio ops */
784 if (rdev->rio_mem) {
785 atom_card_info->ioreg_read = cail_ioreg_read;
786 atom_card_info->ioreg_write = cail_ioreg_write;
787 } else {
788 DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
789 atom_card_info->ioreg_read = cail_reg_read;
790 atom_card_info->ioreg_write = cail_reg_write;
791 }
792 atom_card_info->mc_read = cail_mc_read;
793 atom_card_info->mc_write = cail_mc_write;
794 atom_card_info->pll_read = cail_pll_read;
795 atom_card_info->pll_write = cail_pll_write;
796
797 rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
798 if (!rdev->mode_info.atom_context) {
799 radeon_atombios_fini(rdev);
800 return -ENOMEM;
801 }
802
 803 mutex_init(&rdev->mode_info.atom_context->mutex);
 804 radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
 805 atom_allocate_fb_scratch(rdev->mode_info.atom_context);
806 return 0;
807}
808
809/**
810 * radeon_atombios_fini - free the driver info and callbacks for atombios
811 *
812 * @rdev: radeon_device pointer
813 *
814 * Frees the driver info and register access callbacks for the ATOM
815 * interpreter (r4xx+).
816 * Called at driver shutdown.
817 */
818void radeon_atombios_fini(struct radeon_device *rdev)
819{
820 if (rdev->mode_info.atom_context) {
821 kfree(rdev->mode_info.atom_context->scratch);
 822 }
823 kfree(rdev->mode_info.atom_context);
824 rdev->mode_info.atom_context = NULL;
 825 kfree(rdev->mode_info.atom_card_info);
 826 rdev->mode_info.atom_card_info = NULL;
827}
828
829/* COMBIOS */
830/*
831 * COMBIOS is the bios format prior to ATOM. It provides
832 * command tables similar to ATOM, but doesn't have a unified
833 * parser. See radeon_combios.c
834 */
835
836/**
837 * radeon_combios_init - init the driver info for combios
838 *
839 * @rdev: radeon_device pointer
840 *
841 * Initializes the driver info for combios (r1xx-r3xx).
 842 * Returns 0 on success.
843 * Called at driver startup.
844 */
845int radeon_combios_init(struct radeon_device *rdev)
846{
847 radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
848 return 0;
849}
850
851/**
852 * radeon_combios_fini - free the driver info for combios
853 *
854 * @rdev: radeon_device pointer
855 *
856 * Frees the driver info for combios (r1xx-r3xx).
857 * Called at driver shutdown.
858 */
859void radeon_combios_fini(struct radeon_device *rdev)
860{
861}
862
863/* if we get transitioned to only one device, take VGA back */
864/**
865 * radeon_vga_set_decode - enable/disable vga decode
866 *
867 * @cookie: radeon_device pointer
868 * @state: enable/disable vga decode
869 *
870 * Enable/disable vga decode (all asics).
871 * Returns VGA resource flags.
872 */
873static unsigned int radeon_vga_set_decode(void *cookie, bool state)
874{
875 struct radeon_device *rdev = cookie;
876 radeon_vga_set_state(rdev, state);
877 if (state)
878 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
879 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
880 else
881 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
882}
 883
884/**
885 * radeon_check_pot_argument - check that argument is a power of two
886 *
887 * @arg: value to check
888 *
889 * Validates that a certain argument is a power of two (all asics).
890 * Returns true if argument is valid.
891 */
892static bool radeon_check_pot_argument(int arg)
893{
894 return (arg & (arg - 1)) == 0;
895}
896
897/**
898 * radeon_check_arguments - validate module params
899 *
900 * @rdev: radeon_device pointer
901 *
902 * Validates certain module parameters and updates
903 * the associated values used by the driver (all asics).
904 */
 905static void radeon_check_arguments(struct radeon_device *rdev)
906{
907 /* vramlimit must be a power of two */
 908 if (!radeon_check_pot_argument(radeon_vram_limit)) {
909 dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
910 radeon_vram_limit);
911 radeon_vram_limit = 0;
 912 }
 913
 914 /* gtt size must be power of two and greater or equal to 32M */
 915 if (radeon_gart_size < 32) {
916 dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
917 radeon_gart_size);
918 radeon_gart_size = 512;
919
920 } else if (!radeon_check_pot_argument(radeon_gart_size)) {
921 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
922 radeon_gart_size);
923 radeon_gart_size = 512;
 924 }
925 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
926
927 /* AGP mode can only be -1, 1, 2, 4, 8 */
928 switch (radeon_agpmode) {
929 case -1:
930 case 0:
931 case 1:
932 case 2:
933 case 4:
934 case 8:
935 break;
936 default:
937 dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
938 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
939 radeon_agpmode = 0;
940 break;
941 }
942}
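
/*
 * Illustrative sketch (assumption: the usual radeon module parameter
 * spellings from radeon_drv.c): the values validated above typically come
 * from options such as
 *
 *	modprobe radeon gartsize=512 vramlimit=0 agpmode=-1
 *
 * Out-of-range values are warned about and reset to safe defaults.
 */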
943
944/**
945 * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
946 * needed for waking up.
947 *
948 * @pdev: pci dev pointer
949 */
950static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
951{
952
953 /* 6600m in a macbook pro */
954 if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
955 pdev->subsystem_device == 0x00e2) {
956 printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
957 return true;
958 }
959
960 return false;
961}
962
963/**
964 * radeon_switcheroo_set_state - set switcheroo state
965 *
966 * @pdev: pci dev pointer
967 * @state: vga switcheroo state
968 *
969 * Callback for the switcheroo driver. Suspends or resumes the
 970 * asic before or after it is powered up using ACPI methods.
971 */
972static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
973{
974 struct drm_device *dev = pci_get_drvdata(pdev);
975 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
976 if (state == VGA_SWITCHEROO_ON) {
977 unsigned d3_delay = dev->pdev->d3_delay;
978
979 printk(KERN_INFO "radeon: switched on\n");
980 /* don't suspend or resume card normally */
 981 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
982
983 if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
984 dev->pdev->d3_delay = 20;
985
 986 radeon_resume_kms(dev);
987
988 dev->pdev->d3_delay = d3_delay;
989
 990 dev->switch_power_state = DRM_SWITCH_POWER_ON;
 991 drm_kms_helper_poll_enable(dev);
992 } else {
993 printk(KERN_INFO "radeon: switched off\n");
 994 drm_kms_helper_poll_disable(dev);
 995 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 996 radeon_suspend_kms(dev, pmm);
 997 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
998 }
999}
1000
1001/**
1002 * radeon_switcheroo_can_switch - see if switcheroo state can change
1003 *
1004 * @pdev: pci dev pointer
1005 *
 1006 * Callback for the switcheroo driver. Checks if the switcheroo
1007 * state can be changed.
1008 * Returns true if the state can be changed, false if not.
1009 */
1010static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
1011{
1012 struct drm_device *dev = pci_get_drvdata(pdev);
1013 bool can_switch;
1014
1015 spin_lock(&dev->count_lock);
1016 can_switch = (dev->open_count == 0);
1017 spin_unlock(&dev->count_lock);
1018 return can_switch;
1019}
1020
1021static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
1022 .set_gpu_state = radeon_switcheroo_set_state,
1023 .reprobe = NULL,
1024 .can_switch = radeon_switcheroo_can_switch,
1025};
 1026
1027/**
1028 * radeon_device_init - initialize the driver
1029 *
1030 * @rdev: radeon_device pointer
 1031 * @ddev: drm dev pointer
1032 * @pdev: pci dev pointer
1033 * @flags: driver flags
1034 *
1035 * Initializes the driver info and hw (all asics).
1036 * Returns 0 for success or an error on failure.
1037 * Called at driver startup.
1038 */
1039int radeon_device_init(struct radeon_device *rdev,
1040 struct drm_device *ddev,
1041 struct pci_dev *pdev,
1042 uint32_t flags)
1043{
 1044 int r, i;
 1045 int dma_bits;
 1046
 1047 rdev->shutdown = false;
 1048 rdev->dev = &pdev->dev;
1049 rdev->ddev = ddev;
1050 rdev->pdev = pdev;
1051 rdev->flags = flags;
1052 rdev->family = flags & RADEON_FAMILY_MASK;
1053 rdev->is_atom_bios = false;
1054 rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
1055 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
 1056 rdev->accel_working = false;
1057 /* set up ring ids */
1058 for (i = 0; i < RADEON_NUM_RINGS; i++) {
1059 rdev->ring[i].idx = i;
1060 }
 1061
1062 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
1063 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1064 pdev->subsystem_vendor, pdev->subsystem_device);
 1065
 1066 /* mutex initialization is all done here so we
 1067 * can recall functions without having locking issues */
 1068 mutex_init(&rdev->ring_lock);
 1069 mutex_init(&rdev->dc_hw_i2c_mutex);
 1070 atomic_set(&rdev->ih.lock, 0);
 1071 mutex_init(&rdev->gem.mutex);
 1072 mutex_init(&rdev->pm.mutex);
 1073 mutex_init(&rdev->gpu_clock_mutex);
 1074 init_rwsem(&rdev->pm.mclk_lock);
 1075 init_rwsem(&rdev->exclusive_lock);
 1076 init_waitqueue_head(&rdev->irq.vblank_queue);
1077 r = radeon_gem_init(rdev);
1078 if (r)
1079 return r;
 1080 /* initialize vm here */
 1081 mutex_init(&rdev->vm_manager.lock);
1082 /* Adjust VM size here.
1083 * Currently set to 4GB ((1 << 20) 4k pages).
1084 * Max GPUVM size for cayman and SI is 40 bits.
1085 */
1086 rdev->vm_manager.max_pfn = 1 << 20;
1087 INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
 1088
1089 /* Set asic functions */
1090 r = radeon_asic_init(rdev);
 1091 if (r)
 1092 return r;
 1093 radeon_check_arguments(rdev);
 1094
 1095 /* all of the newer IGP chips have an internal gart.
 1096 * However, some rs4xx report as AGP, so remove that here.
1097 */
1098 if ((rdev->family >= CHIP_RS400) &&
1099 (rdev->flags & RADEON_IS_IGP)) {
1100 rdev->flags &= ~RADEON_IS_AGP;
1101 }
1102
 1103 if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
 1104 radeon_agp_disable(rdev);
1105 }
1106
1107 /* Set the internal MC address mask
1108 * This is the max address of the GPU's
1109 * internal address space.
1110 */
1111 if (rdev->family >= CHIP_CAYMAN)
1112 rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1113 else if (rdev->family >= CHIP_CEDAR)
1114 rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
1115 else
1116 rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
1117
1118 /* set DMA mask + need_dma32 flags.
1119 * PCIE - can handle 40-bits.
 1120 * IGP - can handle 40-bits
 1121 * AGP - generally dma32 is safest
 1122 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
1123 */
1124 rdev->need_dma32 = false;
1125 if (rdev->flags & RADEON_IS_AGP)
1126 rdev->need_dma32 = true;
 1127 if ((rdev->flags & RADEON_IS_PCI) &&
 1128 (rdev->family <= CHIP_RS740))
1129 rdev->need_dma32 = true;
1130
1131 dma_bits = rdev->need_dma32 ? 32 : 40;
1132 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
 1133 if (r) {
 1134 rdev->need_dma32 = true;
 1135 dma_bits = 32;
1136 printk(KERN_WARNING "radeon: No suitable DMA available.\n");
1137 }
1138 r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
1139 if (r) {
1140 pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
1141 printk(KERN_WARNING "radeon: No coherent DMA available.\n");
1142 }
1143
1144 /* Registers mapping */
1145 /* TODO: block userspace mapping of io register */
 1146 spin_lock_init(&rdev->mmio_idx_lock);
1147 rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
1148 rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
1149 rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
1150 if (rdev->rmmio == NULL) {
1151 return -ENOMEM;
1152 }
1153 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
1154 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
1155
1156 /* io port mapping */
1157 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1158 if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
1159 rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
1160 rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
1161 break;
1162 }
1163 }
1164 if (rdev->rio_mem == NULL)
1165 DRM_ERROR("Unable to find PCI I/O BAR\n");
1166
 1167 /* if we have > 1 VGA cards, then disable the radeon VGA resources */
1168 /* this will fail for cards that aren't VGA class devices, just
1169 * ignore it */
1170 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
 1171 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);
 1172
 1173 r = radeon_init(rdev);
 1174 if (r)
 1175 return r;
 1176
1177 r = radeon_ib_ring_tests(rdev);
1178 if (r)
1179 DRM_ERROR("ib ring test failed (%d).\n", r);
1180
1181 r = radeon_gem_debugfs_init(rdev);
1182 if (r) {
1183 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
1184 }
1185
1186 if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
 1187 /* Acceleration not working on AGP card, try again
1188 * with fallback to PCI or PCIE GART
1189 */
 1190 radeon_asic_reset(rdev);
1191 radeon_fini(rdev);
1192 radeon_agp_disable(rdev);
1193 r = radeon_init(rdev);
1194 if (r)
1195 return r;
 1196 }
 1197 if ((radeon_testing & 1)) {
1198 radeon_test_moves(rdev);
1199 }
1200 if ((radeon_testing & 2)) {
1201 radeon_test_syncing(rdev);
1202 }
 1203 if (radeon_benchmarking) {
 1204 radeon_benchmark(rdev, radeon_benchmarking);
 1205 }
 1206 return 0;
1207}
1208
1209static void radeon_debugfs_remove_files(struct radeon_device *rdev);
1210
1211/**
1212 * radeon_device_fini - tear down the driver
1213 *
1214 * @rdev: radeon_device pointer
1215 *
1216 * Tear down the driver info (all asics).
1217 * Called at driver shutdown.
1218 */
1219void radeon_device_fini(struct radeon_device *rdev)
1220{
1221 DRM_INFO("radeon: finishing device.\n");
1222 rdev->shutdown = true;
 1223 /* evict vram memory */
 1224 radeon_bo_evict_vram(rdev);
 1225 radeon_fini(rdev);
 1226 vga_switcheroo_unregister_client(rdev->pdev);
 1227 vga_client_register(rdev->pdev, NULL, NULL, NULL);
1228 if (rdev->rio_mem)
1229 pci_iounmap(rdev->pdev, rdev->rio_mem);
 1230 rdev->rio_mem = NULL;
1231 iounmap(rdev->rmmio);
1232 rdev->rmmio = NULL;
 1233 radeon_debugfs_remove_files(rdev);
1234}
1235
1236
1237/*
1238 * Suspend & resume.
1239 */
1240/**
1241 * radeon_suspend_kms - initiate device suspend
1242 *
 1243 * @dev: drm dev pointer
1244 * @state: suspend state
1245 *
1246 * Puts the hw in the suspend state (all asics).
1247 * Returns 0 for success or an error on failure.
1248 * Called at driver suspend.
1249 */
771fe6b9
JG
1250int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
1251{
875c1866 1252 struct radeon_device *rdev;
771fe6b9 1253 struct drm_crtc *crtc;
d8dcaa1d 1254 struct drm_connector *connector;
7465280c 1255 int i, r;
5f8f635e 1256 bool force_completion = false;
771fe6b9 1257
875c1866 1258 if (dev == NULL || dev->dev_private == NULL) {
771fe6b9
JG
1259 return -ENODEV;
1260 }
1261 if (state.event == PM_EVENT_PRETHAW) {
1262 return 0;
1263 }
1264 rdev = dev->dev_private;
1265
 1266 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 1267 return 0;
 1268
1269 drm_kms_helper_poll_disable(dev);
1270
1271 /* turn off display hw */
1272 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1273 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1274 }
1275
1276 /* unpin the front buffers */
1277 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1278 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
 1279 struct radeon_bo *robj;
1280
1281 if (rfb == NULL || rfb->obj == NULL) {
1282 continue;
1283 }
 1284 robj = gem_to_radeon_bo(rfb->obj);
1285 /* don't unpin kernel fb objects */
1286 if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
 1287 r = radeon_bo_reserve(robj, false);
 1288 if (r == 0) {
1289 radeon_bo_unpin(robj);
1290 radeon_bo_unreserve(robj);
1291 }
1292 }
1293 }
1294 /* evict vram memory */
 1295 radeon_bo_evict_vram(rdev);
1296
1297 mutex_lock(&rdev->ring_lock);
 1298 /* wait for gpu to finish processing current batch */
1299 for (i = 0; i < RADEON_NUM_RINGS; i++) {
1300 r = radeon_fence_wait_empty_locked(rdev, i);
1301 if (r) {
1302 /* delay GPU reset to resume */
1303 force_completion = true;
1304 }
1305 }
1306 if (force_completion) {
1307 radeon_fence_driver_force_completion(rdev);
1308 }
 1309 mutex_unlock(&rdev->ring_lock);
 1310
1311 radeon_save_bios_scratch_regs(rdev);
1312
 1313 radeon_pm_suspend(rdev);
 1314 radeon_suspend(rdev);
 1315 radeon_hpd_fini(rdev);
 1316 /* evict remaining vram memory */
 1317 radeon_bo_evict_vram(rdev);
 1318
1319 radeon_agp_suspend(rdev);
1320
1321 pci_save_state(dev->pdev);
1322 if (state.event == PM_EVENT_SUSPEND) {
1323 /* Shut down the device */
1324 pci_disable_device(dev->pdev);
1325 pci_set_power_state(dev->pdev, PCI_D3hot);
1326 }
 1327 console_lock();
 1328 radeon_fbdev_set_suspend(rdev, 1);
 1329 console_unlock();
1330 return 0;
1331}
1332
1333/**
1334 * radeon_resume_kms - initiate device resume
1335 *
 1336 * @dev: drm dev pointer
1337 *
1338 * Bring the hw back to operating state (all asics).
1339 * Returns 0 for success or an error on failure.
1340 * Called at driver resume.
1341 */
1342int radeon_resume_kms(struct drm_device *dev)
1343{
 1344 struct drm_connector *connector;
 1345 struct radeon_device *rdev = dev->dev_private;
 1346 int r;
 1347
 1348 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1349 return 0;
1350
 1351 console_lock();
1352 pci_set_power_state(dev->pdev, PCI_D0);
1353 pci_restore_state(dev->pdev);
1354 if (pci_enable_device(dev->pdev)) {
 1355 console_unlock();
1356 return -1;
1357 }
1358 /* resume AGP if in use */
1359 radeon_agp_resume(rdev);
 1360 radeon_resume(rdev);
1361
1362 r = radeon_ib_ring_tests(rdev);
1363 if (r)
1364 DRM_ERROR("ib ring test failed (%d).\n", r);
1365
 1366 radeon_pm_resume(rdev);
 1367 radeon_restore_bios_scratch_regs(rdev);
 1368
 1369 radeon_fbdev_set_suspend(rdev, 0);
 1370 console_unlock();
 1371
1372 /* init dig PHYs, disp eng pll */
1373 if (rdev->is_atom_bios) {
 1374 radeon_atom_encoder_init(rdev);
 1375 radeon_atom_disp_eng_pll_init(rdev);
1376 /* turn on the BL */
1377 if (rdev->mode_info.bl_encoder) {
1378 u8 bl_level = radeon_get_backlight_level(rdev,
1379 rdev->mode_info.bl_encoder);
1380 radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
1381 bl_level);
1382 }
 1383 }
1384 /* reset hpd state */
1385 radeon_hpd_init(rdev);
1386 /* blat the mode back in */
1387 drm_helper_resume_force_mode(dev);
1388 /* turn on display hw */
1389 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1390 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1391 }
1392
1393 drm_kms_helper_poll_enable(dev);
1394 return 0;
1395}
1396
1397/**
1398 * radeon_gpu_reset - reset the asic
1399 *
1400 * @rdev: radeon device pointer
1401 *
 1402 * Attempt to reset the GPU if it has hung (all asics).
1403 * Returns 0 for success or an error on failure.
1404 */
1405int radeon_gpu_reset(struct radeon_device *rdev)
1406{
1407 unsigned ring_sizes[RADEON_NUM_RINGS];
1408 uint32_t *ring_data[RADEON_NUM_RINGS];
1409
1410 bool saved = false;
1411
1412 int i, r;
 1413 int resched;
 1414
 1415 down_write(&rdev->exclusive_lock);
 1416 radeon_save_bios_scratch_regs(rdev);
1417 /* block TTM */
1418 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
 1419 radeon_suspend(rdev);
 1420
55d7c221
CK
1421 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1422 ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
1423 &ring_data[i]);
1424 if (ring_sizes[i]) {
1425 saved = true;
1426 dev_info(rdev->dev, "Saved %d dwords of commands "
1427 "on ring %d.\n", ring_sizes[i], i);
1428 }
1429 }
1430
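	/* Reset strategy: after a successful asic reset the commands saved
	 * above are replayed; if the IB tests then fail, the backup is
	 * dropped and the reset is retried once more without it.
	 */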
1431retry:
1432 r = radeon_asic_reset(rdev);
1433 if (!r) {
 1434 dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
 1435 radeon_resume(rdev);
 1436 }
 1437
 1438 radeon_restore_bios_scratch_regs(rdev);
 1439
1440 if (!r) {
1441 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1442 radeon_ring_restore(rdev, &rdev->ring[i],
1443 ring_sizes[i], ring_data[i]);
1444 ring_sizes[i] = 0;
1445 ring_data[i] = NULL;
1446 }
1447
1448 r = radeon_ib_ring_tests(rdev);
1449 if (r) {
1450 dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
1451 if (saved) {
 1452 saved = false;
1453 radeon_suspend(rdev);
1454 goto retry;
1455 }
1456 }
1457 } else {
 1458 radeon_fence_driver_force_completion(rdev);
1459 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1460 kfree(ring_data[i]);
1461 }
 1462 }
 1463
1464 drm_helper_resume_force_mode(rdev->ddev);
1465
 1466 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
1467 if (r) {
1468 /* bad news, how to tell it to userspace ? */
1469 dev_info(rdev->dev, "GPU reset failed\n");
1470 }
1471
 1472 up_write(&rdev->exclusive_lock);
1473 return r;
1474}
1475
1476
1477/*
1478 * Debugfs
1479 */
1480int radeon_debugfs_add_files(struct radeon_device *rdev,
1481 struct drm_info_list *files,
1482 unsigned nfiles)
1483{
1484 unsigned i;
1485
1486 for (i = 0; i < rdev->debugfs_count; i++) {
1487 if (rdev->debugfs[i].files == files) {
1488 /* Already registered */
1489 return 0;
1490 }
1491 }
 1492
 1493 i = rdev->debugfs_count + 1;
1494 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
1495 DRM_ERROR("Reached maximum number of debugfs components.\n");
1496 DRM_ERROR("Report so we increase "
1497 "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
1498 return -EINVAL;
1499 }
1500 rdev->debugfs[rdev->debugfs_count].files = files;
1501 rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
1502 rdev->debugfs_count = i;
1503#if defined(CONFIG_DEBUG_FS)
1504 drm_debugfs_create_files(files, nfiles,
1505 rdev->ddev->control->debugfs_root,
1506 rdev->ddev->control);
1507 drm_debugfs_create_files(files, nfiles,
1508 rdev->ddev->primary->debugfs_root,
1509 rdev->ddev->primary);
1510#endif
1511 return 0;
1512}
1513
1514static void radeon_debugfs_remove_files(struct radeon_device *rdev)
1515{
1516#if defined(CONFIG_DEBUG_FS)
1517 unsigned i;
1518
1519 for (i = 0; i < rdev->debugfs_count; i++) {
1520 drm_debugfs_remove_files(rdev->debugfs[i].files,
1521 rdev->debugfs[i].num_files,
1522 rdev->ddev->control);
1523 drm_debugfs_remove_files(rdev->debugfs[i].files,
1524 rdev->debugfs[i].num_files,
1525 rdev->ddev->primary);
1526 }
1527#endif
1528}
1529
1530#if defined(CONFIG_DEBUG_FS)
1531int radeon_debugfs_init(struct drm_minor *minor)
1532{
1533 return 0;
1534}
1535
1536void radeon_debugfs_cleanup(struct drm_minor *minor)
1537{
1538}
1539#endif