Merge remote-tracking branches 'regmap/topic/const' and 'regmap/topic/hwspinlock...
[linux-2.6-block.git] / drivers / gpu / drm / radeon / radeon_device.c
CommitLineData
771fe6b9
JG
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/console.h>
5a0e3ad6 29#include <linux/slab.h>
771fe6b9
JG
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
b8751946 33#include <linux/pm_runtime.h>
28d52043 34#include <linux/vgaarb.h>
6a9ee8af 35#include <linux/vga_switcheroo.h>
bcc65fd8 36#include <linux/efi.h>
771fe6b9
JG
37#include "radeon_reg.h"
38#include "radeon.h"
771fe6b9
JG
39#include "atom.h"
40
1b5331d9
JG
/* Human-readable name for each radeon_family enum value, indexed by
 * rdev->family; must stay in the exact order of the family enum. */
static const char radeon_family_name[][16] = {
	/* r1xx/r2xx */
	"R100", "RV100", "RS100", "RV200", "RS200",
	"R200", "RV250", "RS300", "RV280",
	/* r3xx/r4xx */
	"R300", "R350", "RV350", "RV380",
	"R420", "R423", "RV410", "RS400", "RS480",
	/* r5xx */
	"RS600", "RS690", "RS740",
	"RV515", "R520", "RV530", "RV560", "RV570", "R580",
	/* r6xx */
	"R600", "RV610", "RV630", "RV670", "RV620", "RV635", "RS780", "RS880",
	/* r7xx */
	"RV770", "RV730", "RV710", "RV740",
	/* evergreen */
	"CEDAR", "REDWOOD", "JUNIPER", "CYPRESS", "HEMLOCK",
	"PALM", "SUMO", "SUMO2",
	/* northern islands */
	"BARTS", "TURKS", "CAICOS", "CAYMAN", "ARUBA",
	/* southern islands */
	"TAHITI", "PITCAIRN", "VERDE", "OLAND", "HAINAN",
	/* sea islands */
	"BONAIRE", "KAVERI", "KABINI", "HAWAII", "MULLINS",
	"LAST",
};
106
066f1f0b
AD
/* ATPX helpers used for PX (PowerXpress) detection below; real
 * implementations are only built with VGA switcheroo support, otherwise
 * inline stubs report that neither capability is present. */
#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx_dgpu_power_cntl(void);
bool radeon_is_atpx_hybrid(void);
#else
static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool radeon_is_atpx_hybrid(void) { return false; }
#endif
114
/* Quirk flag: force runtime PX (PowerXpress) off for a matching board. */
#define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)

/* One PX quirk entry, matched on PCI vendor/device and subsystem
 * vendor/device of the discrete GPU. */
struct radeon_px_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
	u32 px_quirk_flags;
};

/* Boards whose PX support is broken; terminated by an all-zero sentinel
 * (radeon_device_handle_px_quirks stops at chip_device == 0). */
static struct radeon_px_quirk radeon_px_quirk_list[] = {
	/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
	 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
	 */
	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
	 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
	 */
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	{ 0, 0, 0, 0, 0 },
};
144
90c4cde9
AD
145bool radeon_is_px(struct drm_device *dev)
146{
147 struct radeon_device *rdev = dev->dev_private;
148
149 if (rdev->flags & RADEON_IS_PX)
150 return true;
151 return false;
152}
10ebc0bc 153
4807c5a8
AD
154static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
155{
156 struct radeon_px_quirk *p = radeon_px_quirk_list;
157
158 /* Apply PX quirks */
159 while (p && p->chip_device != 0) {
160 if (rdev->pdev->vendor == p->chip_vendor &&
161 rdev->pdev->device == p->chip_device &&
162 rdev->pdev->subsystem_vendor == p->subsys_vendor &&
163 rdev->pdev->subsystem_device == p->subsys_device) {
164 rdev->px_quirk_flags = p->px_quirk_flags;
165 break;
166 }
167 ++p;
168 }
169
170 if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
171 rdev->flags &= ~RADEON_IS_PX;
066f1f0b
AD
172
173 /* disable PX is the system doesn't support dGPU power control or hybrid gfx */
174 if (!radeon_is_atpx_hybrid() &&
175 !radeon_has_atpx_dgpu_power_cntl())
176 rdev->flags &= ~RADEON_IS_PX;
4807c5a8
AD
177}
178
2e1b65f9
AD
179/**
180 * radeon_program_register_sequence - program an array of registers.
181 *
182 * @rdev: radeon_device pointer
183 * @registers: pointer to the register array
184 * @array_size: size of the register array
185 *
186 * Programs an array or registers with and and or masks.
187 * This is a helper for setting golden registers.
188 */
189void radeon_program_register_sequence(struct radeon_device *rdev,
190 const u32 *registers,
191 const u32 array_size)
192{
193 u32 tmp, reg, and_mask, or_mask;
194 int i;
195
196 if (array_size % 3)
197 return;
198
199 for (i = 0; i < array_size; i +=3) {
200 reg = registers[i + 0];
201 and_mask = registers[i + 1];
202 or_mask = registers[i + 2];
203
204 if (and_mask == 0xffffffff) {
205 tmp = or_mask;
206 } else {
207 tmp = RREG32(reg);
208 tmp &= ~and_mask;
209 tmp |= or_mask;
210 }
211 WREG32(reg, tmp);
212 }
213}
214
1a0041b8
AD
215void radeon_pci_config_reset(struct radeon_device *rdev)
216{
217 pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
218}
219
0c195119
AD
220/**
221 * radeon_surface_init - Clear GPU surface registers.
222 *
223 * @rdev: radeon_device pointer
224 *
225 * Clear GPU surface registers (r1xx-r5xx).
b1e3a6d1 226 */
3ce0a23d 227void radeon_surface_init(struct radeon_device *rdev)
b1e3a6d1
MD
228{
229 /* FIXME: check this out */
230 if (rdev->family < CHIP_R600) {
231 int i;
232
550e2d92
DA
233 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
234 if (rdev->surface_regs[i].bo)
235 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
236 else
237 radeon_clear_surface_reg(rdev, i);
b1e3a6d1 238 }
e024e110
DA
239 /* enable surfaces */
240 WREG32(RADEON_SURFACE_CNTL, 0);
b1e3a6d1
MD
241 }
242}
243
771fe6b9
JG
244/*
245 * GPU scratch registers helpers function.
246 */
0c195119
AD
247/**
248 * radeon_scratch_init - Init scratch register driver information.
249 *
250 * @rdev: radeon_device pointer
251 *
252 * Init CP scratch register driver information (r1xx-r5xx)
253 */
3ce0a23d 254void radeon_scratch_init(struct radeon_device *rdev)
771fe6b9
JG
255{
256 int i;
257
258 /* FIXME: check this out */
259 if (rdev->family < CHIP_R300) {
260 rdev->scratch.num_reg = 5;
261 } else {
262 rdev->scratch.num_reg = 7;
263 }
724c80e1 264 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
771fe6b9
JG
265 for (i = 0; i < rdev->scratch.num_reg; i++) {
266 rdev->scratch.free[i] = true;
724c80e1 267 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
771fe6b9
JG
268 }
269}
270
0c195119
AD
271/**
272 * radeon_scratch_get - Allocate a scratch register
273 *
274 * @rdev: radeon_device pointer
275 * @reg: scratch register mmio offset
276 *
277 * Allocate a CP scratch register for use by the driver (all asics).
278 * Returns 0 on success or -EINVAL on failure.
279 */
771fe6b9
JG
280int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
281{
282 int i;
283
284 for (i = 0; i < rdev->scratch.num_reg; i++) {
285 if (rdev->scratch.free[i]) {
286 rdev->scratch.free[i] = false;
287 *reg = rdev->scratch.reg[i];
288 return 0;
289 }
290 }
291 return -EINVAL;
292}
293
0c195119
AD
294/**
295 * radeon_scratch_free - Free a scratch register
296 *
297 * @rdev: radeon_device pointer
298 * @reg: scratch register mmio offset
299 *
300 * Free a CP scratch register allocated for use by the driver (all asics)
301 */
771fe6b9
JG
302void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
303{
304 int i;
305
306 for (i = 0; i < rdev->scratch.num_reg; i++) {
307 if (rdev->scratch.reg[i] == reg) {
308 rdev->scratch.free[i] = true;
309 return;
310 }
311 }
312}
313
75efdee1
AD
314/*
315 * GPU doorbell aperture helpers function.
316 */
317/**
318 * radeon_doorbell_init - Init doorbell driver information.
319 *
320 * @rdev: radeon_device pointer
321 *
322 * Init doorbell driver information (CIK)
323 * Returns 0 on success, error on failure.
324 */
28f5a6cd 325static int radeon_doorbell_init(struct radeon_device *rdev)
75efdee1 326{
75efdee1
AD
327 /* doorbell bar mapping */
328 rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
329 rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
330
d5754ab8
AL
331 rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
332 if (rdev->doorbell.num_doorbells == 0)
333 return -EINVAL;
75efdee1 334
d5754ab8 335 rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
75efdee1
AD
336 if (rdev->doorbell.ptr == NULL) {
337 return -ENOMEM;
338 }
339 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
340 DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
341
d5754ab8 342 memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
75efdee1 343
75efdee1
AD
344 return 0;
345}
346
347/**
348 * radeon_doorbell_fini - Tear down doorbell driver information.
349 *
350 * @rdev: radeon_device pointer
351 *
352 * Tear down doorbell driver information (CIK)
353 */
28f5a6cd 354static void radeon_doorbell_fini(struct radeon_device *rdev)
75efdee1
AD
355{
356 iounmap(rdev->doorbell.ptr);
357 rdev->doorbell.ptr = NULL;
358}
359
360/**
d5754ab8 361 * radeon_doorbell_get - Allocate a doorbell entry
75efdee1
AD
362 *
363 * @rdev: radeon_device pointer
d5754ab8 364 * @doorbell: doorbell index
75efdee1 365 *
d5754ab8 366 * Allocate a doorbell for use by the driver (all asics).
75efdee1
AD
367 * Returns 0 on success or -EINVAL on failure.
368 */
369int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
370{
d5754ab8
AL
371 unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
372 if (offset < rdev->doorbell.num_doorbells) {
373 __set_bit(offset, rdev->doorbell.used);
374 *doorbell = offset;
375 return 0;
376 } else {
377 return -EINVAL;
75efdee1 378 }
75efdee1
AD
379}
380
381/**
d5754ab8 382 * radeon_doorbell_free - Free a doorbell entry
75efdee1
AD
383 *
384 * @rdev: radeon_device pointer
d5754ab8 385 * @doorbell: doorbell index
75efdee1 386 *
d5754ab8 387 * Free a doorbell allocated for use by the driver (all asics)
75efdee1
AD
388 */
389void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
390{
d5754ab8
AL
391 if (doorbell < rdev->doorbell.num_doorbells)
392 __clear_bit(doorbell, rdev->doorbell.used);
75efdee1
AD
393}
394
ebff8453
OG
395/**
396 * radeon_doorbell_get_kfd_info - Report doorbell configuration required to
397 * setup KFD
398 *
399 * @rdev: radeon_device pointer
400 * @aperture_base: output returning doorbell aperture base physical address
401 * @aperture_size: output returning doorbell aperture size in bytes
402 * @start_offset: output returning # of doorbell bytes reserved for radeon.
403 *
404 * Radeon and the KFD share the doorbell aperture. Radeon sets it up,
405 * takes doorbells required for its own rings and reports the setup to KFD.
406 * Radeon reserved doorbells are at the start of the doorbell aperture.
407 */
408void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
409 phys_addr_t *aperture_base,
410 size_t *aperture_size,
411 size_t *start_offset)
412{
413 /* The first num_doorbells are used by radeon.
414 * KFD takes whatever's left in the aperture. */
415 if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
416 *aperture_base = rdev->doorbell.base;
417 *aperture_size = rdev->doorbell.size;
418 *start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
419 } else {
420 *aperture_base = 0;
421 *aperture_size = 0;
422 *start_offset = 0;
423 }
424}
425
0c195119
AD
426/*
427 * radeon_wb_*()
428 * Writeback is the the method by which the the GPU updates special pages
429 * in memory with the status of certain GPU events (fences, ring pointers,
430 * etc.).
431 */
432
433/**
434 * radeon_wb_disable - Disable Writeback
435 *
436 * @rdev: radeon_device pointer
437 *
438 * Disables Writeback (all asics). Used for suspend.
439 */
724c80e1
AD
440void radeon_wb_disable(struct radeon_device *rdev)
441{
724c80e1
AD
442 rdev->wb.enabled = false;
443}
444
0c195119
AD
445/**
446 * radeon_wb_fini - Disable Writeback and free memory
447 *
448 * @rdev: radeon_device pointer
449 *
450 * Disables Writeback and frees the Writeback memory (all asics).
451 * Used at driver shutdown.
452 */
724c80e1
AD
453void radeon_wb_fini(struct radeon_device *rdev)
454{
455 radeon_wb_disable(rdev);
456 if (rdev->wb.wb_obj) {
089920f2
JG
457 if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
458 radeon_bo_kunmap(rdev->wb.wb_obj);
459 radeon_bo_unpin(rdev->wb.wb_obj);
460 radeon_bo_unreserve(rdev->wb.wb_obj);
461 }
724c80e1
AD
462 radeon_bo_unref(&rdev->wb.wb_obj);
463 rdev->wb.wb = NULL;
464 rdev->wb.wb_obj = NULL;
465 }
466}
467
0c195119
AD
468/**
469 * radeon_wb_init- Init Writeback driver info and allocate memory
470 *
471 * @rdev: radeon_device pointer
472 *
473 * Disables Writeback and frees the Writeback memory (all asics).
474 * Used at driver startup.
475 * Returns 0 on success or an -error on failure.
476 */
724c80e1
AD
477int radeon_wb_init(struct radeon_device *rdev)
478{
479 int r;
480
481 if (rdev->wb.wb_obj == NULL) {
441921d5 482 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
831b6966 483 RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
02376d82 484 &rdev->wb.wb_obj);
724c80e1
AD
485 if (r) {
486 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
487 return r;
488 }
089920f2
JG
489 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
490 if (unlikely(r != 0)) {
491 radeon_wb_fini(rdev);
492 return r;
493 }
494 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
495 &rdev->wb.gpu_addr);
496 if (r) {
497 radeon_bo_unreserve(rdev->wb.wb_obj);
498 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
499 radeon_wb_fini(rdev);
500 return r;
501 }
502 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
724c80e1 503 radeon_bo_unreserve(rdev->wb.wb_obj);
089920f2
JG
504 if (r) {
505 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
506 radeon_wb_fini(rdev);
507 return r;
508 }
724c80e1
AD
509 }
510
e6ba7599
AD
511 /* clear wb memory */
512 memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
d0f8a854
AD
513 /* disable event_write fences */
514 rdev->wb.use_event = false;
724c80e1 515 /* disabled via module param */
3b7a2b24 516 if (radeon_no_wb == 1) {
724c80e1 517 rdev->wb.enabled = false;
3b7a2b24 518 } else {
724c80e1 519 if (rdev->flags & RADEON_IS_AGP) {
28eebb70
AD
520 /* often unreliable on AGP */
521 rdev->wb.enabled = false;
522 } else if (rdev->family < CHIP_R300) {
523 /* often unreliable on pre-r300 */
724c80e1 524 rdev->wb.enabled = false;
d0f8a854 525 } else {
724c80e1 526 rdev->wb.enabled = true;
d0f8a854 527 /* event_write fences are only available on r600+ */
3b7a2b24 528 if (rdev->family >= CHIP_R600) {
d0f8a854 529 rdev->wb.use_event = true;
3b7a2b24 530 }
d0f8a854 531 }
724c80e1 532 }
c994ead6
AD
533 /* always use writeback/events on NI, APUs */
534 if (rdev->family >= CHIP_PALM) {
7d52785d
AD
535 rdev->wb.enabled = true;
536 rdev->wb.use_event = true;
537 }
724c80e1
AD
538
539 dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
540
541 return 0;
542}
543
d594e46a
JG
544/**
545 * radeon_vram_location - try to find VRAM location
546 * @rdev: radeon device structure holding all necessary informations
547 * @mc: memory controller structure holding memory informations
548 * @base: base address at which to put VRAM
549 *
550 * Function will place try to place VRAM at base address provided
551 * as parameter (which is so far either PCI aperture address or
552 * for IGP TOM base address).
553 *
554 * If there is not enough space to fit the unvisible VRAM in the 32bits
555 * address space then we limit the VRAM size to the aperture.
556 *
557 * If we are using AGP and if the AGP aperture doesn't allow us to have
558 * room for all the VRAM than we restrict the VRAM to the PCI aperture
559 * size and print a warning.
560 *
561 * This function will never fails, worst case are limiting VRAM.
562 *
563 * Note: GTT start, end, size should be initialized before calling this
564 * function on AGP platform.
565 *
25985edc 566 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
d594e46a
JG
567 * this shouldn't be a problem as we are using the PCI aperture as a reference.
568 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
569 * not IGP.
570 *
571 * Note: we use mc_vram_size as on some board we need to program the mc to
572 * cover the whole aperture even if VRAM size is inferior to aperture size
573 * Novell bug 204882 + along with lots of ubuntu ones
574 *
575 * Note: when limiting vram it's safe to overwritte real_vram_size because
576 * we are not in case where real_vram_size is inferior to mc_vram_size (ie
577 * note afected by bogus hw of Novell bug 204882 + along with lots of ubuntu
578 * ones)
579 *
580 * Note: IGP TOM addr should be the same as the aperture addr, we don't
581 * explicitly check for that thought.
582 *
583 * FIXME: when reducing VRAM size align new size on power of 2.
771fe6b9 584 */
d594e46a 585void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
771fe6b9 586{
1bcb04f7
CK
587 uint64_t limit = (uint64_t)radeon_vram_limit << 20;
588
d594e46a 589 mc->vram_start = base;
9ed8b1f9 590 if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
d594e46a
JG
591 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
592 mc->real_vram_size = mc->aper_size;
593 mc->mc_vram_size = mc->aper_size;
594 }
595 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
2cbeb4ef 596 if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
d594e46a
JG
597 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
598 mc->real_vram_size = mc->aper_size;
599 mc->mc_vram_size = mc->aper_size;
600 }
601 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
1bcb04f7
CK
602 if (limit && limit < mc->real_vram_size)
603 mc->real_vram_size = limit;
dd7cc55a 604 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
d594e46a
JG
605 mc->mc_vram_size >> 20, mc->vram_start,
606 mc->vram_end, mc->real_vram_size >> 20);
607}
771fe6b9 608
d594e46a
JG
609/**
610 * radeon_gtt_location - try to find GTT location
611 * @rdev: radeon device structure holding all necessary informations
612 * @mc: memory controller structure holding memory informations
613 *
614 * Function will place try to place GTT before or after VRAM.
615 *
616 * If GTT size is bigger than space left then we ajust GTT size.
617 * Thus function will never fails.
618 *
619 * FIXME: when reducing GTT size align new size on power of 2.
620 */
621void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
622{
623 u64 size_af, size_bf;
624
9ed8b1f9 625 size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
8d369bb1 626 size_bf = mc->vram_start & ~mc->gtt_base_align;
d594e46a
JG
627 if (size_bf > size_af) {
628 if (mc->gtt_size > size_bf) {
629 dev_warn(rdev->dev, "limiting GTT\n");
630 mc->gtt_size = size_bf;
771fe6b9 631 }
8d369bb1 632 mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
771fe6b9 633 } else {
d594e46a
JG
634 if (mc->gtt_size > size_af) {
635 dev_warn(rdev->dev, "limiting GTT\n");
636 mc->gtt_size = size_af;
637 }
8d369bb1 638 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
771fe6b9 639 }
d594e46a 640 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
dd7cc55a 641 dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
d594e46a 642 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
771fe6b9
JG
643}
644
771fe6b9
JG
/*
 * GPU helpers function.
 */

/**
 * radeon_device_is_virtual - check if we are running in a virtual environment
 *
 * Check if the asic has been passed through to a VM (all asics).
 * Used at driver startup.
 * Returns true if virtual or false if not.
 */
bool radeon_device_is_virtual(void)
{
#ifdef CONFIG_X86
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
	/* no hypervisor detection available on non-x86 */
	return false;
#endif
}
664
0c195119
AD
665/**
666 * radeon_card_posted - check if the hw has already been initialized
667 *
668 * @rdev: radeon_device pointer
669 *
670 * Check if the asic has been initialized (all asics).
671 * Used at driver startup.
672 * Returns true if initialized or false if not.
673 */
9f022ddf 674bool radeon_card_posted(struct radeon_device *rdev)
771fe6b9
JG
675{
676 uint32_t reg;
677
884031f0
AD
678 /* for pass through, always force asic_init for CI */
679 if (rdev->family >= CHIP_BONAIRE &&
680 radeon_device_is_virtual())
05082b8b
AD
681 return false;
682
50a583f6 683 /* required for EFI mode on macbook2,1 which uses an r5xx asic */
83e68189 684 if (efi_enabled(EFI_BOOT) &&
50a583f6
AD
685 (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
686 (rdev->family < CHIP_R600))
bcc65fd8
MG
687 return false;
688
2cf3a4fc
AD
689 if (ASIC_IS_NODCE(rdev))
690 goto check_memsize;
691
771fe6b9 692 /* first check CRTCs */
09fb8bd1 693 if (ASIC_IS_DCE4(rdev)) {
18007401
AD
694 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
695 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
09fb8bd1
AD
696 if (rdev->num_crtc >= 4) {
697 reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
698 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
699 }
700 if (rdev->num_crtc >= 6) {
701 reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
702 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
703 }
bcc1c2a1
AD
704 if (reg & EVERGREEN_CRTC_MASTER_EN)
705 return true;
706 } else if (ASIC_IS_AVIVO(rdev)) {
771fe6b9
JG
707 reg = RREG32(AVIVO_D1CRTC_CONTROL) |
708 RREG32(AVIVO_D2CRTC_CONTROL);
709 if (reg & AVIVO_CRTC_EN) {
710 return true;
711 }
712 } else {
713 reg = RREG32(RADEON_CRTC_GEN_CNTL) |
714 RREG32(RADEON_CRTC2_GEN_CNTL);
715 if (reg & RADEON_CRTC_EN) {
716 return true;
717 }
718 }
719
2cf3a4fc 720check_memsize:
771fe6b9
JG
721 /* then check MEM_SIZE, in case the crtcs are off */
722 if (rdev->family >= CHIP_R600)
723 reg = RREG32(R600_CONFIG_MEMSIZE);
724 else
725 reg = RREG32(RADEON_CONFIG_MEMSIZE);
726
727 if (reg)
728 return true;
729
730 return false;
731
732}
733
0c195119
AD
734/**
735 * radeon_update_bandwidth_info - update display bandwidth params
736 *
737 * @rdev: radeon_device pointer
738 *
739 * Used when sclk/mclk are switched or display modes are set.
740 * params are used to calculate display watermarks (all asics)
741 */
f47299c5
AD
742void radeon_update_bandwidth_info(struct radeon_device *rdev)
743{
744 fixed20_12 a;
8807286e
AD
745 u32 sclk = rdev->pm.current_sclk;
746 u32 mclk = rdev->pm.current_mclk;
f47299c5 747
8807286e
AD
748 /* sclk/mclk in Mhz */
749 a.full = dfixed_const(100);
750 rdev->pm.sclk.full = dfixed_const(sclk);
751 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
752 rdev->pm.mclk.full = dfixed_const(mclk);
753 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
f47299c5 754
8807286e 755 if (rdev->flags & RADEON_IS_IGP) {
68adac5e 756 a.full = dfixed_const(16);
f47299c5 757 /* core_bandwidth = sclk(Mhz) * 16 */
68adac5e 758 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
f47299c5
AD
759 }
760}
761
0c195119
AD
762/**
763 * radeon_boot_test_post_card - check and possibly initialize the hw
764 *
765 * @rdev: radeon_device pointer
766 *
767 * Check if the asic is initialized and if not, attempt to initialize
768 * it (all asics).
769 * Returns true if initialized or false if not.
770 */
72542d77
DA
771bool radeon_boot_test_post_card(struct radeon_device *rdev)
772{
773 if (radeon_card_posted(rdev))
774 return true;
775
776 if (rdev->bios) {
777 DRM_INFO("GPU not posted. posting now...\n");
778 if (rdev->is_atom_bios)
779 atom_asic_init(rdev->mode_info.atom_context);
780 else
781 radeon_combios_asic_init(rdev->ddev);
782 return true;
783 } else {
784 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
785 return false;
786 }
787}
788
0c195119
AD
789/**
790 * radeon_dummy_page_init - init dummy page used by the driver
791 *
792 * @rdev: radeon_device pointer
793 *
794 * Allocate the dummy page used by the driver (all asics).
795 * This dummy page is used by the driver as a filler for gart entries
796 * when pages are taken out of the GART
797 * Returns 0 on sucess, -ENOMEM on failure.
798 */
3ce0a23d
JG
799int radeon_dummy_page_init(struct radeon_device *rdev)
800{
82568565
DA
801 if (rdev->dummy_page.page)
802 return 0;
3ce0a23d
JG
803 rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
804 if (rdev->dummy_page.page == NULL)
805 return -ENOMEM;
806 rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
807 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
a30f6fb7
BH
808 if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
809 dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
3ce0a23d
JG
810 __free_page(rdev->dummy_page.page);
811 rdev->dummy_page.page = NULL;
812 return -ENOMEM;
813 }
cb658906
MD
814 rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
815 RADEON_GART_PAGE_DUMMY);
3ce0a23d
JG
816 return 0;
817}
818
0c195119
AD
819/**
820 * radeon_dummy_page_fini - free dummy page used by the driver
821 *
822 * @rdev: radeon_device pointer
823 *
824 * Frees the dummy page used by the driver (all asics).
825 */
3ce0a23d
JG
826void radeon_dummy_page_fini(struct radeon_device *rdev)
827{
828 if (rdev->dummy_page.page == NULL)
829 return;
830 pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
831 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
832 __free_page(rdev->dummy_page.page);
833 rdev->dummy_page.page = NULL;
834}
835
771fe6b9 836
771fe6b9 837/* ATOM accessor methods */
0c195119
AD
838/*
839 * ATOM is an interpreted byte code stored in tables in the vbios. The
840 * driver registers callbacks to access registers and the interpreter
841 * in the driver parses the tables and executes then to program specific
842 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
843 * atombios.h, and atom.c
844 */
845
846/**
847 * cail_pll_read - read PLL register
848 *
849 * @info: atom card_info pointer
850 * @reg: PLL register offset
851 *
852 * Provides a PLL register accessor for the atom interpreter (r4xx+).
853 * Returns the value of the PLL register.
854 */
771fe6b9
JG
855static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
856{
857 struct radeon_device *rdev = info->dev->dev_private;
858 uint32_t r;
859
860 r = rdev->pll_rreg(rdev, reg);
861 return r;
862}
863
0c195119
AD
864/**
865 * cail_pll_write - write PLL register
866 *
867 * @info: atom card_info pointer
868 * @reg: PLL register offset
869 * @val: value to write to the pll register
870 *
871 * Provides a PLL register accessor for the atom interpreter (r4xx+).
872 */
771fe6b9
JG
873static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
874{
875 struct radeon_device *rdev = info->dev->dev_private;
876
877 rdev->pll_wreg(rdev, reg, val);
878}
879
0c195119
AD
880/**
881 * cail_mc_read - read MC (Memory Controller) register
882 *
883 * @info: atom card_info pointer
884 * @reg: MC register offset
885 *
886 * Provides an MC register accessor for the atom interpreter (r4xx+).
887 * Returns the value of the MC register.
888 */
771fe6b9
JG
889static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
890{
891 struct radeon_device *rdev = info->dev->dev_private;
892 uint32_t r;
893
894 r = rdev->mc_rreg(rdev, reg);
895 return r;
896}
897
0c195119
AD
898/**
899 * cail_mc_write - write MC (Memory Controller) register
900 *
901 * @info: atom card_info pointer
902 * @reg: MC register offset
903 * @val: value to write to the pll register
904 *
905 * Provides a MC register accessor for the atom interpreter (r4xx+).
906 */
771fe6b9
JG
907static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
908{
909 struct radeon_device *rdev = info->dev->dev_private;
910
911 rdev->mc_wreg(rdev, reg, val);
912}
913
0c195119
AD
914/**
915 * cail_reg_write - write MMIO register
916 *
917 * @info: atom card_info pointer
918 * @reg: MMIO register offset
919 * @val: value to write to the pll register
920 *
921 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
922 */
771fe6b9
JG
923static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
924{
925 struct radeon_device *rdev = info->dev->dev_private;
926
927 WREG32(reg*4, val);
928}
929
0c195119
AD
930/**
931 * cail_reg_read - read MMIO register
932 *
933 * @info: atom card_info pointer
934 * @reg: MMIO register offset
935 *
936 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
937 * Returns the value of the MMIO register.
938 */
771fe6b9
JG
939static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
940{
941 struct radeon_device *rdev = info->dev->dev_private;
942 uint32_t r;
943
944 r = RREG32(reg*4);
945 return r;
946}
947
0c195119
AD
948/**
949 * cail_ioreg_write - write IO register
950 *
951 * @info: atom card_info pointer
952 * @reg: IO register offset
953 * @val: value to write to the pll register
954 *
955 * Provides a IO register accessor for the atom interpreter (r4xx+).
956 */
351a52a2
AD
957static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
958{
959 struct radeon_device *rdev = info->dev->dev_private;
960
961 WREG32_IO(reg*4, val);
962}
963
0c195119
AD
964/**
965 * cail_ioreg_read - read IO register
966 *
967 * @info: atom card_info pointer
968 * @reg: IO register offset
969 *
970 * Provides an IO register accessor for the atom interpreter (r4xx+).
971 * Returns the value of the IO register.
972 */
351a52a2
AD
973static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
974{
975 struct radeon_device *rdev = info->dev->dev_private;
976 uint32_t r;
977
978 r = RREG32_IO(reg*4);
979 return r;
980}
981
0c195119
AD
/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	/* register/PLL/MC accessors the ATOM interpreter calls back into */
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		/* no IO BAR found: fall back to MMIO accessors for IIO */
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		/* frees atom_card_info allocated above */
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	/* NOTE(review): return value ignored; on failure ATOM falls back to
	 * register scratch space — confirm against atom_allocate_fb_scratch() */
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}
1030
0c195119
AD
1031/**
1032 * radeon_atombios_fini - free the driver info and callbacks for atombios
1033 *
1034 * @rdev: radeon_device pointer
1035 *
1036 * Frees the driver info and register access callbacks for the ATOM
1037 * interpreter (r4xx+).
1038 * Called at driver shutdown.
1039 */
771fe6b9
JG
1040void radeon_atombios_fini(struct radeon_device *rdev)
1041{
4a04a844
JG
1042 if (rdev->mode_info.atom_context) {
1043 kfree(rdev->mode_info.atom_context->scratch);
4a04a844 1044 }
0e34d094
TG
1045 kfree(rdev->mode_info.atom_context);
1046 rdev->mode_info.atom_context = NULL;
61c4b24b 1047 kfree(rdev->mode_info.atom_card_info);
0e34d094 1048 rdev->mode_info.atom_card_info = NULL;
771fe6b9
JG
1049}
1050
0c195119
AD
1051/* COMBIOS */
1052/*
1053 * COMBIOS is the bios format prior to ATOM. It provides
1054 * command tables similar to ATOM, but doesn't have a unified
1055 * parser. See radeon_combios.c
1056 */
1057
/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success.
 * Called at driver startup.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	/* combios has no interpreter context to set up; only the BIOS
	 * scratch registers need initializing */
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}
1072
0c195119
AD
/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).
 * Called at driver shutdown.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
	/* intentionally empty: radeon_combios_init() allocates nothing */
}
1084
0c195119
AD
1085/* if we get transitioned to only one device, take VGA back */
1086/**
1087 * radeon_vga_set_decode - enable/disable vga decode
1088 *
1089 * @cookie: radeon_device pointer
1090 * @state: enable/disable vga decode
1091 *
1092 * Enable/disable vga decode (all asics).
1093 * Returns VGA resource flags.
1094 */
28d52043
DA
1095static unsigned int radeon_vga_set_decode(void *cookie, bool state)
1096{
1097 struct radeon_device *rdev = cookie;
28d52043
DA
1098 radeon_vga_set_state(rdev, state);
1099 if (state)
1100 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1101 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1102 else
1103 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1104}
c1176d6f 1105
1bcb04f7
CK
/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 * Note: zero also passes the check; callers use 0 to mean "disabled".
 */
static bool radeon_check_pot_argument(int arg)
{
	/* a power of two has no bits in common with its predecessor */
	return !(arg & (arg - 1));
}
1118
5e3c4f90
GG
/**
 * radeon_gart_size_auto - determine a sensible default GART size
 *
 * @family: ASIC family name
 *
 * Returns a default GART size in MB according to ASIC family.
 */
static int radeon_gart_size_auto(enum radeon_family family)
{
	/* default to a larger gart size on newer asics */
	if (family >= CHIP_TAHITI)
		return 2048;
	else if (family >= CHIP_RV770)
		return 1024;
	else
		return 512;
}
1134
0c195119
AD
/**
 * radeon_check_arguments - validate module params
 *
 * @rdev: radeon_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 * Invalid values are warned about and replaced with safe defaults
 * rather than failing the load.
 */
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
			 radeon_vram_limit);
		radeon_vram_limit = 0;	/* 0 = no limit */
	}

	/* -1 means "pick automatically by ASIC family" */
	if (radeon_gart_size == -1) {
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	/* gtt size must be power of two and greater or equal to 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small\n",
			 radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
			 radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	/* module param is in MB; mc.gtt_size is in bytes */
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can only be -1, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
			 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}

	/* VM size is in GB; fall back to 4GB on any bad value */
	if (!radeon_check_pot_argument(radeon_vm_size)) {
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	if (radeon_vm_size < 1) {
		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/*
	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
	 */
	if (radeon_vm_size > 1024) {
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (radeon_vm_block_size == -1) {

		/* Total bits covered by PD + PTs */
		unsigned bits = ilog2(radeon_vm_size) + 18;

		/* Make sure the PD is 4K in size up to 8GB address space.
		   Above that split equal between PD and PTs */
		if (radeon_vm_size <= 8)
			radeon_vm_block_size = bits - 9;
		else
			radeon_vm_block_size = (bits + 3) / 2;

	} else if (radeon_vm_block_size < 9) {
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}

	/* also reject a block size the chosen VM size cannot cover */
	if (radeon_vm_block_size > 24 ||
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}
}
1232
0c195119
AD
/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/* PX laptops ignore the OFF request here — presumably runtime PM
	 * handles the power-down instead; confirm against radeon_drv.c */
	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		radeon_resume_kms(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, true, true, false);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
1266
0c195119
AD
1267/**
1268 * radeon_switcheroo_can_switch - see if switcheroo state can change
1269 *
1270 * @pdev: pci dev pointer
1271 *
1272 * Callback for the switcheroo driver. Check of the switcheroo
1273 * state can be changed.
1274 * Returns true if the state can be changed, false if not.
1275 */
6a9ee8af
DA
1276static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
1277{
1278 struct drm_device *dev = pci_get_drvdata(pdev);
6a9ee8af 1279
fc8fd40e
DV
1280 /*
1281 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1282 * locking inversion with the driver load path. And the access here is
1283 * completely racy anyway. So don't bother with locking for now.
1284 */
1285 return dev->open_count == 0;
6a9ee8af
DA
1286}
1287
26ec685f
TI
/* vga_switcheroo callbacks for hybrid-graphics (PX) systems */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
6a9ee8af 1293
0c195119
AD
/**
 * radeon_device_init - initialize the driver
 *
 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;
	bool runtime = false;

	/* basic device state; family is packed into the low flag bits */
	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = 512 * 1024 * 1024;	/* overridden by radeon_check_arguments() */
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}
	rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	mutex_init(&rdev->grbm_idx_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	mutex_init(&rdev->mn_lock);
	hash_init(rdev->mn_hash);
	r = radeon_gem_init(rdev);
	if (r)
		return r;

	/* validate/clamp module parameters before they are consumed below */
	radeon_check_arguments(rdev);
	/* Adjust VM size here.
	 * Max GPUVM size for cayman+ is 40 bits.
	 */
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		/* 40-bit mask rejected; fall back to 32-bit DMA */
		rdev->need_dma32 = true;
		dma_bits = 32;
		pr_warn("radeon: No suitable DMA available\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		pr_warn("radeon: No coherent DMA available\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&rdev->mmio_idx_lock);
	spin_lock_init(&rdev->smc_idx_lock);
	spin_lock_init(&rdev->pll_idx_lock);
	spin_lock_init(&rdev->mc_idx_lock);
	spin_lock_init(&rdev->pcie_idx_lock);
	spin_lock_init(&rdev->pciep_idx_lock);
	spin_lock_init(&rdev->pif_idx_lock);
	spin_lock_init(&rdev->cg_idx_lock);
	spin_lock_init(&rdev->uvd_idx_lock);
	spin_lock_init(&rdev->rcu_idx_lock);
	spin_lock_init(&rdev->didt_idx_lock);
	spin_lock_init(&rdev->end_idx_lock);
	/* register BAR moved from BAR 2 to BAR 5 on CIK (Bonaire) and later */
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL)
		return -ENOMEM;

	/* doorbell bar mapping */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	/* non-fatal: radeon_atombios_init() falls back to MMIO for IIO */
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	if (rdev->flags & RADEON_IS_PX)
		radeon_device_handle_px_quirks(rdev);

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);

	if (rdev->flags & RADEON_IS_PX)
		runtime = true;
	if (!pci_is_thunderbolt_attached(rdev->pdev))
		vga_switcheroo_register_client(rdev->pdev,
					       &radeon_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);

	r = radeon_init(rdev);
	if (r)
		goto failed;

	r = radeon_gem_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
	}

	r = radeon_mst_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering mst debugfs failed (%d).\n", r);
	}

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			goto failed;
	}

	/* IB failures are logged but not fatal; accel may still be disabled */
	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	/*
	 * Turks/Thames GPU will freeze whole laptop if DPM is not restarted
	 * after the CP ring have chew one packet at least. Hence here we stop
	 * and restart DPM after the radeon_ib_ring_tests().
	 */
	if (rdev->pm.dpm_enabled &&
	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
	    (rdev->family == CHIP_TURKS) &&
	    (rdev->flags & RADEON_IS_MOBILITY)) {
		mutex_lock(&rdev->pm.mutex);
		radeon_dpm_disable(rdev);
		radeon_dpm_enable(rdev);
		mutex_unlock(&rdev->pm.mutex);
	}

	/* optional self-tests/benchmarks selected via module parameters */
	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;

failed:
	/* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */
	if (radeon_is_px(ddev))
		pm_runtime_put_noidle(ddev->dev);
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	return r;
}
1543
0c195119
AD
/**
 * radeon_device_fini - tear down the driver
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	/* unwind the vga/switcheroo registrations made in radeon_device_init() */
	if (!pci_is_thunderbolt_attached(rdev->pdev))
		vga_switcheroo_unregister_client(rdev->pdev);
	if (rdev->flags & RADEON_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
}
1572
1573
1574/*
1575 * Suspend & resume.
1576 */
0c195119
AD
/**
 * radeon_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: true to put the PCI device into D3hot
 * @fbcon: true to also suspend the fbdev console
 * @freeze: true for hibernation freeze (triggers an ASIC reset on
 *          evergreen+ dGPUs instead of a PCI power-down)
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int radeon_suspend_kms(struct drm_device *dev, bool suspend,
		       bool fbcon, bool freeze)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	rdev = dev->dev_private;

	/* nothing to do if switcheroo already powered us off */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	drm_modeset_lock_all(dev);
	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}
	drm_modeset_unlock_all(dev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
		struct radeon_bo *robj;

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			radeon_fence_driver_force_completion(rdev, i);
		}
	}

	radeon_save_bios_scratch_regs(rdev);

	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
	 * using the CPU.
	 */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
		/* hibernation freeze: reset the ASIC instead of powering down */
		rdev->asic->asic_reset(rdev, true);
		pci_restore_state(dev->pdev);
	} else if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	if (fbcon) {
		console_lock();
		radeon_fbdev_set_suspend(rdev, 1);
		console_unlock();
	}
	return 0;
}
1682
0c195119
AD
/**
 * radeon_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: true to re-enable the PCI device (D0) before touching hw
 * @fbcon: true to also resume the fbdev console and restore modes
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_crtc *crtc;
	int r;

	/* nothing to do if switcheroo has the device powered off */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon) {
		console_lock();
	}
	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		if (pci_enable_device(dev->pdev)) {
			if (fbcon)
				console_unlock();
			return -1;
		}
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				/* Only 27 bit offset for legacy cursor */
				r = radeon_bo_pin_restricted(robj,
							     RADEON_GEM_DOMAIN_VRAM,
							     ASIC_IS_AVIVO(rdev) ?
							     0 : 1 << 27,
							     &radeon_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				radeon_bo_unreserve(robj);
			}
		}
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_kms_helper_poll_enable(dev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (fbcon) {
		radeon_fbdev_set_suspend(rdev, 0);
		console_unlock();
	}

	return 0;
}
1795
0c195119
AD
/**
 * radeon_gpu_reset - reset the asic
 *
 * @rdev: radeon device pointer
 *
 * Attempt the reset the GPU if it has hung (all asics).
 * Unprocessed ring commands are saved before the reset and replayed
 * afterwards when the reset succeeded.
 * Returns 0 for success or an error on failure.
 */
int radeon_gpu_reset(struct radeon_device *rdev)
{
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	/* exclusive access while the hw is torn down and rebuilt */
	down_write(&rdev->exclusive_lock);

	if (!rdev->needs_reset) {
		up_write(&rdev->exclusive_lock);
		return 0;
	}

	atomic_inc(&rdev->gpu_reset_counter);

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);

	/* save still-pending commands from every ring for replay */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* replay the saved commands, or force-complete fences on failure */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!r && ring_data[i]) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
		} else {
			radeon_fence_driver_force_completion(rdev, i);
			kfree(ring_data[i]);
		}
	}

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);

	rdev->in_reset = true;
	rdev->needs_reset = false;

	/* keep readers out until the mode restore/ib tests are done */
	downgrade_write(&rdev->exclusive_lock);

	drm_helper_resume_force_mode(rdev->ddev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (!r) {
		r = radeon_ib_ring_tests(rdev);
		/* replayed commands but ib test failed: ask for another reset */
		if (r && saved)
			r = -EAGAIN;
	} else {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	rdev->needs_reset = r == -EAGAIN;
	rdev->in_reset = false;

	up_read(&rdev->exclusive_lock);
	return r;
}
1912
771fe6b9
JG
1913
1914/*
1915 * Debugfs
1916 */
771fe6b9
JG
/**
 * radeon_debugfs_add_files - register a set of debugfs entries
 *
 * @rdev: radeon_device pointer
 * @files: array of debugfs file descriptions
 * @nfiles: number of entries in @files
 *
 * Records @files in the per-device debugfs table (skipping arrays that
 * were already registered) and creates the corresponding debugfs nodes
 * when CONFIG_DEBUG_FS is enabled.
 * Returns 0 on success, -EINVAL when the component table is full.
 */
int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	/* registration is idempotent per files array (compared by pointer) */
	for (i = 0; i < rdev->debugfs_count; i++) {
		if (rdev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = rdev->debugfs_count + 1;
	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			  "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	rdev->debugfs[rdev->debugfs_count].files = files;
	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
	rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}