Merge remote-tracking branch 'airlied/drm-next' into topic/drm-misc
[linux-block.git] / drivers / gpu / drm / radeon / radeon_device.c
CommitLineData
771fe6b9
JG
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/console.h>
5a0e3ad6 29#include <linux/slab.h>
771fe6b9
JG
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
b8751946 33#include <linux/pm_runtime.h>
28d52043 34#include <linux/vgaarb.h>
6a9ee8af 35#include <linux/vga_switcheroo.h>
bcc65fd8 36#include <linux/efi.h>
771fe6b9
JG
37#include "radeon_reg.h"
38#include "radeon.h"
771fe6b9
JG
39#include "atom.h"
40
/* Human-readable ASIC family names; indexed by enum radeon_family. */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"LAST",
};
106
4807c5a8
AD
107#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
108#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
109
110struct radeon_px_quirk {
111 u32 chip_vendor;
112 u32 chip_device;
113 u32 subsys_vendor;
114 u32 subsys_device;
115 u32 px_quirk_flags;
116};
117
118static struct radeon_px_quirk radeon_px_quirk_list[] = {
119 /* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
120 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
121 */
122 { PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
123 /* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
124 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
125 */
126 { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
ff1b1294
AD
127 /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
128 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
129 */
130 { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
4807c5a8
AD
131 /* macbook pro 8.2 */
132 { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
133 { 0, 0, 0, 0, 0 },
134};
135
90c4cde9
AD
136bool radeon_is_px(struct drm_device *dev)
137{
138 struct radeon_device *rdev = dev->dev_private;
139
140 if (rdev->flags & RADEON_IS_PX)
141 return true;
142 return false;
143}
10ebc0bc 144
4807c5a8
AD
145static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
146{
147 struct radeon_px_quirk *p = radeon_px_quirk_list;
148
149 /* Apply PX quirks */
150 while (p && p->chip_device != 0) {
151 if (rdev->pdev->vendor == p->chip_vendor &&
152 rdev->pdev->device == p->chip_device &&
153 rdev->pdev->subsystem_vendor == p->subsys_vendor &&
154 rdev->pdev->subsystem_device == p->subsys_device) {
155 rdev->px_quirk_flags = p->px_quirk_flags;
156 break;
157 }
158 ++p;
159 }
160
161 if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
162 rdev->flags &= ~RADEON_IS_PX;
163}
164
2e1b65f9
AD
165/**
166 * radeon_program_register_sequence - program an array of registers.
167 *
168 * @rdev: radeon_device pointer
169 * @registers: pointer to the register array
170 * @array_size: size of the register array
171 *
172 * Programs an array or registers with and and or masks.
173 * This is a helper for setting golden registers.
174 */
175void radeon_program_register_sequence(struct radeon_device *rdev,
176 const u32 *registers,
177 const u32 array_size)
178{
179 u32 tmp, reg, and_mask, or_mask;
180 int i;
181
182 if (array_size % 3)
183 return;
184
185 for (i = 0; i < array_size; i +=3) {
186 reg = registers[i + 0];
187 and_mask = registers[i + 1];
188 or_mask = registers[i + 2];
189
190 if (and_mask == 0xffffffff) {
191 tmp = or_mask;
192 } else {
193 tmp = RREG32(reg);
194 tmp &= ~and_mask;
195 tmp |= or_mask;
196 }
197 WREG32(reg, tmp);
198 }
199}
200
1a0041b8
AD
201void radeon_pci_config_reset(struct radeon_device *rdev)
202{
203 pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
204}
205
0c195119
AD
206/**
207 * radeon_surface_init - Clear GPU surface registers.
208 *
209 * @rdev: radeon_device pointer
210 *
211 * Clear GPU surface registers (r1xx-r5xx).
b1e3a6d1 212 */
3ce0a23d 213void radeon_surface_init(struct radeon_device *rdev)
b1e3a6d1
MD
214{
215 /* FIXME: check this out */
216 if (rdev->family < CHIP_R600) {
217 int i;
218
550e2d92
DA
219 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
220 if (rdev->surface_regs[i].bo)
221 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
222 else
223 radeon_clear_surface_reg(rdev, i);
b1e3a6d1 224 }
e024e110
DA
225 /* enable surfaces */
226 WREG32(RADEON_SURFACE_CNTL, 0);
b1e3a6d1
MD
227 }
228}
229
771fe6b9
JG
230/*
231 * GPU scratch registers helpers function.
232 */
0c195119
AD
233/**
234 * radeon_scratch_init - Init scratch register driver information.
235 *
236 * @rdev: radeon_device pointer
237 *
238 * Init CP scratch register driver information (r1xx-r5xx)
239 */
3ce0a23d 240void radeon_scratch_init(struct radeon_device *rdev)
771fe6b9
JG
241{
242 int i;
243
244 /* FIXME: check this out */
245 if (rdev->family < CHIP_R300) {
246 rdev->scratch.num_reg = 5;
247 } else {
248 rdev->scratch.num_reg = 7;
249 }
724c80e1 250 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
771fe6b9
JG
251 for (i = 0; i < rdev->scratch.num_reg; i++) {
252 rdev->scratch.free[i] = true;
724c80e1 253 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
771fe6b9
JG
254 }
255}
256
0c195119
AD
257/**
258 * radeon_scratch_get - Allocate a scratch register
259 *
260 * @rdev: radeon_device pointer
261 * @reg: scratch register mmio offset
262 *
263 * Allocate a CP scratch register for use by the driver (all asics).
264 * Returns 0 on success or -EINVAL on failure.
265 */
771fe6b9
JG
266int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
267{
268 int i;
269
270 for (i = 0; i < rdev->scratch.num_reg; i++) {
271 if (rdev->scratch.free[i]) {
272 rdev->scratch.free[i] = false;
273 *reg = rdev->scratch.reg[i];
274 return 0;
275 }
276 }
277 return -EINVAL;
278}
279
0c195119
AD
280/**
281 * radeon_scratch_free - Free a scratch register
282 *
283 * @rdev: radeon_device pointer
284 * @reg: scratch register mmio offset
285 *
286 * Free a CP scratch register allocated for use by the driver (all asics)
287 */
771fe6b9
JG
288void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
289{
290 int i;
291
292 for (i = 0; i < rdev->scratch.num_reg; i++) {
293 if (rdev->scratch.reg[i] == reg) {
294 rdev->scratch.free[i] = true;
295 return;
296 }
297 }
298}
299
75efdee1
AD
300/*
301 * GPU doorbell aperture helpers function.
302 */
303/**
304 * radeon_doorbell_init - Init doorbell driver information.
305 *
306 * @rdev: radeon_device pointer
307 *
308 * Init doorbell driver information (CIK)
309 * Returns 0 on success, error on failure.
310 */
28f5a6cd 311static int radeon_doorbell_init(struct radeon_device *rdev)
75efdee1 312{
75efdee1
AD
313 /* doorbell bar mapping */
314 rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
315 rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
316
d5754ab8
AL
317 rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
318 if (rdev->doorbell.num_doorbells == 0)
319 return -EINVAL;
75efdee1 320
d5754ab8 321 rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
75efdee1
AD
322 if (rdev->doorbell.ptr == NULL) {
323 return -ENOMEM;
324 }
325 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
326 DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
327
d5754ab8 328 memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
75efdee1 329
75efdee1
AD
330 return 0;
331}
332
333/**
334 * radeon_doorbell_fini - Tear down doorbell driver information.
335 *
336 * @rdev: radeon_device pointer
337 *
338 * Tear down doorbell driver information (CIK)
339 */
28f5a6cd 340static void radeon_doorbell_fini(struct radeon_device *rdev)
75efdee1
AD
341{
342 iounmap(rdev->doorbell.ptr);
343 rdev->doorbell.ptr = NULL;
344}
345
346/**
d5754ab8 347 * radeon_doorbell_get - Allocate a doorbell entry
75efdee1
AD
348 *
349 * @rdev: radeon_device pointer
d5754ab8 350 * @doorbell: doorbell index
75efdee1 351 *
d5754ab8 352 * Allocate a doorbell for use by the driver (all asics).
75efdee1
AD
353 * Returns 0 on success or -EINVAL on failure.
354 */
355int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
356{
d5754ab8
AL
357 unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
358 if (offset < rdev->doorbell.num_doorbells) {
359 __set_bit(offset, rdev->doorbell.used);
360 *doorbell = offset;
361 return 0;
362 } else {
363 return -EINVAL;
75efdee1 364 }
75efdee1
AD
365}
366
367/**
d5754ab8 368 * radeon_doorbell_free - Free a doorbell entry
75efdee1
AD
369 *
370 * @rdev: radeon_device pointer
d5754ab8 371 * @doorbell: doorbell index
75efdee1 372 *
d5754ab8 373 * Free a doorbell allocated for use by the driver (all asics)
75efdee1
AD
374 */
375void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
376{
d5754ab8
AL
377 if (doorbell < rdev->doorbell.num_doorbells)
378 __clear_bit(doorbell, rdev->doorbell.used);
75efdee1
AD
379}
380
ebff8453
OG
381/**
382 * radeon_doorbell_get_kfd_info - Report doorbell configuration required to
383 * setup KFD
384 *
385 * @rdev: radeon_device pointer
386 * @aperture_base: output returning doorbell aperture base physical address
387 * @aperture_size: output returning doorbell aperture size in bytes
388 * @start_offset: output returning # of doorbell bytes reserved for radeon.
389 *
390 * Radeon and the KFD share the doorbell aperture. Radeon sets it up,
391 * takes doorbells required for its own rings and reports the setup to KFD.
392 * Radeon reserved doorbells are at the start of the doorbell aperture.
393 */
394void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
395 phys_addr_t *aperture_base,
396 size_t *aperture_size,
397 size_t *start_offset)
398{
399 /* The first num_doorbells are used by radeon.
400 * KFD takes whatever's left in the aperture. */
401 if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
402 *aperture_base = rdev->doorbell.base;
403 *aperture_size = rdev->doorbell.size;
404 *start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
405 } else {
406 *aperture_base = 0;
407 *aperture_size = 0;
408 *start_offset = 0;
409 }
410}
411
0c195119
AD
412/*
413 * radeon_wb_*()
414 * Writeback is the the method by which the the GPU updates special pages
415 * in memory with the status of certain GPU events (fences, ring pointers,
416 * etc.).
417 */
418
419/**
420 * radeon_wb_disable - Disable Writeback
421 *
422 * @rdev: radeon_device pointer
423 *
424 * Disables Writeback (all asics). Used for suspend.
425 */
724c80e1
AD
426void radeon_wb_disable(struct radeon_device *rdev)
427{
724c80e1
AD
428 rdev->wb.enabled = false;
429}
430
0c195119
AD
431/**
432 * radeon_wb_fini - Disable Writeback and free memory
433 *
434 * @rdev: radeon_device pointer
435 *
436 * Disables Writeback and frees the Writeback memory (all asics).
437 * Used at driver shutdown.
438 */
724c80e1
AD
439void radeon_wb_fini(struct radeon_device *rdev)
440{
441 radeon_wb_disable(rdev);
442 if (rdev->wb.wb_obj) {
089920f2
JG
443 if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
444 radeon_bo_kunmap(rdev->wb.wb_obj);
445 radeon_bo_unpin(rdev->wb.wb_obj);
446 radeon_bo_unreserve(rdev->wb.wb_obj);
447 }
724c80e1
AD
448 radeon_bo_unref(&rdev->wb.wb_obj);
449 rdev->wb.wb = NULL;
450 rdev->wb.wb_obj = NULL;
451 }
452}
453
0c195119
AD
454/**
455 * radeon_wb_init- Init Writeback driver info and allocate memory
456 *
457 * @rdev: radeon_device pointer
458 *
459 * Disables Writeback and frees the Writeback memory (all asics).
460 * Used at driver startup.
461 * Returns 0 on success or an -error on failure.
462 */
724c80e1
AD
463int radeon_wb_init(struct radeon_device *rdev)
464{
465 int r;
466
467 if (rdev->wb.wb_obj == NULL) {
441921d5 468 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
831b6966 469 RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
02376d82 470 &rdev->wb.wb_obj);
724c80e1
AD
471 if (r) {
472 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
473 return r;
474 }
089920f2
JG
475 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
476 if (unlikely(r != 0)) {
477 radeon_wb_fini(rdev);
478 return r;
479 }
480 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
481 &rdev->wb.gpu_addr);
482 if (r) {
483 radeon_bo_unreserve(rdev->wb.wb_obj);
484 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
485 radeon_wb_fini(rdev);
486 return r;
487 }
488 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
724c80e1 489 radeon_bo_unreserve(rdev->wb.wb_obj);
089920f2
JG
490 if (r) {
491 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
492 radeon_wb_fini(rdev);
493 return r;
494 }
724c80e1
AD
495 }
496
e6ba7599
AD
497 /* clear wb memory */
498 memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
d0f8a854
AD
499 /* disable event_write fences */
500 rdev->wb.use_event = false;
724c80e1 501 /* disabled via module param */
3b7a2b24 502 if (radeon_no_wb == 1) {
724c80e1 503 rdev->wb.enabled = false;
3b7a2b24 504 } else {
724c80e1 505 if (rdev->flags & RADEON_IS_AGP) {
28eebb70
AD
506 /* often unreliable on AGP */
507 rdev->wb.enabled = false;
508 } else if (rdev->family < CHIP_R300) {
509 /* often unreliable on pre-r300 */
724c80e1 510 rdev->wb.enabled = false;
d0f8a854 511 } else {
724c80e1 512 rdev->wb.enabled = true;
d0f8a854 513 /* event_write fences are only available on r600+ */
3b7a2b24 514 if (rdev->family >= CHIP_R600) {
d0f8a854 515 rdev->wb.use_event = true;
3b7a2b24 516 }
d0f8a854 517 }
724c80e1 518 }
c994ead6
AD
519 /* always use writeback/events on NI, APUs */
520 if (rdev->family >= CHIP_PALM) {
7d52785d
AD
521 rdev->wb.enabled = true;
522 rdev->wb.use_event = true;
523 }
724c80e1
AD
524
525 dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
526
527 return 0;
528}
529
d594e46a
JG
530/**
531 * radeon_vram_location - try to find VRAM location
532 * @rdev: radeon device structure holding all necessary informations
533 * @mc: memory controller structure holding memory informations
534 * @base: base address at which to put VRAM
535 *
536 * Function will place try to place VRAM at base address provided
537 * as parameter (which is so far either PCI aperture address or
538 * for IGP TOM base address).
539 *
540 * If there is not enough space to fit the unvisible VRAM in the 32bits
541 * address space then we limit the VRAM size to the aperture.
542 *
543 * If we are using AGP and if the AGP aperture doesn't allow us to have
544 * room for all the VRAM than we restrict the VRAM to the PCI aperture
545 * size and print a warning.
546 *
547 * This function will never fails, worst case are limiting VRAM.
548 *
549 * Note: GTT start, end, size should be initialized before calling this
550 * function on AGP platform.
551 *
25985edc 552 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
d594e46a
JG
553 * this shouldn't be a problem as we are using the PCI aperture as a reference.
554 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
555 * not IGP.
556 *
557 * Note: we use mc_vram_size as on some board we need to program the mc to
558 * cover the whole aperture even if VRAM size is inferior to aperture size
559 * Novell bug 204882 + along with lots of ubuntu ones
560 *
561 * Note: when limiting vram it's safe to overwritte real_vram_size because
562 * we are not in case where real_vram_size is inferior to mc_vram_size (ie
563 * note afected by bogus hw of Novell bug 204882 + along with lots of ubuntu
564 * ones)
565 *
566 * Note: IGP TOM addr should be the same as the aperture addr, we don't
567 * explicitly check for that thought.
568 *
569 * FIXME: when reducing VRAM size align new size on power of 2.
771fe6b9 570 */
d594e46a 571void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
771fe6b9 572{
1bcb04f7
CK
573 uint64_t limit = (uint64_t)radeon_vram_limit << 20;
574
d594e46a 575 mc->vram_start = base;
9ed8b1f9 576 if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
d594e46a
JG
577 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
578 mc->real_vram_size = mc->aper_size;
579 mc->mc_vram_size = mc->aper_size;
580 }
581 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
2cbeb4ef 582 if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
d594e46a
JG
583 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
584 mc->real_vram_size = mc->aper_size;
585 mc->mc_vram_size = mc->aper_size;
586 }
587 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
1bcb04f7
CK
588 if (limit && limit < mc->real_vram_size)
589 mc->real_vram_size = limit;
dd7cc55a 590 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
d594e46a
JG
591 mc->mc_vram_size >> 20, mc->vram_start,
592 mc->vram_end, mc->real_vram_size >> 20);
593}
771fe6b9 594
d594e46a
JG
595/**
596 * radeon_gtt_location - try to find GTT location
597 * @rdev: radeon device structure holding all necessary informations
598 * @mc: memory controller structure holding memory informations
599 *
600 * Function will place try to place GTT before or after VRAM.
601 *
602 * If GTT size is bigger than space left then we ajust GTT size.
603 * Thus function will never fails.
604 *
605 * FIXME: when reducing GTT size align new size on power of 2.
606 */
607void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
608{
609 u64 size_af, size_bf;
610
9ed8b1f9 611 size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
8d369bb1 612 size_bf = mc->vram_start & ~mc->gtt_base_align;
d594e46a
JG
613 if (size_bf > size_af) {
614 if (mc->gtt_size > size_bf) {
615 dev_warn(rdev->dev, "limiting GTT\n");
616 mc->gtt_size = size_bf;
771fe6b9 617 }
8d369bb1 618 mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
771fe6b9 619 } else {
d594e46a
JG
620 if (mc->gtt_size > size_af) {
621 dev_warn(rdev->dev, "limiting GTT\n");
622 mc->gtt_size = size_af;
623 }
8d369bb1 624 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
771fe6b9 625 }
d594e46a 626 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
dd7cc55a 627 dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
d594e46a 628 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
771fe6b9
JG
629}
630
771fe6b9
JG
631/*
632 * GPU helpers function.
633 */
05082b8b
AD
634
635/**
636 * radeon_device_is_virtual - check if we are running is a virtual environment
637 *
638 * Check if the asic has been passed through to a VM (all asics).
639 * Used at driver startup.
640 * Returns true if virtual or false if not.
641 */
a801abe4 642bool radeon_device_is_virtual(void)
05082b8b
AD
643{
644#ifdef CONFIG_X86
645 return boot_cpu_has(X86_FEATURE_HYPERVISOR);
646#else
647 return false;
648#endif
649}
650
0c195119
AD
651/**
652 * radeon_card_posted - check if the hw has already been initialized
653 *
654 * @rdev: radeon_device pointer
655 *
656 * Check if the asic has been initialized (all asics).
657 * Used at driver startup.
658 * Returns true if initialized or false if not.
659 */
9f022ddf 660bool radeon_card_posted(struct radeon_device *rdev)
771fe6b9
JG
661{
662 uint32_t reg;
663
884031f0
AD
664 /* for pass through, always force asic_init for CI */
665 if (rdev->family >= CHIP_BONAIRE &&
666 radeon_device_is_virtual())
05082b8b
AD
667 return false;
668
50a583f6 669 /* required for EFI mode on macbook2,1 which uses an r5xx asic */
83e68189 670 if (efi_enabled(EFI_BOOT) &&
50a583f6
AD
671 (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
672 (rdev->family < CHIP_R600))
bcc65fd8
MG
673 return false;
674
2cf3a4fc
AD
675 if (ASIC_IS_NODCE(rdev))
676 goto check_memsize;
677
771fe6b9 678 /* first check CRTCs */
09fb8bd1 679 if (ASIC_IS_DCE4(rdev)) {
18007401
AD
680 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
681 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
09fb8bd1
AD
682 if (rdev->num_crtc >= 4) {
683 reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
684 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
685 }
686 if (rdev->num_crtc >= 6) {
687 reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
688 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
689 }
bcc1c2a1
AD
690 if (reg & EVERGREEN_CRTC_MASTER_EN)
691 return true;
692 } else if (ASIC_IS_AVIVO(rdev)) {
771fe6b9
JG
693 reg = RREG32(AVIVO_D1CRTC_CONTROL) |
694 RREG32(AVIVO_D2CRTC_CONTROL);
695 if (reg & AVIVO_CRTC_EN) {
696 return true;
697 }
698 } else {
699 reg = RREG32(RADEON_CRTC_GEN_CNTL) |
700 RREG32(RADEON_CRTC2_GEN_CNTL);
701 if (reg & RADEON_CRTC_EN) {
702 return true;
703 }
704 }
705
2cf3a4fc 706check_memsize:
771fe6b9
JG
707 /* then check MEM_SIZE, in case the crtcs are off */
708 if (rdev->family >= CHIP_R600)
709 reg = RREG32(R600_CONFIG_MEMSIZE);
710 else
711 reg = RREG32(RADEON_CONFIG_MEMSIZE);
712
713 if (reg)
714 return true;
715
716 return false;
717
718}
719
0c195119
AD
720/**
721 * radeon_update_bandwidth_info - update display bandwidth params
722 *
723 * @rdev: radeon_device pointer
724 *
725 * Used when sclk/mclk are switched or display modes are set.
726 * params are used to calculate display watermarks (all asics)
727 */
f47299c5
AD
728void radeon_update_bandwidth_info(struct radeon_device *rdev)
729{
730 fixed20_12 a;
8807286e
AD
731 u32 sclk = rdev->pm.current_sclk;
732 u32 mclk = rdev->pm.current_mclk;
f47299c5 733
8807286e
AD
734 /* sclk/mclk in Mhz */
735 a.full = dfixed_const(100);
736 rdev->pm.sclk.full = dfixed_const(sclk);
737 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
738 rdev->pm.mclk.full = dfixed_const(mclk);
739 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
f47299c5 740
8807286e 741 if (rdev->flags & RADEON_IS_IGP) {
68adac5e 742 a.full = dfixed_const(16);
f47299c5 743 /* core_bandwidth = sclk(Mhz) * 16 */
68adac5e 744 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
f47299c5
AD
745 }
746}
747
0c195119
AD
748/**
749 * radeon_boot_test_post_card - check and possibly initialize the hw
750 *
751 * @rdev: radeon_device pointer
752 *
753 * Check if the asic is initialized and if not, attempt to initialize
754 * it (all asics).
755 * Returns true if initialized or false if not.
756 */
72542d77
DA
757bool radeon_boot_test_post_card(struct radeon_device *rdev)
758{
759 if (radeon_card_posted(rdev))
760 return true;
761
762 if (rdev->bios) {
763 DRM_INFO("GPU not posted. posting now...\n");
764 if (rdev->is_atom_bios)
765 atom_asic_init(rdev->mode_info.atom_context);
766 else
767 radeon_combios_asic_init(rdev->ddev);
768 return true;
769 } else {
770 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
771 return false;
772 }
773}
774
0c195119
AD
775/**
776 * radeon_dummy_page_init - init dummy page used by the driver
777 *
778 * @rdev: radeon_device pointer
779 *
780 * Allocate the dummy page used by the driver (all asics).
781 * This dummy page is used by the driver as a filler for gart entries
782 * when pages are taken out of the GART
783 * Returns 0 on sucess, -ENOMEM on failure.
784 */
3ce0a23d
JG
785int radeon_dummy_page_init(struct radeon_device *rdev)
786{
82568565
DA
787 if (rdev->dummy_page.page)
788 return 0;
3ce0a23d
JG
789 rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
790 if (rdev->dummy_page.page == NULL)
791 return -ENOMEM;
792 rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
793 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
a30f6fb7
BH
794 if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
795 dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
3ce0a23d
JG
796 __free_page(rdev->dummy_page.page);
797 rdev->dummy_page.page = NULL;
798 return -ENOMEM;
799 }
cb658906
MD
800 rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
801 RADEON_GART_PAGE_DUMMY);
3ce0a23d
JG
802 return 0;
803}
804
0c195119
AD
805/**
806 * radeon_dummy_page_fini - free dummy page used by the driver
807 *
808 * @rdev: radeon_device pointer
809 *
810 * Frees the dummy page used by the driver (all asics).
811 */
3ce0a23d
JG
812void radeon_dummy_page_fini(struct radeon_device *rdev)
813{
814 if (rdev->dummy_page.page == NULL)
815 return;
816 pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
817 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
818 __free_page(rdev->dummy_page.page);
819 rdev->dummy_page.page = NULL;
820}
821
771fe6b9 822
771fe6b9 823/* ATOM accessor methods */
0c195119
AD
824/*
825 * ATOM is an interpreted byte code stored in tables in the vbios. The
826 * driver registers callbacks to access registers and the interpreter
827 * in the driver parses the tables and executes then to program specific
828 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
829 * atombios.h, and atom.c
830 */
831
832/**
833 * cail_pll_read - read PLL register
834 *
835 * @info: atom card_info pointer
836 * @reg: PLL register offset
837 *
838 * Provides a PLL register accessor for the atom interpreter (r4xx+).
839 * Returns the value of the PLL register.
840 */
771fe6b9
JG
841static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
842{
843 struct radeon_device *rdev = info->dev->dev_private;
844 uint32_t r;
845
846 r = rdev->pll_rreg(rdev, reg);
847 return r;
848}
849
0c195119
AD
850/**
851 * cail_pll_write - write PLL register
852 *
853 * @info: atom card_info pointer
854 * @reg: PLL register offset
855 * @val: value to write to the pll register
856 *
857 * Provides a PLL register accessor for the atom interpreter (r4xx+).
858 */
771fe6b9
JG
859static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
860{
861 struct radeon_device *rdev = info->dev->dev_private;
862
863 rdev->pll_wreg(rdev, reg, val);
864}
865
0c195119
AD
866/**
867 * cail_mc_read - read MC (Memory Controller) register
868 *
869 * @info: atom card_info pointer
870 * @reg: MC register offset
871 *
872 * Provides an MC register accessor for the atom interpreter (r4xx+).
873 * Returns the value of the MC register.
874 */
771fe6b9
JG
875static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
876{
877 struct radeon_device *rdev = info->dev->dev_private;
878 uint32_t r;
879
880 r = rdev->mc_rreg(rdev, reg);
881 return r;
882}
883
0c195119
AD
884/**
885 * cail_mc_write - write MC (Memory Controller) register
886 *
887 * @info: atom card_info pointer
888 * @reg: MC register offset
889 * @val: value to write to the pll register
890 *
891 * Provides a MC register accessor for the atom interpreter (r4xx+).
892 */
771fe6b9
JG
893static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
894{
895 struct radeon_device *rdev = info->dev->dev_private;
896
897 rdev->mc_wreg(rdev, reg, val);
898}
899
0c195119
AD
900/**
901 * cail_reg_write - write MMIO register
902 *
903 * @info: atom card_info pointer
904 * @reg: MMIO register offset
905 * @val: value to write to the pll register
906 *
907 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
908 */
771fe6b9
JG
909static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
910{
911 struct radeon_device *rdev = info->dev->dev_private;
912
913 WREG32(reg*4, val);
914}
915
0c195119
AD
916/**
917 * cail_reg_read - read MMIO register
918 *
919 * @info: atom card_info pointer
920 * @reg: MMIO register offset
921 *
922 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
923 * Returns the value of the MMIO register.
924 */
771fe6b9
JG
925static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
926{
927 struct radeon_device *rdev = info->dev->dev_private;
928 uint32_t r;
929
930 r = RREG32(reg*4);
931 return r;
932}
933
0c195119
AD
934/**
935 * cail_ioreg_write - write IO register
936 *
937 * @info: atom card_info pointer
938 * @reg: IO register offset
939 * @val: value to write to the pll register
940 *
941 * Provides a IO register accessor for the atom interpreter (r4xx+).
942 */
351a52a2
AD
943static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
944{
945 struct radeon_device *rdev = info->dev->dev_private;
946
947 WREG32_IO(reg*4, val);
948}
949
0c195119
AD
950/**
951 * cail_ioreg_read - read IO register
952 *
953 * @info: atom card_info pointer
954 * @reg: IO register offset
955 *
956 * Provides an IO register accessor for the atom interpreter (r4xx+).
957 * Returns the value of the IO register.
958 */
351a52a2
AD
959static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
960{
961 struct radeon_device *rdev = info->dev->dev_private;
962 uint32_t r;
963
964 r = RREG32_IO(reg*4);
965 return r;
966}
967
0c195119
AD
968/**
969 * radeon_atombios_init - init the driver info and callbacks for atombios
970 *
971 * @rdev: radeon_device pointer
972 *
973 * Initializes the driver info and register access callbacks for the
974 * ATOM interpreter (r4xx+).
975 * Returns 0 on sucess, -ENOMEM on failure.
976 * Called at driver startup.
977 */
771fe6b9
JG
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	/* wire up the register accessors the ATOM interpreter will use */
	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		/* no PCI I/O BAR found: fall back to the MMIO accessors */
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		/* atom_parse failed: radeon_atombios_fini() frees
		 * atom_card_info and NULLs the pointers we set above */
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}
1016
0c195119
AD
1017/**
1018 * radeon_atombios_fini - free the driver info and callbacks for atombios
1019 *
1020 * @rdev: radeon_device pointer
1021 *
1022 * Frees the driver info and register access callbacks for the ATOM
1023 * interpreter (r4xx+).
1024 * Called at driver shutdown.
1025 */
771fe6b9
JG
1026void radeon_atombios_fini(struct radeon_device *rdev)
1027{
4a04a844
JG
1028 if (rdev->mode_info.atom_context) {
1029 kfree(rdev->mode_info.atom_context->scratch);
4a04a844 1030 }
0e34d094
TG
1031 kfree(rdev->mode_info.atom_context);
1032 rdev->mode_info.atom_context = NULL;
61c4b24b 1033 kfree(rdev->mode_info.atom_card_info);
0e34d094 1034 rdev->mode_info.atom_card_info = NULL;
771fe6b9
JG
1035}
1036
0c195119
AD
1037/* COMBIOS */
1038/*
1039 * COMBIOS is the bios format prior to ATOM. It provides
1040 * command tables similar to ATOM, but doesn't have a unified
1041 * parser. See radeon_combios.c
1042 */
1043
1044/**
1045 * radeon_combios_init - init the driver info for combios
1046 *
1047 * @rdev: radeon_device pointer
1048 *
1049 * Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success.
1051 * Called at driver startup.
1052 */
771fe6b9
JG
int radeon_combios_init(struct radeon_device *rdev)
{
	/* combios has no parser context to allocate; only the BIOS
	 * scratch registers need setting up, so this always succeeds */
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}
1058
0c195119
AD
1059/**
1060 * radeon_combios_fini - free the driver info for combios
1061 *
1062 * @rdev: radeon_device pointer
1063 *
1064 * Frees the driver info for combios (r1xx-r3xx).
1065 * Called at driver shutdown.
1066 */
771fe6b9
JG
void radeon_combios_fini(struct radeon_device *rdev)
{
	/* intentionally empty: radeon_combios_init() allocates no state */
}
1070
0c195119
AD
1071/* if we get transitioned to only one device, take VGA back */
1072/**
1073 * radeon_vga_set_decode - enable/disable vga decode
1074 *
1075 * @cookie: radeon_device pointer
1076 * @state: enable/disable vga decode
1077 *
1078 * Enable/disable vga decode (all asics).
1079 * Returns VGA resource flags.
1080 */
28d52043
DA
1081static unsigned int radeon_vga_set_decode(void *cookie, bool state)
1082{
1083 struct radeon_device *rdev = cookie;
28d52043
DA
1084 radeon_vga_set_state(rdev, state);
1085 if (state)
1086 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1087 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1088 else
1089 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1090}
c1176d6f 1091
1bcb04f7
CK
1092/**
1093 * radeon_check_pot_argument - check that argument is a power of two
1094 *
1095 * @arg: value to check
1096 *
1097 * Validates that a certain argument is a power of two (all asics).
1098 * Returns true if argument is valid.
1099 */
static bool radeon_check_pot_argument(int arg)
{
	/* classic power-of-two test; note that 0 also passes, which the
	 * callers rely on (e.g. radeon_vram_limit == 0 means "no limit") */
	return !(arg & (arg - 1));
}
1104
5e3c4f90
GG
1105/**
1106 * Determine a sensible default GART size according to ASIC family.
1107 *
1108 * @family ASIC family name
1109 */
1110static int radeon_gart_size_auto(enum radeon_family family)
1111{
1112 /* default to a larger gart size on newer asics */
1113 if (family >= CHIP_TAHITI)
1114 return 2048;
1115 else if (family >= CHIP_RV770)
1116 return 1024;
1117 else
1118 return 512;
1119}
1120
0c195119
AD
1121/**
1122 * radeon_check_arguments - validate module params
1123 *
1124 * @rdev: radeon_device pointer
1125 *
1126 * Validates certain module parameters and updates
1127 * the associated values used by the driver (all asics).
1128 */
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0;
	}

	/* -1 means "pick a default for this ASIC family" */
	if (radeon_gart_size == -1) {
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	/* gtt size must be power of two and greater or equal to 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small\n",
			 radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	/* radeon_gart_size is in MB; << 20 converts to bytes */
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can only be -1, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}

	/* VM size (in GB, per the warnings below) must be a power of two */
	if (!radeon_check_pot_argument(radeon_vm_size)) {
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	if (radeon_vm_size < 1) {
		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/*
	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
	 */
	if (radeon_vm_size > 1024) {
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (radeon_vm_block_size == -1) {

		/* Total bits covered by PD + PTs */
		unsigned bits = ilog2(radeon_vm_size) + 18;

		/* Make sure the PD is 4K in size up to 8GB address space.
		   Above that split equal between PD and PTs */
		if (radeon_vm_size <= 8)
			radeon_vm_block_size = bits - 9;
		else
			radeon_vm_block_size = (bits + 3) / 2;

	} else if (radeon_vm_block_size < 9) {
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}

	/* block size must not exceed 24 bits nor address more
	 * than the whole VM space (vm_size GB = vm_size * 2^10 MB) */
	if (radeon_vm_block_size > 24 ||
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}
}
1218
0c195119
AD
1219/**
1220 * radeon_switcheroo_set_state - set switcheroo state
1221 *
1222 * @pdev: pci dev pointer
8e5de1d8 1223 * @state: vga_switcheroo state
0c195119
AD
1224 *
 * Callback for the switcheroo driver. Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
1227 */
6a9ee8af
DA
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct radeon_device *rdev = dev->dev_private;

	/* PX (PowerXpress) systems power down via runtime PM instead;
	 * ignore explicit OFF requests from switcheroo */
	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		/* remember the stock d3 delay so we can restore it below */
		unsigned d3_delay = dev->pdev->d3_delay;

		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		/* some boards need a longer D3 transition delay to wake up */
		if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP))
			dev->pdev->d3_delay = 20;

		radeon_resume_kms(dev, true, true);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		/* stop output polling before the hw goes away */
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, true, true, false);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
1260
0c195119
AD
1261/**
1262 * radeon_switcheroo_can_switch - see if switcheroo state can change
1263 *
1264 * @pdev: pci dev pointer
1265 *
 * Callback for the switcheroo driver. Check if the switcheroo
1267 * state can be changed.
1268 * Returns true if the state can be changed, false if not.
1269 */
6a9ee8af
DA
1270static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
1271{
1272 struct drm_device *dev = pci_get_drvdata(pdev);
6a9ee8af 1273
fc8fd40e
DV
1274 /*
1275 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1276 * locking inversion with the driver load path. And the access here is
1277 * completely racy anyway. So don't bother with locking for now.
1278 */
1279 return dev->open_count == 0;
6a9ee8af
DA
1280}
1281
26ec685f
TI
/* vga_switcheroo callbacks used for hybrid-graphics GPU switching */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
6a9ee8af 1287
0c195119
AD
1288/**
1289 * radeon_device_init - initialize the driver
1290 *
1291 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
1293 * @pdev: pci dev pointer
1294 * @flags: driver flags
1295 *
1296 * Initializes the driver info and hw (all asics).
1297 * Returns 0 for success or an error on failure.
1298 * Called at driver startup.
1299 */
771fe6b9
JG
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;
	bool runtime = false;

	/* fill in the basic device state before touching any hardware */
	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = 512 * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}
	rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	mutex_init(&rdev->grbm_idx_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	mutex_init(&rdev->mn_lock);
	hash_init(rdev->mn_hash);
	r = radeon_gem_init(rdev);
	if (r)
		return r;

	/* validate module parameters (may rewrite radeon_vm_size etc.) */
	radeon_check_arguments(rdev);
	/* Adjust VM size here.
	 * Max GPUVM size for cayman+ is 40 bits.
	 */
	/* radeon_vm_size is in GB; << 18 converts GB to 4KB pages */
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		/* 40-bit mask rejected: fall back to 32-bit DMA */
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&rdev->mmio_idx_lock);
	spin_lock_init(&rdev->smc_idx_lock);
	spin_lock_init(&rdev->pll_idx_lock);
	spin_lock_init(&rdev->mc_idx_lock);
	spin_lock_init(&rdev->pcie_idx_lock);
	spin_lock_init(&rdev->pciep_idx_lock);
	spin_lock_init(&rdev->pif_idx_lock);
	spin_lock_init(&rdev->cg_idx_lock);
	spin_lock_init(&rdev->uvd_idx_lock);
	spin_lock_init(&rdev->rcu_idx_lock);
	spin_lock_init(&rdev->didt_idx_lock);
	spin_lock_init(&rdev->end_idx_lock);
	/* register BAR differs by generation: BAR 5 on CIK+, BAR 2 before */
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* doorbell bar mapping */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	/* non-fatal: radeon_atombios_init() falls back to MMIO accessors */
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	if (rdev->flags & RADEON_IS_PX)
		radeon_device_handle_px_quirks(rdev);

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);

	if (rdev->flags & RADEON_IS_PX)
		runtime = true;
	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);

	r = radeon_init(rdev);
	if (r)
		goto failed;

	r = radeon_gem_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
	}

	r = radeon_mst_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering mst debugfs failed (%d).\n", r);
	}

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			goto failed;
	}

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	/*
	 * Turks/Thames GPU will freeze whole laptop if DPM is not restarted
	 * after the CP ring have chew one packet at least. Hence here we stop
	 * and restart DPM after the radeon_ib_ring_tests().
	 */
	if (rdev->pm.dpm_enabled &&
	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
	    (rdev->family == CHIP_TURKS) &&
	    (rdev->flags & RADEON_IS_MOBILITY)) {
		mutex_lock(&rdev->pm.mutex);
		radeon_dpm_disable(rdev);
		radeon_dpm_enable(rdev);
		mutex_unlock(&rdev->pm.mutex);
	}

	/* optional self-tests/benchmarks controlled by module parameters */
	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;

failed:
	/* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */
	if (radeon_is_px(ddev))
		pm_runtime_put_noidle(ddev->dev);
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	return r;
}
1538
4d8bf9ae
CK
1539static void radeon_debugfs_remove_files(struct radeon_device *rdev);
1540
0c195119
AD
1541/**
1542 * radeon_device_fini - tear down the driver
1543 *
1544 * @rdev: radeon_device pointer
1545 *
1546 * Tear down the driver info (all asics).
1547 * Called at driver shutdown.
1548 */
771fe6b9
JG
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	/* unwind the switcheroo/vga registrations from radeon_device_init() */
	vga_switcheroo_unregister_client(rdev->pdev);
	if (rdev->flags & RADEON_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	/* tear down the I/O and MMIO mappings */
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
	radeon_debugfs_remove_files(rdev);
}
1569
1570
1571/*
1572 * Suspend & resume.
1573 */
0c195119
AD
1574/**
1575 * radeon_suspend_kms - initiate device suspend
1576 *
 * @dev: drm dev pointer
1578 * @state: suspend state
1579 *
1580 * Puts the hw in the suspend state (all asics).
1581 * Returns 0 for success or an error on failure.
1582 * Called at driver suspend.
1583 */
274ad65c
JG
int radeon_suspend_kms(struct drm_device *dev, bool suspend,
		       bool fbcon, bool freeze)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	rdev = dev->dev_private;

	/* already powered off by vga_switcheroo: nothing to do */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	drm_modeset_lock_all(dev);
	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}
	drm_modeset_unlock_all(dev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
		struct radeon_bo *robj;

		if (radeon_crtc->cursor_bo) {
			/* NOTE: this robj intentionally shadows the outer one */
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			radeon_fence_driver_force_completion(rdev, i);
		}
	}

	radeon_save_bios_scratch_regs(rdev);

	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (freeze && rdev->family >= CHIP_CEDAR) {
		/* hibernation: reset the asic instead of a full power-down */
		rdev->asic->asic_reset(rdev, true);
		pci_restore_state(dev->pdev);
	} else if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	if (fbcon) {
		/* console_lock serializes against fbcon while we suspend it */
		console_lock();
		radeon_fbdev_set_suspend(rdev, 1);
		console_unlock();
	}
	return 0;
}
1676
0c195119
AD
1677/**
1678 * radeon_resume_kms - initiate device resume
1679 *
 * @dev: drm dev pointer
1681 *
1682 * Bring the hw back to operating state (all asics).
1683 * Returns 0 for success or an error on failure.
1684 * Called at driver resume.
1685 */
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_crtc *crtc;
	int r;

	/* powered off by vga_switcheroo: nothing to resume */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon) {
		console_lock();
	}
	if (resume) {
		/* bring the PCI device back to D0 before touching registers */
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		if (pci_enable_device(dev->pdev)) {
			if (fbcon)
				console_unlock();
			return -1;
		}
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				/* Only 27 bit offset for legacy cursor */
				r = radeon_bo_pin_restricted(robj,
							     RADEON_GEM_DOMAIN_VRAM,
							     ASIC_IS_AVIVO(rdev) ?
							     0 : 1 << 27,
							     &radeon_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				radeon_bo_unreserve(robj);
			}
		}
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_kms_helper_poll_enable(dev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (fbcon) {
		radeon_fbdev_set_suspend(rdev, 0);
		console_unlock();
	}

	return 0;
}
1789
0c195119
AD
1790/**
1791 * radeon_gpu_reset - reset the asic
1792 *
1793 * @rdev: radeon device pointer
1794 *
1795 * Attempt the reset the GPU if it has hung (all asics).
1796 * Returns 0 for success or an error on failure.
1797 */
90aca4d2
JG
int radeon_gpu_reset(struct radeon_device *rdev)
{
	/* per-ring backups of unprocessed commands, replayed after reset */
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	/* exclusive_lock keeps all other GPU users out during the reset */
	down_write(&rdev->exclusive_lock);

	if (!rdev->needs_reset) {
		up_write(&rdev->exclusive_lock);
		return 0;
	}

	atomic_inc(&rdev->gpu_reset_counter);

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);

	/* save the not-yet-processed commands from every ring */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						  &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* on success replay the saved commands, otherwise force-complete
	 * outstanding fences and drop the backups */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!r && ring_data[i]) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
		} else {
			radeon_fence_driver_force_completion(rdev, i);
			kfree(ring_data[i]);
		}
	}

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);

	rdev->in_reset = true;
	rdev->needs_reset = false;

	/* drop to a read lock so fence waiters can make progress while
	 * we finish up, but new resets are still excluded */
	downgrade_write(&rdev->exclusive_lock);

	drm_helper_resume_force_mode(rdev->ddev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (!r) {
		r = radeon_ib_ring_tests(rdev);
		if (r && saved)
			r = -EAGAIN;
	} else {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	/* -EAGAIN means the replayed commands failed: try again later */
	rdev->needs_reset = r == -EAGAIN;
	rdev->in_reset = false;

	up_read(&rdev->exclusive_lock);
	return r;
}
1906
771fe6b9
JG
1907
1908/*
1909 * Debugfs
1910 */
771fe6b9
JG
1911int radeon_debugfs_add_files(struct radeon_device *rdev,
1912 struct drm_info_list *files,
1913 unsigned nfiles)
1914{
1915 unsigned i;
1916
4d8bf9ae
CK
1917 for (i = 0; i < rdev->debugfs_count; i++) {
1918 if (rdev->debugfs[i].files == files) {
771fe6b9
JG
1919 /* Already registered */
1920 return 0;
1921 }
1922 }
c245cb9e 1923
4d8bf9ae 1924 i = rdev->debugfs_count + 1;
c245cb9e
MW
1925 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
1926 DRM_ERROR("Reached maximum number of debugfs components.\n");
1927 DRM_ERROR("Report so we increase "
3cf8bb1a 1928 "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
771fe6b9
JG
1929 return -EINVAL;
1930 }
4d8bf9ae
CK
1931 rdev->debugfs[rdev->debugfs_count].files = files;
1932 rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
1933 rdev->debugfs_count = i;
771fe6b9
JG
1934#if defined(CONFIG_DEBUG_FS)
1935 drm_debugfs_create_files(files, nfiles,
1936 rdev->ddev->control->debugfs_root,
1937 rdev->ddev->control);
1938 drm_debugfs_create_files(files, nfiles,
1939 rdev->ddev->primary->debugfs_root,
1940 rdev->ddev->primary);
1941#endif
1942 return 0;
1943}
1944
4d8bf9ae
CK
static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned idx;

	/* unregister every recorded file set from both drm minors */
	for (idx = 0; idx < rdev->debugfs_count; idx++) {
		struct drm_info_list *files = rdev->debugfs[idx].files;
		unsigned num_files = rdev->debugfs[idx].num_files;

		drm_debugfs_remove_files(files, num_files,
					 rdev->ddev->control);
		drm_debugfs_remove_files(files, num_files,
					 rdev->ddev->primary);
	}
#endif
}