2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
29 #include "drm_crtc_helper.h"
30 #include "radeon_drm.h"
31 #include "radeon_reg.h"
35 #define RADEON_WAIT_IDLE_TIMEOUT 200
/*
 * Top-level interrupt handler registered with the DRM core.
 * Recovers the drm_device from the opaque IRQ argument and hands off to
 * the asic-specific processing routine; its irqreturn_t result is
 * propagated back to the kernel IRQ layer.
 */
37 irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
39 struct drm_device *dev = (struct drm_device *) arg;
40 struct radeon_device *rdev = dev->dev_private;
42 return radeon_irq_process(rdev);
46 * Handle hotplug events outside the interrupt handler proper.
/*
 * Deferred-work handler for hot-plug interrupts: runs in process context
 * (via the rdev hotplug work item), polls every connector for its new
 * state, then notifies userspace with a uevent.  Kept out of the IRQ
 * handler because connector probing may sleep.
 */
48 static void radeon_hotplug_work_func(struct work_struct *work)
50 struct radeon_device *rdev = container_of(work, struct radeon_device,
52 struct drm_device *dev = rdev->ddev;
53 struct drm_mode_config *mode_config = &dev->mode_config;
54 struct drm_connector *connector;
/* Re-check the state of each connector; skip if none are registered. */
56 if (mode_config->num_connector) {
57 list_for_each_entry(connector, &mode_config->connector_list, head)
58 radeon_connector_hotplug(connector);
60 /* Just fire off a uevent and let userspace tell us what to do */
61 drm_helper_hpd_irq_event(dev);
/*
 * DRM irq_preinstall callback: masks every interrupt source under the
 * irq lock before the handler is installed, then drains any interrupt
 * that is already pending so the fresh handler starts from a clean slate.
 */
64 void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
66 struct radeon_device *rdev = dev->dev_private;
67 unsigned long irqflags;
70 spin_lock_irqsave(&rdev->irq.lock, irqflags);
71 /* Disable *all* interrupts */
72 for (i = 0; i < RADEON_NUM_RINGS; i++)
73 rdev->irq.sw_int[i] = false;
74 rdev->irq.gui_idle = false;
75 for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
76 rdev->irq.hpd[i] = false;
77 for (i = 0; i < RADEON_MAX_CRTCS; i++) {
78 rdev->irq.crtc_vblank_int[i] = false;
79 rdev->irq.pflip[i] = false;
80 rdev->irq.afmt[i] = false;
83 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
/* Clear out any interrupt that fired before we masked everything. */
85 radeon_irq_process(rdev);
/*
 * DRM irq_postinstall callback: runs once the IRQ handler is live.
 * Sets the vblank counter wrap value (21-bit hardware frame counter)
 * and enables the software (fence) interrupt on every ring under the
 * irq lock.  Return value follows the DRM callback contract.
 */
88 int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
90 struct radeon_device *rdev = dev->dev_private;
91 unsigned long irqflags;
94 dev->max_vblank_count = 0x001fffff;
95 spin_lock_irqsave(&rdev->irq.lock, irqflags);
96 for (i = 0; i < RADEON_NUM_RINGS; i++)
97 rdev->irq.sw_int[i] = true;
99 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
/*
 * DRM irq_uninstall callback: masks every interrupt source (same set as
 * preinstall) and, unlike preinstall, pushes the disabled state to the
 * hardware via radeon_irq_set() while still holding the irq lock, since
 * the handler is about to go away for good.
 */
103 void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
105 struct radeon_device *rdev = dev->dev_private;
106 unsigned long irqflags;
112 spin_lock_irqsave(&rdev->irq.lock, irqflags);
113 /* Disable *all* interrupts */
114 for (i = 0; i < RADEON_NUM_RINGS; i++)
115 rdev->irq.sw_int[i] = false;
116 rdev->irq.gui_idle = false;
117 for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
118 rdev->irq.hpd[i] = false;
119 for (i = 0; i < RADEON_MAX_CRTCS; i++) {
120 rdev->irq.crtc_vblank_int[i] = false;
121 rdev->irq.pflip[i] = false;
122 rdev->irq.afmt[i] = false;
/* Commit the all-masked state to the hardware registers. */
124 radeon_irq_set(rdev);
125 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
/*
 * Decide whether MSI interrupts should be enabled for this device.
 * Applies a hardware capability floor, the radeon_msi module override,
 * and a list of per-board quirks (matched on PCI device/subsystem IDs).
 * NOTE(review): the individual return statements are not visible in this
 * excerpt; the comments below describe the intent of each check only.
 */
128 static bool radeon_msi_ok(struct radeon_device *rdev)
130 /* RV370/RV380 was first asic with MSI support */
131 if (rdev->family < CHIP_RV380)
134 /* MSIs don't work on AGP */
135 if (rdev->flags & RADEON_IS_AGP)
/* radeon_msi==0 is the explicit "force MSI off" module parameter. */
141 else if (radeon_msi == 0)
145 /* HP RS690 only seems to work with MSIs. */
146 if ((rdev->pdev->device == 0x791f) &&
147 (rdev->pdev->subsystem_vendor == 0x103c) &&
148 (rdev->pdev->subsystem_device == 0x30c2))
151 /* Dell RS690 only seems to work with MSIs. */
152 if ((rdev->pdev->device == 0x791f) &&
153 (rdev->pdev->subsystem_vendor == 0x1028) &&
154 (rdev->pdev->subsystem_device == 0x01fc))
157 /* Dell RS690 only seems to work with MSIs. */
158 if ((rdev->pdev->device == 0x791f) &&
159 (rdev->pdev->subsystem_vendor == 0x1028) &&
160 (rdev->pdev->subsystem_device == 0x01fd))
163 /* RV515 seems to have MSI issues where it loses
164 * MSI rearms occasionally. This leads to lockups and freezes.
165 * disable it by default.
167 if (rdev->family == CHIP_RV515)
169 if (rdev->flags & RADEON_IS_IGP) {
170 /* APUs work fine with MSIs */
171 if (rdev->family >= CHIP_PALM)
173 /* lots of IGPs have problems with MSIs */
/*
 * One-time IRQ setup for the KMS driver.  Initializes the deferred-work
 * items (hotplug, HDMI audio) and the irq spinlock, registers vblank
 * support with DRM for each CRTC, optionally enables PCI MSI when
 * radeon_msi_ok() allows it, and finally installs the IRQ handler.
 * Returns 0 on success or a negative errno from the DRM helpers.
 */
180 int radeon_irq_kms_init(struct radeon_device *rdev)
184 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
185 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
187 spin_lock_init(&rdev->irq.lock);
188 r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
/* MSI is strictly optional: fall back silently to legacy INTx. */
193 rdev->msi_enabled = 0;
195 if (radeon_msi_ok(rdev)) {
196 int ret = pci_enable_msi(rdev->pdev);
198 rdev->msi_enabled = 1;
199 dev_info(rdev->dev, "radeon: using MSI.\n");
/* Mark installed before drm_irq_install so teardown paths see it;
 * rolled back below if installation fails. */
202 rdev->irq.installed = true;
203 r = drm_irq_install(rdev->ddev);
205 rdev->irq.installed = false;
208 DRM_INFO("radeon: irq initialized.\n");
/*
 * Teardown counterpart of radeon_irq_kms_init(): releases DRM vblank
 * state, uninstalls the IRQ handler if one was installed (disabling MSI
 * afterwards if it had been enabled), and waits for any in-flight
 * hotplug work to finish before returning.
 */
212 void radeon_irq_kms_fini(struct radeon_device *rdev)
214 drm_vblank_cleanup(rdev->ddev);
215 if (rdev->irq.installed) {
216 drm_irq_uninstall(rdev->ddev);
217 rdev->irq.installed = false;
218 if (rdev->msi_enabled)
219 pci_disable_msi(rdev->pdev);
/* Don't leave the hotplug worker running against a dead device. */
221 flush_work_sync(&rdev->hotplug_work);
/*
 * Take a reference on the software (fence) interrupt for @ring.
 * Only the 0 -> 1 refcount transition touches the hardware: it enables
 * the sw interrupt and commits via radeon_irq_set().  No-op while DRM
 * IRQs are not yet enabled.  Pair every call with _sw_irq_put().
 */
224 void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
226 unsigned long irqflags;
228 spin_lock_irqsave(&rdev->irq.lock, irqflags);
229 if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount[ring] == 1)) {
230 rdev->irq.sw_int[ring] = true;
231 radeon_irq_set(rdev);
233 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
/*
 * Drop a reference taken by radeon_irq_kms_sw_irq_get() for @ring.
 * Only the 1 -> 0 transition disables the interrupt in hardware.
 * BUG_ON catches refcount underflow (a put without a matching get).
 */
236 void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
238 unsigned long irqflags;
240 spin_lock_irqsave(&rdev->irq.lock, irqflags);
241 BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount[ring] <= 0);
242 if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount[ring] == 0)) {
243 rdev->irq.sw_int[ring] = false;
244 radeon_irq_set(rdev);
246 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
/*
 * Take a reference on the page-flip interrupt for @crtc.  Bounds-checks
 * the crtc index against the device's CRTC count, then mirrors the
 * sw_irq_get pattern: only the first reference enables the interrupt
 * and writes the hardware via radeon_irq_set().
 */
249 void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
251 unsigned long irqflags;
253 if (crtc < 0 || crtc >= rdev->num_crtc)
256 spin_lock_irqsave(&rdev->irq.lock, irqflags);
257 if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) {
258 rdev->irq.pflip[crtc] = true;
259 radeon_irq_set(rdev);
261 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
/*
 * Drop a reference taken by radeon_irq_kms_pflip_irq_get() for @crtc.
 * Invalid crtc indices are ignored; the last reference disables the
 * page-flip interrupt in hardware.  BUG_ON catches refcount underflow.
 */
264 void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
266 unsigned long irqflags;
268 if (crtc < 0 || crtc >= rdev->num_crtc)
271 spin_lock_irqsave(&rdev->irq.lock, irqflags);
272 BUG_ON(rdev->ddev->irq_enabled && rdev->irq.pflip_refcount[crtc] <= 0);
273 if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) {
274 rdev->irq.pflip[crtc] = false;
275 radeon_irq_set(rdev);
277 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
/*
 * Enable the HDMI audio-format (AFMT) status-change interrupt for the
 * given @block and commit to hardware.  Not refcounted -- unconditional
 * enable under the irq lock.  @block is trusted to be a valid index.
 */
280 void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
282 unsigned long irqflags;
284 spin_lock_irqsave(&rdev->irq.lock, irqflags);
285 rdev->irq.afmt[block] = true;
286 radeon_irq_set(rdev);
287 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
/*
 * Disable the HDMI audio-format (AFMT) status-change interrupt for the
 * given @block and commit to hardware.  Mirror of _enable_afmt().
 */
291 void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
293 unsigned long irqflags;
295 spin_lock_irqsave(&rdev->irq.lock, irqflags);
296 rdev->irq.afmt[block] = false;
297 radeon_irq_set(rdev);
298 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
/*
 * Enable hot-plug-detect interrupts for every pin whose bit is set in
 * @hpd_mask, leaving the other pins untouched (|= with the normalized
 * per-pin bit), then commit the new mask to hardware.
 */
301 void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
303 unsigned long irqflags;
306 spin_lock_irqsave(&rdev->irq.lock, irqflags);
307 for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
308 rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
309 radeon_irq_set(rdev);
310 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
/*
 * Disable hot-plug-detect interrupts for every pin whose bit is set in
 * @hpd_mask.  Note the logical (not bitwise) negation: hpd[i] is a bool,
 * so `&= !(bit set)` clears exactly the selected pins and preserves the
 * rest.  The new mask is then committed to hardware.
 */
313 void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
315 unsigned long irqflags;
318 spin_lock_irqsave(&rdev->irq.lock, irqflags);
319 for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
320 rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
321 radeon_irq_set(rdev);
322 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
325 int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev)
327 unsigned long irqflags;
330 spin_lock_irqsave(&rdev->irq.lock, irqflags);
331 rdev->irq.gui_idle = true;
332 radeon_irq_set(rdev);
333 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
335 r = wait_event_timeout(rdev->irq.idle_queue, radeon_gui_idle(rdev),
336 msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
338 spin_lock_irqsave(&rdev->irq.lock, irqflags);
339 rdev->irq.gui_idle = false;
340 radeon_irq_set(rdev);
341 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);