/* ae7d3a82a6d1279e726d603a3e739ce4e1e504b5
 * [linux-2.6-block.git] / drivers / gpu / drm / i915 / i915_irq.c
 */
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28
29 #include "drmP.h"
30 #include "drm.h"
31 #include "i915_drm.h"
32 #include "i915_drv.h"
33
/* NOTE(review): MAX_NOPID is not referenced anywhere in this file -
 * presumably consumed by other i915 code; confirm before removing. */
#define MAX_NOPID ((u32)~0)

/** These are the interrupts used by the driver */
#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT |               \
                                    I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | \
                                    I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT | \
                                    I915_ASLE_INTERRUPT |               \
                                    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
42
43 void
44 i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
45 {
46         if ((dev_priv->irq_mask_reg & mask) != 0) {
47                 dev_priv->irq_mask_reg &= ~mask;
48                 I915_WRITE(IMR, dev_priv->irq_mask_reg);
49                 (void) I915_READ(IMR);
50         }
51 }
52
53 static inline void
54 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
55 {
56         if ((dev_priv->irq_mask_reg & mask) != mask) {
57                 dev_priv->irq_mask_reg |= mask;
58                 I915_WRITE(IMR, dev_priv->irq_mask_reg);
59                 (void) I915_READ(IMR);
60         }
61 }
62
63 /**
64  * Emit blits for scheduled buffer swaps.
65  *
66  * This function will be called with the HW lock held.
67  */
68 static void i915_vblank_tasklet(struct drm_device *dev)
69 {
70         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
71         unsigned long irqflags;
72         struct list_head *list, *tmp, hits, *hit;
73         int nhits, nrects, slice[2], upper[2], lower[2], i;
74         unsigned counter[2] = { atomic_read(&dev->vbl_received),
75                                 atomic_read(&dev->vbl_received2) };
76         struct drm_drawable_info *drw;
77         drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
78         u32 cpp = dev_priv->cpp;
79         u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
80                                 XY_SRC_COPY_BLT_WRITE_ALPHA |
81                                 XY_SRC_COPY_BLT_WRITE_RGB)
82                              : XY_SRC_COPY_BLT_CMD;
83         u32 src_pitch = sarea_priv->pitch * cpp;
84         u32 dst_pitch = sarea_priv->pitch * cpp;
85         u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
86         RING_LOCALS;
87
88         if (IS_I965G(dev) && sarea_priv->front_tiled) {
89                 cmd |= XY_SRC_COPY_BLT_DST_TILED;
90                 dst_pitch >>= 2;
91         }
92         if (IS_I965G(dev) && sarea_priv->back_tiled) {
93                 cmd |= XY_SRC_COPY_BLT_SRC_TILED;
94                 src_pitch >>= 2;
95         }
96
97         DRM_DEBUG("\n");
98
99         INIT_LIST_HEAD(&hits);
100
101         nhits = nrects = 0;
102
103         spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
104
105         /* Find buffer swaps scheduled for this vertical blank */
106         list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
107                 drm_i915_vbl_swap_t *vbl_swap =
108                         list_entry(list, drm_i915_vbl_swap_t, head);
109
110                 if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23))
111                         continue;
112
113                 list_del(list);
114                 dev_priv->swaps_pending--;
115
116                 spin_unlock(&dev_priv->swaps_lock);
117                 spin_lock(&dev->drw_lock);
118
119                 drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
120
121                 if (!drw) {
122                         spin_unlock(&dev->drw_lock);
123                         drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
124                         spin_lock(&dev_priv->swaps_lock);
125                         continue;
126                 }
127
128                 list_for_each(hit, &hits) {
129                         drm_i915_vbl_swap_t *swap_cmp =
130                                 list_entry(hit, drm_i915_vbl_swap_t, head);
131                         struct drm_drawable_info *drw_cmp =
132                                 drm_get_drawable_info(dev, swap_cmp->drw_id);
133
134                         if (drw_cmp &&
135                             drw_cmp->rects[0].y1 > drw->rects[0].y1) {
136                                 list_add_tail(list, hit);
137                                 break;
138                         }
139                 }
140
141                 spin_unlock(&dev->drw_lock);
142
143                 /* List of hits was empty, or we reached the end of it */
144                 if (hit == &hits)
145                         list_add_tail(list, hits.prev);
146
147                 nhits++;
148
149                 spin_lock(&dev_priv->swaps_lock);
150         }
151
152         if (nhits == 0) {
153                 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
154                 return;
155         }
156
157         spin_unlock(&dev_priv->swaps_lock);
158
159         i915_kernel_lost_context(dev);
160
161         if (IS_I965G(dev)) {
162                 BEGIN_LP_RING(4);
163
164                 OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
165                 OUT_RING(0);
166                 OUT_RING(((sarea_priv->width - 1) & 0xffff) | ((sarea_priv->height - 1) << 16));
167                 OUT_RING(0);
168                 ADVANCE_LP_RING();
169         } else {
170                 BEGIN_LP_RING(6);
171
172                 OUT_RING(GFX_OP_DRAWRECT_INFO);
173                 OUT_RING(0);
174                 OUT_RING(0);
175                 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
176                 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
177                 OUT_RING(0);
178
179                 ADVANCE_LP_RING();
180         }
181
182         sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
183
184         upper[0] = upper[1] = 0;
185         slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
186         slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
187         lower[0] = sarea_priv->pipeA_y + slice[0];
188         lower[1] = sarea_priv->pipeB_y + slice[0];
189
190         spin_lock(&dev->drw_lock);
191
192         /* Emit blits for buffer swaps, partitioning both outputs into as many
193          * slices as there are buffer swaps scheduled in order to avoid tearing
194          * (based on the assumption that a single buffer swap would always
195          * complete before scanout starts).
196          */
197         for (i = 0; i++ < nhits;
198              upper[0] = lower[0], lower[0] += slice[0],
199              upper[1] = lower[1], lower[1] += slice[1]) {
200                 if (i == nhits)
201                         lower[0] = lower[1] = sarea_priv->height;
202
203                 list_for_each(hit, &hits) {
204                         drm_i915_vbl_swap_t *swap_hit =
205                                 list_entry(hit, drm_i915_vbl_swap_t, head);
206                         struct drm_clip_rect *rect;
207                         int num_rects, pipe;
208                         unsigned short top, bottom;
209
210                         drw = drm_get_drawable_info(dev, swap_hit->drw_id);
211
212                         if (!drw)
213                                 continue;
214
215                         rect = drw->rects;
216                         pipe = swap_hit->pipe;
217                         top = upper[pipe];
218                         bottom = lower[pipe];
219
220                         for (num_rects = drw->num_rects; num_rects--; rect++) {
221                                 int y1 = max(rect->y1, top);
222                                 int y2 = min(rect->y2, bottom);
223
224                                 if (y1 >= y2)
225                                         continue;
226
227                                 BEGIN_LP_RING(8);
228
229                                 OUT_RING(cmd);
230                                 OUT_RING(ropcpp | dst_pitch);
231                                 OUT_RING((y1 << 16) | rect->x1);
232                                 OUT_RING((y2 << 16) | rect->x2);
233                                 OUT_RING(sarea_priv->front_offset);
234                                 OUT_RING((y1 << 16) | rect->x1);
235                                 OUT_RING(src_pitch);
236                                 OUT_RING(sarea_priv->back_offset);
237
238                                 ADVANCE_LP_RING();
239                         }
240                 }
241         }
242
243         spin_unlock_irqrestore(&dev->drw_lock, irqflags);
244
245         list_for_each_safe(hit, tmp, &hits) {
246                 drm_i915_vbl_swap_t *swap_hit =
247                         list_entry(hit, drm_i915_vbl_swap_t, head);
248
249                 list_del(hit);
250
251                 drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
252         }
253 }
254
255 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
256 {
257         struct drm_device *dev = (struct drm_device *) arg;
258         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
259         u32 pipea_stats, pipeb_stats;
260         u32 iir;
261
262         pipea_stats = I915_READ(PIPEASTAT);
263         pipeb_stats = I915_READ(PIPEBSTAT);
264
265         if (dev->pdev->msi_enabled)
266                 I915_WRITE(IMR, ~0);
267         iir = I915_READ(IIR);
268
269         DRM_DEBUG("iir=%08x\n", iir);
270
271         if (iir == 0) {
272                 if (dev->pdev->msi_enabled) {
273                         I915_WRITE(IMR, dev_priv->irq_mask_reg);
274                         (void) I915_READ(IMR);
275                 }
276                 return IRQ_NONE;
277         }
278
279         I915_WRITE(PIPEASTAT, pipea_stats);
280         I915_WRITE(PIPEBSTAT, pipeb_stats);
281
282         I915_WRITE(IIR, iir);
283         if (dev->pdev->msi_enabled)
284                 I915_WRITE(IMR, dev_priv->irq_mask_reg);
285         (void) I915_READ(IIR); /* Flush posted writes */
286
287         dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
288
289         if (iir & I915_USER_INTERRUPT)
290                 DRM_WAKEUP(&dev_priv->irq_queue);
291
292         if (iir & (I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
293                    I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)) {
294                 int vblank_pipe = dev_priv->vblank_pipe;
295
296                 if ((vblank_pipe &
297                      (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
298                     == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
299                         if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
300                                 atomic_inc(&dev->vbl_received);
301                         if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
302                                 atomic_inc(&dev->vbl_received2);
303                 } else if (((iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) &&
304                             (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
305                            ((iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) &&
306                             (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
307                         atomic_inc(&dev->vbl_received);
308
309                 DRM_WAKEUP(&dev->vbl_queue);
310                 drm_vbl_send_signals(dev);
311
312                 if (dev_priv->swaps_pending > 0)
313                         drm_locked_tasklet(dev, i915_vblank_tasklet);
314         }
315
316         if (iir & I915_ASLE_INTERRUPT)
317                 opregion_asle_intr(dev);
318
319         if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
320                 opregion_asle_intr(dev);
321
322         return IRQ_HANDLED;
323 }
324
/* Emit a breadcrumb store plus a user interrupt into the ring.
 *
 * Returns the new breadcrumb sequence number; the interrupt handler
 * wakes waiters once READ_BREADCRUMB() observes it.  Must be called
 * with the HW lock held (touches the ring).
 */
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	i915_kernel_lost_context(dev);

	DRM_DEBUG("\n");

	/* Advance the software counter and mirror it into the sarea. */
	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;

	/* Wrap before the 31-bit signed boundary; 0 is never used. */
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;

	BEGIN_LP_RING(6);
	OUT_RING(MI_STORE_DWORD_INDEX);
	/* Store into status-page dword 5 - presumably the slot that
	 * READ_BREADCRUMB reads; confirm against i915_drv.h. */
	OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	OUT_RING(0);
	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	return dev_priv->counter;
}
350
351 static void i915_user_irq_get(struct drm_device *dev)
352 {
353         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
354
355         spin_lock(&dev_priv->user_irq_lock);
356         if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
357                 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
358         spin_unlock(&dev_priv->user_irq_lock);
359 }
360
361 static void i915_user_irq_put(struct drm_device *dev)
362 {
363         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
364
365         spin_lock(&dev_priv->user_irq_lock);
366         BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
367         if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
368                 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
369         spin_unlock(&dev_priv->user_irq_lock);
370 }
371
/* Block until the breadcrumb reaches irq_nr, or up to 3 seconds.
 *
 * Returns 0 on success, -EBUSY on timeout (logged), or another error
 * from DRM_WAIT_ON.  The latest breadcrumb is mirrored into the sarea
 * for userspace on every exit path.
 */
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = 0;

	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

	/* Target already passed - nothing to wait for. */
	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	/* Hold a user-interrupt reference so MI_USER_INTERRUPT stays
	 * unmasked while we sleep on irq_queue; the IRQ handler wakes us. */
	i915_user_irq_get(dev);
	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= irq_nr);
	i915_user_irq_put(dev);

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
	return ret;
}
400
/* Wait (up to 3 seconds) for a vblank counter to reach *sequence.
 *
 * The termination test uses unsigned wrap-around arithmetic: the wait
 * completes once (cur_vblank - *sequence) <= 2^23, i.e. the target has
 * been reached or passed even across counter wrap.  On return,
 * *sequence holds the counter value actually observed.  Returns 0 or
 * the error from DRM_WAIT_ON.
 */
static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
				      atomic_t *counter)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned int cur_vblank;
	int ret = 0;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	/* cur_vblank is assigned inside the wait condition on purpose:
	 * it re-samples the counter each time the condition is checked. */
	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
		    (((cur_vblank = atomic_read(counter))
			- *sequence) <= (1<<23)));

	*sequence = cur_vblank;

	return ret;
}
421
422
/* Wait on the primary vblank counter (vbl_received). */
int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
{
	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
}
427
/* Wait on the secondary vblank counter (vbl_received2). */
int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
{
	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
}
432
433 /* Needs the lock as it touches the ring.
434  */
435 int i915_irq_emit(struct drm_device *dev, void *data,
436                          struct drm_file *file_priv)
437 {
438         drm_i915_private_t *dev_priv = dev->dev_private;
439         drm_i915_irq_emit_t *emit = data;
440         int result;
441
442         LOCK_TEST_WITH_RETURN(dev, file_priv);
443
444         if (!dev_priv) {
445                 DRM_ERROR("called with no initialization\n");
446                 return -EINVAL;
447         }
448
449         result = i915_emit_irq(dev);
450
451         if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
452                 DRM_ERROR("copy_to_user\n");
453                 return -EFAULT;
454         }
455
456         return 0;
457 }
458
/* Doesn't need the hardware lock.
 *
 * Ioctl: wait for the breadcrumb to reach the caller-supplied sequence
 * number (see i915_wait_irq for timeout/return semantics).
 */
int i915_irq_wait(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}
474
475 /* Set the vblank monitor pipe
476  */
477 int i915_vblank_pipe_set(struct drm_device *dev, void *data,
478                          struct drm_file *file_priv)
479 {
480         drm_i915_private_t *dev_priv = dev->dev_private;
481         drm_i915_vblank_pipe_t *pipe = data;
482         u32 enable_mask = 0, disable_mask = 0;
483
484         if (!dev_priv) {
485                 DRM_ERROR("called with no initialization\n");
486                 return -EINVAL;
487         }
488
489         if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
490                 DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
491                 return -EINVAL;
492         }
493
494         if (pipe->pipe & DRM_I915_VBLANK_PIPE_A)
495                 enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
496         else
497                 disable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
498
499         if (pipe->pipe & DRM_I915_VBLANK_PIPE_B)
500                 enable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
501         else
502                 disable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
503
504         i915_enable_irq(dev_priv, enable_mask);
505         i915_disable_irq(dev_priv, disable_mask);
506
507         dev_priv->vblank_pipe = pipe->pipe;
508
509         return 0;
510 }
511
512 int i915_vblank_pipe_get(struct drm_device *dev, void *data,
513                          struct drm_file *file_priv)
514 {
515         drm_i915_private_t *dev_priv = dev->dev_private;
516         drm_i915_vblank_pipe_t *pipe = data;
517         u16 flag;
518
519         if (!dev_priv) {
520                 DRM_ERROR("called with no initialization\n");
521                 return -EINVAL;
522         }
523
524         flag = I915_READ(IMR);
525         pipe->pipe = 0;
526         if (flag & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
527                 pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
528         if (flag & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
529                 pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
530
531         return 0;
532 }
533
/**
 * Schedule buffer swap at given vertical blank.
 *
 * Validates the request, converts a relative sequence to absolute, and
 * queues a drm_i915_vbl_swap_t for i915_vblank_tasklet() to service once
 * the pipe's vblank counter reaches the target.  Returns 0 on success
 * (including when an identical swap is already queued) or -errno.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_swap_t *swap = data;
	drm_i915_vbl_swap_t *vbl_swap;
	unsigned int pipe, seqtype, curseq;
	unsigned long irqflags;
	struct list_head *list;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __func__);
		return -EINVAL;
	}

	if (dev_priv->sarea_priv->rotation) {
		DRM_DEBUG("Rotation not supported\n");
		return -EINVAL;
	}

	/* Reject seqtype bits we don't understand. */
	if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
			     _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
		DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
		return -EINVAL;
	}

	pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;

	seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);

	/* pipe 0/1 maps onto the DRM_I915_VBLANK_PIPE_* bitmask. */
	if (!(dev_priv->vblank_pipe & (1 << pipe))) {
		DRM_ERROR("Invalid pipe %d\n", pipe);
		return -EINVAL;
	}

	spin_lock_irqsave(&dev->drw_lock, irqflags);

	/* The drawable must exist now; the tasklet re-checks before
	 * blitting in case it vanishes later. */
	if (!drm_get_drawable_info(dev, swap->drawable)) {
		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
		DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
		return -EINVAL;
	}

	spin_unlock_irqrestore(&dev->drw_lock, irqflags);

	curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);

	if (seqtype == _DRM_VBLANK_RELATIVE)
		swap->sequence += curseq;

	/* Target already passed (modulo-2^23 comparison)? */
	if ((curseq - swap->sequence) <= (1<<23)) {
		if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
			swap->sequence = curseq + 1;
		} else {
			DRM_DEBUG("Missed target sequence\n");
			return -EINVAL;
		}
	}

	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

	/* Coalesce with an identical already-queued swap. */
	list_for_each(list, &dev_priv->vbl_swaps.head) {
		vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);

		if (vbl_swap->drw_id == swap->drawable &&
		    vbl_swap->pipe == pipe &&
		    vbl_swap->sequence == swap->sequence) {
			spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
			DRM_DEBUG("Already scheduled\n");
			return 0;
		}
	}

	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

	/* NOTE(review): swaps_lock is dropped here, so two racing callers
	 * could both pass the duplicate scan and this limit check -
	 * presumably the HW lock serializes this ioctl; confirm. */
	if (dev_priv->swaps_pending >= 100) {
		DRM_DEBUG("Too many swaps queued\n");
		return -EBUSY;
	}

	vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);

	if (!vbl_swap) {
		DRM_ERROR("Failed to allocate memory to queue swap\n");
		return -ENOMEM;
	}

	DRM_DEBUG("\n");

	vbl_swap->drw_id = swap->drawable;
	vbl_swap->pipe = pipe;
	vbl_swap->sequence = swap->sequence;

	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

	list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
	dev_priv->swaps_pending++;

	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

	return 0;
}
639
640 /* drm_dma.h hooks
641 */
/* Quiesce interrupt generation before the IRQ line is hooked up. */
void i915_driver_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	/* NOTE(review): HWSTAM gets 0xfffe, not 0xffff - presumably bit 0
	 * must stay live for status-page updates; confirm against PRM. */
	I915_WRITE(HWSTAM, 0xfffe);
	I915_WRITE(IMR, 0x0);
	/* IER = 0 disables all interrupt sources regardless of IMR. */
	I915_WRITE(IER, 0x0);
}
650
/* Finish IRQ setup once the handler is installed: initialize the swap
 * queue, program IMR/IER for the selected vblank pipes plus the user
 * and ASLE interrupts, and create the user-interrupt wait queue.
 */
void i915_driver_irq_postinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	spin_lock_init(&dev_priv->swaps_lock);
	INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
	dev_priv->swaps_pending = 0;

	/* Default to monitoring pipe A if userspace never chose one. */
	if (!dev_priv->vblank_pipe)
		dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;

	/* Set initial unmasked IRQs to just the selected vblank pipes. */
	dev_priv->irq_mask_reg = ~0;
	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
		dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
		dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/* Sources outside the enable mask end up unmasked in IMR, but
	 * they stay off because IER only enables the enable-mask bits. */
	dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK;

	I915_WRITE(IMR, dev_priv->irq_mask_reg);
	I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
	(void) I915_READ(IER);	/* posting read */

	opregion_enable_asle(dev);

	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
}
679
/* Tear down interrupt delivery: mask and disable everything, then ack
 * any reasons still pending in IIR.
 */
void i915_driver_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* NOTE(review): temp is u16 but IIR is read/written as 32-bit
	 * elsewhere in this file - upper reason bits would be dropped
	 * here; confirm whether any exist on supported chips. */
	u16 temp;

	if (!dev_priv)
		return;

	I915_WRITE(HWSTAM, 0xffff);
	I915_WRITE(IMR, 0xffff);
	I915_WRITE(IER, 0x0);

	/* Ack whatever is still pending by writing IIR back to itself. */
	temp = I915_READ(IIR);
	I915_WRITE(IIR, temp);
}