Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* i810_dma.c -- DMA support for the i810 -*- linux-c -*- |
2 | * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com | |
3 | * | |
4 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. | |
5 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | |
6 | * All Rights Reserved. | |
7 | * | |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | |
9 | * copy of this software and associated documentation files (the "Software"), | |
10 | * to deal in the Software without restriction, including without limitation | |
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
12 | * and/or sell copies of the Software, and to permit persons to whom the | |
13 | * Software is furnished to do so, subject to the following conditions: | |
14 | * | |
15 | * The above copyright notice and this permission notice (including the next | |
16 | * paragraph) shall be included in all copies or substantial portions of the | |
17 | * Software. | |
18 | * | |
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
22 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | |
25 | * DEALINGS IN THE SOFTWARE. | |
26 | * | |
27 | * Authors: Rickard E. (Rik) Faith <faith@valinux.com> | |
28 | * Jeff Hartmann <jhartmann@valinux.com> | |
29 | * Keith Whitwell <keith@tungstengraphics.com> | |
30 | * | |
31 | */ | |
32 | ||
33 | #include "drmP.h" | |
34 | #include "drm.h" | |
35 | #include "i810_drm.h" | |
36 | #include "i810_drv.h" | |
37 | #include <linux/interrupt.h> /* For task queue support */ | |
38 | #include <linux/delay.h> | |
39 | #include <linux/pagemap.h> | |
40 | ||
41 | #define I810_BUF_FREE 2 | |
42 | #define I810_BUF_CLIENT 1 | |
43 | #define I810_BUF_HARDWARE 0 | |
44 | ||
45 | #define I810_BUF_UNMAPPED 0 | |
46 | #define I810_BUF_MAPPED 1 | |
47 | ||
056219e2 | 48 | static struct drm_buf *i810_freelist_get(struct drm_device * dev) |
1da177e4 | 49 | { |
cdd55a29 | 50 | struct drm_device_dma *dma = dev->dma; |
b5e89ed5 DA |
51 | int i; |
52 | int used; | |
1da177e4 LT |
53 | |
54 | /* Linear search might not be the best solution */ | |
55 | ||
b5e89ed5 | 56 | for (i = 0; i < dma->buf_count; i++) { |
056219e2 | 57 | struct drm_buf *buf = dma->buflist[i]; |
b5e89ed5 | 58 | drm_i810_buf_priv_t *buf_priv = buf->dev_private; |
1da177e4 | 59 | /* In use is already a pointer */ |
b5e89ed5 | 60 | used = cmpxchg(buf_priv->in_use, I810_BUF_FREE, |
1da177e4 LT |
61 | I810_BUF_CLIENT); |
62 | if (used == I810_BUF_FREE) { | |
63 | return buf; | |
64 | } | |
65 | } | |
b5e89ed5 | 66 | return NULL; |
1da177e4 LT |
67 | } |
68 | ||
69 | /* This should only be called if the buffer is not sent to the hardware | |
70 | * yet, the hardware updates in use for us once its on the ring buffer. | |
71 | */ | |
72 | ||
056219e2 | 73 | static int i810_freelist_put(struct drm_device * dev, struct drm_buf * buf) |
1da177e4 | 74 | { |
b5e89ed5 DA |
75 | drm_i810_buf_priv_t *buf_priv = buf->dev_private; |
76 | int used; | |
1da177e4 | 77 | |
b5e89ed5 DA |
78 | /* In use is already a pointer */ |
79 | used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE); | |
1da177e4 | 80 | if (used != I810_BUF_CLIENT) { |
b5e89ed5 DA |
81 | DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx); |
82 | return -EINVAL; | |
1da177e4 LT |
83 | } |
84 | ||
b5e89ed5 | 85 | return 0; |
1da177e4 LT |
86 | } |
87 | ||
/* mmap handler installed via i810_buffer_fops for the duration of a
 * single do_mmap() call in i810_map_buffer().  It maps whichever buffer
 * i810_map_buffer() published in dev_priv->mmap_buffer.
 */
static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	drm_i810_private_t *dev_priv;
	struct drm_buf *buf;
	drm_i810_buf_priv_t *buf_priv;

	lock_kernel();
	dev = priv->head->dev;
	dev_priv = dev->dev_private;
	/* Set up by i810_map_buffer() just before it calls do_mmap(). */
	buf = dev_priv->mmap_buffer;
	buf_priv = buf->dev_private;

	/* Device memory: mark as I/O and do not duplicate on fork. */
	vma->vm_flags |= (VM_IO | VM_DONTCOPY);
	vma->vm_file = filp;

	buf_priv->currently_mapped = I810_BUF_MAPPED;
	unlock_kernel();

	/* Map the buffer's pages straight into the client's VMA. */
	if (io_remap_pfn_range(vma, vma->vm_start,
			       vma->vm_pgoff,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
114 | ||
/* File operations swapped in on the DRM fd for the duration of the
 * do_mmap() call in i810_map_buffer(), so that the mmap is routed to
 * i810_mmap_buffers() instead of the generic drm mmap handler.
 */
static const struct file_operations i810_buffer_fops = {
	.open = drm_open,
	.release = drm_release,
	.ioctl = drm_ioctl,
	.mmap = i810_mmap_buffers,
	.fasync = drm_fasync,
};
122 | ||
/* Map a dma buffer into the calling process' address space.
 *
 * Trick: under mmap_sem we temporarily swap filp->f_op for
 * i810_buffer_fops and publish the buffer in dev_priv->mmap_buffer, so
 * the do_mmap() below is serviced by i810_mmap_buffers() for exactly
 * this buffer.  Returns 0 on success or a negative errno from do_mmap.
 */
static int i810_map_buffer(struct drm_buf * buf, struct file *filp)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->head->dev;
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	drm_i810_private_t *dev_priv = dev->dev_private;
	const struct file_operations *old_fops;
	int retcode = 0;

	if (buf_priv->currently_mapped == I810_BUF_MAPPED)
		return -EINVAL;

	down_write(&current->mm->mmap_sem);
	old_fops = filp->f_op;
	filp->f_op = &i810_buffer_fops;
	dev_priv->mmap_buffer = buf;
	buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total,
					    PROT_READ | PROT_WRITE,
					    MAP_SHARED, buf->bus_address);
	/* Restore state before anyone else can see the swapped f_op. */
	dev_priv->mmap_buffer = NULL;
	filp->f_op = old_fops;
	if (IS_ERR(buf_priv->virtual)) {
		/* Real error */
		DRM_ERROR("mmap error\n");
		retcode = PTR_ERR(buf_priv->virtual);
		buf_priv->virtual = NULL;
	}
	up_write(&current->mm->mmap_sem);

	return retcode;
}
154 | ||
056219e2 | 155 | static int i810_unmap_buffer(struct drm_buf * buf) |
1da177e4 LT |
156 | { |
157 | drm_i810_buf_priv_t *buf_priv = buf->dev_private; | |
158 | int retcode = 0; | |
159 | ||
160 | if (buf_priv->currently_mapped != I810_BUF_MAPPED) | |
161 | return -EINVAL; | |
162 | ||
163 | down_write(¤t->mm->mmap_sem); | |
164 | retcode = do_munmap(current->mm, | |
165 | (unsigned long)buf_priv->virtual, | |
166 | (size_t) buf->total); | |
167 | up_write(¤t->mm->mmap_sem); | |
168 | ||
b5e89ed5 DA |
169 | buf_priv->currently_mapped = I810_BUF_UNMAPPED; |
170 | buf_priv->virtual = NULL; | |
1da177e4 LT |
171 | |
172 | return retcode; | |
173 | } | |
174 | ||
eddca551 | 175 | static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d, |
1da177e4 LT |
176 | struct file *filp) |
177 | { | |
056219e2 | 178 | struct drm_buf *buf; |
1da177e4 LT |
179 | drm_i810_buf_priv_t *buf_priv; |
180 | int retcode = 0; | |
181 | ||
182 | buf = i810_freelist_get(dev); | |
183 | if (!buf) { | |
184 | retcode = -ENOMEM; | |
b5e89ed5 | 185 | DRM_DEBUG("retcode=%d\n", retcode); |
1da177e4 LT |
186 | return retcode; |
187 | } | |
188 | ||
189 | retcode = i810_map_buffer(buf, filp); | |
190 | if (retcode) { | |
191 | i810_freelist_put(dev, buf); | |
b5e89ed5 | 192 | DRM_ERROR("mapbuf failed, retcode %d\n", retcode); |
1da177e4 LT |
193 | return retcode; |
194 | } | |
195 | buf->filp = filp; | |
196 | buf_priv = buf->dev_private; | |
197 | d->granted = 1; | |
b5e89ed5 DA |
198 | d->request_idx = buf->idx; |
199 | d->request_size = buf->total; | |
200 | d->virtual = buf_priv->virtual; | |
1da177e4 LT |
201 | |
202 | return retcode; | |
203 | } | |
204 | ||
/* Release all resources set up by i810_dma_initialize(): the ring
 * mapping, the hardware status page, the private structure itself and
 * every per-buffer kernel mapping.  Safe to call on partial init state.
 */
static int i810_dma_cleanup(struct drm_device * dev)
{
	struct drm_device_dma *dma = dev->dma;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ) && dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev->dev_private) {
		int i;
		drm_i810_private_t *dev_priv =
		    (drm_i810_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start) {
			drm_core_ioremapfree(&dev_priv->ring.map, dev);
		}
		if (dev_priv->hw_status_page) {
			pci_free_consistent(dev->pdev, PAGE_SIZE,
					    dev_priv->hw_status_page,
					    dev_priv->dma_status_page);
			/* Need to rewrite hardware status page */
			I810_WRITE(0x02080, 0x1ffff000);
		}
		drm_free(dev->dev_private, sizeof(drm_i810_private_t),
			 DRM_MEM_DRIVER);
		dev->dev_private = NULL;

		/* Drop the kernel-side ioremap of each dma buffer. */
		for (i = 0; i < dma->buf_count; i++) {
			struct drm_buf *buf = dma->buflist[i];
			drm_i810_buf_priv_t *buf_priv = buf->dev_private;

			if (buf_priv->kernel_virtual && buf->total)
				drm_core_ioremapfree(&buf_priv->map, dev);
		}
	}
	return 0;
}
245 | ||
/* Spin until at least 'n' bytes are free in the low-priority ring.
 *
 * The 3-second timeout is restarted whenever the hardware head pointer
 * makes progress; if the head stays put past the deadline the chip is
 * considered locked up and we give up.  Returns the number of poll
 * iterations performed.
 */
static int i810_wait_ring(struct drm_device * dev, int n)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
	int iters = 0;
	unsigned long end;
	unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;

	end = jiffies + (HZ * 3);
	while (ring->space < n) {
		/* Re-read head from hardware and recompute free space,
		 * keeping 8 bytes of slack between tail and head.
		 */
		ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;

		if (ring->head != last_head) {
			/* Hardware made progress: restart the deadline. */
			end = jiffies + (HZ * 3);
			last_head = ring->head;
		}

		iters++;
		if (time_before(end, jiffies)) {
			DRM_ERROR("space: %d wanted %d\n", ring->space, n);
			DRM_ERROR("lockup\n");
			goto out_wait_ring;
		}
		udelay(1);
	}

out_wait_ring:
	return iters;
}
278 | ||
eddca551 | 279 | static void i810_kernel_lost_context(struct drm_device * dev) |
1da177e4 | 280 | { |
b5e89ed5 DA |
281 | drm_i810_private_t *dev_priv = dev->dev_private; |
282 | drm_i810_ring_buffer_t *ring = &(dev_priv->ring); | |
1da177e4 | 283 | |
b5e89ed5 DA |
284 | ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR; |
285 | ring->tail = I810_READ(LP_RING + RING_TAIL); | |
286 | ring->space = ring->head - (ring->tail + 8); | |
287 | if (ring->space < 0) | |
288 | ring->space += ring->Size; | |
1da177e4 LT |
289 | } |
290 | ||
eddca551 | 291 | static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_priv) |
1da177e4 | 292 | { |
cdd55a29 | 293 | struct drm_device_dma *dma = dev->dma; |
b5e89ed5 DA |
294 | int my_idx = 24; |
295 | u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx); | |
296 | int i; | |
1da177e4 LT |
297 | |
298 | if (dma->buf_count > 1019) { | |
b5e89ed5 DA |
299 | /* Not enough space in the status page for the freelist */ |
300 | return -EINVAL; | |
1da177e4 LT |
301 | } |
302 | ||
b5e89ed5 | 303 | for (i = 0; i < dma->buf_count; i++) { |
056219e2 | 304 | struct drm_buf *buf = dma->buflist[i]; |
b5e89ed5 | 305 | drm_i810_buf_priv_t *buf_priv = buf->dev_private; |
1da177e4 | 306 | |
b5e89ed5 DA |
307 | buf_priv->in_use = hw_status++; |
308 | buf_priv->my_use_idx = my_idx; | |
309 | my_idx += 4; | |
1da177e4 | 310 | |
b5e89ed5 | 311 | *buf_priv->in_use = I810_BUF_FREE; |
1da177e4 | 312 | |
b9094d3a DA |
313 | buf_priv->map.offset = buf->bus_address; |
314 | buf_priv->map.size = buf->total; | |
315 | buf_priv->map.type = _DRM_AGP; | |
316 | buf_priv->map.flags = 0; | |
317 | buf_priv->map.mtrr = 0; | |
318 | ||
319 | drm_core_ioremap(&buf_priv->map, dev); | |
320 | buf_priv->kernel_virtual = buf_priv->map.handle; | |
321 | ||
1da177e4 LT |
322 | } |
323 | return 0; | |
324 | } | |
325 | ||
/* One-shot driver initialisation from the DMA init ioctl: locate the
 * sarea/mmio/buffer maps, ioremap the ring, allocate and program the
 * hardware status page, and build the buffer freelist.  On any failure
 * dev->dev_private is set first so i810_dma_cleanup() can unwind
 * whatever was already set up.  Returns 0 or a negative errno.
 */
static int i810_dma_initialize(struct drm_device * dev,
			       drm_i810_private_t * dev_priv,
			       drm_i810_init_t * init)
{
	struct drm_map_list *r_list;
	memset(dev_priv, 0, sizeof(drm_i810_private_t));

	/* The sarea is the SHM map flagged as containing the lock. */
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->map->type == _DRM_SHM &&
		    r_list->map->flags & _DRM_CONTAINS_LOCK) {
			dev_priv->sarea_map = r_list->map;
			break;
		}
	}
	if (!dev_priv->sarea_map) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not find sarea!\n");
		return -EINVAL;
	}
	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return -EINVAL;
	}
	dev->agp_buffer_token = init->buffers_offset;
	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
	if (!dev->agp_buffer_map) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not find dma buffer map!\n");
		return -EINVAL;
	}

	/* Driver-private state lives at an offset inside the sarea. */
	dev_priv->sarea_priv = (drm_i810_sarea_t *)
	    ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;

	/* Map the ring out of the AGP aperture. */
	dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = _DRM_AGP;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	/* Ring size is a power of two, so Size-1 masks the tail. */
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->w = init->w;
	dev_priv->h = init->h;
	dev_priv->pitch = init->pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->front_offset = init->front_offset;

	dev_priv->overlay_offset = init->overlay_offset;
	dev_priv->overlay_physical = init->overlay_physical;

	/* Pre-combined destination-buffer info words used for
	 * validation in i810EmitDestVerified().
	 */
	dev_priv->front_di1 = init->front_offset | init->pitch_bits;
	dev_priv->back_di1 = init->back_offset | init->pitch_bits;
	dev_priv->zi1 = init->depth_offset | init->pitch_bits;

	/* Program Hardware Status Page */
	dev_priv->hw_status_page =
	    pci_alloc_consistent(dev->pdev, PAGE_SIZE,
				 &dev_priv->dma_status_page);
	if (!dev_priv->hw_status_page) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	/* Point the chip's status-page register at the DMA address. */
	I810_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	/* Now we need to init our freelist */
	if (i810_freelist_init(dev, dev_priv) != 0) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("Not enough space in the status page for"
			  " the freelist\n");
		return -ENOMEM;
	}
	dev->dev_private = (void *)dev_priv;

	return 0;
}
432 | ||
/* i810 DRM version 1.1 used a smaller init structure with different
 * ordering of values than is currently used (drm >= 1.2). There is
 * no defined way to detect the XFree version to correct this problem,
 * however by checking using this procedure we can detect the correct
 * thing to do.
 *
 * #1 Read the Smaller init structure from user-space
 * #2 Verify the overlay_physical is a valid physical address, or NULL
 *    If it isn't then we have a v1.1 client. Fix up params.
 *    If it is, then we have a 1.2 client... get the rest of the data.
 */
static int i810_dma_init_compat(drm_i810_init_t * init, unsigned long arg)
{

	/* Get v1.1 init data */
	if (copy_from_user(init, (drm_i810_pre12_init_t __user *) arg,
			   sizeof(drm_i810_pre12_init_t))) {
		return -EFAULT;
	}

	if ((!init->overlay_physical) || (init->overlay_physical > 4096)) {

		/* This is a v1.2 client, just get the v1.2 init data */
		DRM_INFO("Using POST v1.2 init.\n");
		if (copy_from_user(init, (drm_i810_init_t __user *) arg,
				   sizeof(drm_i810_init_t))) {
			return -EFAULT;
		}
	} else {

		/* This is a v1.1 client, fix the params.
		 * In the v1.1 layout the fields sit four slots earlier,
		 * so shift them into their v1.2 positions and clear the
		 * overlay fields, which v1.1 did not have.
		 */
		DRM_INFO("Using PRE v1.2 init.\n");
		init->pitch_bits = init->h;
		init->pitch = init->w;
		init->h = init->overlay_physical;
		init->w = init->overlay_offset;
		init->overlay_physical = 0;
		init->overlay_offset = 0;
	}

	return 0;
}
475 | ||
/* DMA init ioctl entry point.  Reads only the 'func' selector first,
 * then dispatches: legacy v1.1/1.2 init (with compat fixups), v1.4
 * init, or cleanup.  An unknown func is treated as v1.4 init via the
 * deliberate 'default:' fallthrough into I810_INIT_DMA_1_4.
 */
static int i810_dma_init(struct inode *inode, struct file *filp,
			 unsigned int cmd, unsigned long arg)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->head->dev;
	drm_i810_private_t *dev_priv;
	drm_i810_init_t init;
	int retcode = 0;

	/* Get only the init func */
	if (copy_from_user
	    (&init, (void __user *)arg, sizeof(drm_i810_init_func_t)))
		return -EFAULT;

	switch (init.func) {
	case I810_INIT_DMA:
		/* This case is for backward compatibility. It
		 * handles XFree 4.1.0 and 4.2.0, and has to
		 * do some parameter checking as described below.
		 * It will someday go away.
		 */
		retcode = i810_dma_init_compat(&init, arg);
		if (retcode)
			return retcode;

		dev_priv = drm_alloc(sizeof(drm_i810_private_t),
				     DRM_MEM_DRIVER);
		if (dev_priv == NULL)
			return -ENOMEM;
		retcode = i810_dma_initialize(dev, dev_priv, &init);
		break;

	default:		/* unknown funcs fall through to v1.4 init */
	case I810_INIT_DMA_1_4:
		DRM_INFO("Using v1.4 init.\n");
		if (copy_from_user(&init, (drm_i810_init_t __user *) arg,
				   sizeof(drm_i810_init_t))) {
			return -EFAULT;
		}
		dev_priv = drm_alloc(sizeof(drm_i810_private_t),
				     DRM_MEM_DRIVER);
		if (dev_priv == NULL)
			return -ENOMEM;
		retcode = i810_dma_initialize(dev, dev_priv, &init);
		break;

	case I810_CLEANUP_DMA:
		DRM_INFO("DMA Cleanup\n");
		retcode = i810_dma_cleanup(dev);
		break;
	}

	return retcode;
}
530 | ||
1da177e4 LT |
531 | /* Most efficient way to verify state for the i810 is as it is |
532 | * emitted. Non-conformant state is silently dropped. | |
533 | * | |
534 | * Use 'volatile' & local var tmp to force the emitted values to be | |
535 | * identical to the verified ones. | |
536 | */ | |
eddca551 | 537 | static void i810EmitContextVerified(struct drm_device * dev, |
b5e89ed5 | 538 | volatile unsigned int *code) |
1da177e4 | 539 | { |
b5e89ed5 | 540 | drm_i810_private_t *dev_priv = dev->dev_private; |
1da177e4 LT |
541 | int i, j = 0; |
542 | unsigned int tmp; | |
543 | RING_LOCALS; | |
544 | ||
b5e89ed5 | 545 | BEGIN_LP_RING(I810_CTX_SETUP_SIZE); |
1da177e4 | 546 | |
b5e89ed5 DA |
547 | OUT_RING(GFX_OP_COLOR_FACTOR); |
548 | OUT_RING(code[I810_CTXREG_CF1]); | |
1da177e4 | 549 | |
b5e89ed5 DA |
550 | OUT_RING(GFX_OP_STIPPLE); |
551 | OUT_RING(code[I810_CTXREG_ST1]); | |
1da177e4 | 552 | |
b5e89ed5 | 553 | for (i = 4; i < I810_CTX_SETUP_SIZE; i++) { |
1da177e4 LT |
554 | tmp = code[i]; |
555 | ||
b5e89ed5 DA |
556 | if ((tmp & (7 << 29)) == (3 << 29) && |
557 | (tmp & (0x1f << 24)) < (0x1d << 24)) { | |
558 | OUT_RING(tmp); | |
1da177e4 | 559 | j++; |
b5e89ed5 DA |
560 | } else |
561 | printk("constext state dropped!!!\n"); | |
1da177e4 LT |
562 | } |
563 | ||
564 | if (j & 1) | |
b5e89ed5 | 565 | OUT_RING(0); |
1da177e4 LT |
566 | |
567 | ADVANCE_LP_RING(); | |
568 | } | |
569 | ||
eddca551 | 570 | static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *code) |
1da177e4 | 571 | { |
b5e89ed5 | 572 | drm_i810_private_t *dev_priv = dev->dev_private; |
1da177e4 LT |
573 | int i, j = 0; |
574 | unsigned int tmp; | |
575 | RING_LOCALS; | |
576 | ||
b5e89ed5 | 577 | BEGIN_LP_RING(I810_TEX_SETUP_SIZE); |
1da177e4 | 578 | |
b5e89ed5 DA |
579 | OUT_RING(GFX_OP_MAP_INFO); |
580 | OUT_RING(code[I810_TEXREG_MI1]); | |
581 | OUT_RING(code[I810_TEXREG_MI2]); | |
582 | OUT_RING(code[I810_TEXREG_MI3]); | |
1da177e4 | 583 | |
b5e89ed5 | 584 | for (i = 4; i < I810_TEX_SETUP_SIZE; i++) { |
1da177e4 LT |
585 | tmp = code[i]; |
586 | ||
b5e89ed5 DA |
587 | if ((tmp & (7 << 29)) == (3 << 29) && |
588 | (tmp & (0x1f << 24)) < (0x1d << 24)) { | |
589 | OUT_RING(tmp); | |
1da177e4 | 590 | j++; |
b5e89ed5 DA |
591 | } else |
592 | printk("texture state dropped!!!\n"); | |
1da177e4 LT |
593 | } |
594 | ||
595 | if (j & 1) | |
b5e89ed5 | 596 | OUT_RING(0); |
1da177e4 LT |
597 | |
598 | ADVANCE_LP_RING(); | |
599 | } | |
600 | ||
1da177e4 LT |
601 | /* Need to do some additional checking when setting the dest buffer. |
602 | */ | |
/* Emit destination-buffer state, validating the client's DI1 word
 * against the front/back values precomputed in i810_dma_initialize().
 * A bad DI1 is skipped (logged) rather than emitted.
 */
static void i810EmitDestVerified(struct drm_device * dev,
				 volatile unsigned int *code)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	unsigned int tmp;
	RING_LOCALS;

	BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);

	tmp = code[I810_DESTREG_DI1];
	if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
		OUT_RING(CMD_OP_DESTBUFFER_INFO);
		OUT_RING(tmp);
	} else
		DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
			  tmp, dev_priv->front_di1, dev_priv->back_di1);

	/* invariant: depth buffer info always comes from the kernel copy */
	OUT_RING(CMD_OP_Z_BUFFER_INFO);
	OUT_RING(dev_priv->zi1);

	OUT_RING(GFX_OP_DESTBUFFER_VARS);
	OUT_RING(code[I810_DESTREG_DV1]);

	OUT_RING(GFX_OP_DRAWRECT_INFO);
	OUT_RING(code[I810_DESTREG_DR1]);
	OUT_RING(code[I810_DESTREG_DR2]);
	OUT_RING(code[I810_DESTREG_DR3]);
	OUT_RING(code[I810_DESTREG_DR4]);
	OUT_RING(0);		/* pad */

	ADVANCE_LP_RING();
}
637 | ||
eddca551 | 638 | static void i810EmitState(struct drm_device * dev) |
1da177e4 | 639 | { |
b5e89ed5 DA |
640 | drm_i810_private_t *dev_priv = dev->dev_private; |
641 | drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; | |
1da177e4 | 642 | unsigned int dirty = sarea_priv->dirty; |
b5e89ed5 | 643 | |
1da177e4 LT |
644 | DRM_DEBUG("%s %x\n", __FUNCTION__, dirty); |
645 | ||
646 | if (dirty & I810_UPLOAD_BUFFERS) { | |
b5e89ed5 | 647 | i810EmitDestVerified(dev, sarea_priv->BufferState); |
1da177e4 LT |
648 | sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS; |
649 | } | |
650 | ||
651 | if (dirty & I810_UPLOAD_CTX) { | |
b5e89ed5 | 652 | i810EmitContextVerified(dev, sarea_priv->ContextState); |
1da177e4 LT |
653 | sarea_priv->dirty &= ~I810_UPLOAD_CTX; |
654 | } | |
655 | ||
656 | if (dirty & I810_UPLOAD_TEX0) { | |
b5e89ed5 | 657 | i810EmitTexVerified(dev, sarea_priv->TexState[0]); |
1da177e4 LT |
658 | sarea_priv->dirty &= ~I810_UPLOAD_TEX0; |
659 | } | |
660 | ||
661 | if (dirty & I810_UPLOAD_TEX1) { | |
b5e89ed5 | 662 | i810EmitTexVerified(dev, sarea_priv->TexState[1]); |
1da177e4 LT |
663 | sarea_priv->dirty &= ~I810_UPLOAD_TEX1; |
664 | } | |
665 | } | |
666 | ||
1da177e4 LT |
667 | /* need to verify |
668 | */ | |
/* Emit solid-colour blits to clear the requested buffers (front/back/
 * depth) inside each sarea clip rectangle.  When the pages are flipped
 * (current_page == 1) the FRONT/BACK flags are swapped so the physical
 * offsets remain correct.
 */
static void i810_dma_dispatch_clear(struct drm_device * dev, int flags,
				    unsigned int clear_color,
				    unsigned int clear_zval)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int pitch = dev_priv->pitch;
	int cpp = 2;		/* 16bpp */
	int i;
	RING_LOCALS;

	if (dev_priv->current_page == 1) {
		/* Pages flipped: front and back swap roles. */
		unsigned int tmp = flags;

		flags &= ~(I810_FRONT | I810_BACK);
		if (tmp & I810_FRONT)
			flags |= I810_BACK;
		if (tmp & I810_BACK)
			flags |= I810_FRONT;
	}

	i810_kernel_lost_context(dev);

	if (nbox > I810_NR_SAREA_CLIPRECTS)
		nbox = I810_NR_SAREA_CLIPRECTS;

	for (i = 0; i < nbox; i++, pbox++) {
		unsigned int x = pbox->x1;
		unsigned int y = pbox->y1;
		unsigned int width = (pbox->x2 - x) * cpp;
		unsigned int height = pbox->y2 - y;
		unsigned int start = y * pitch + x * cpp;

		/* Skip degenerate or out-of-bounds rectangles. */
		if (pbox->x1 > pbox->x2 ||
		    pbox->y1 > pbox->y2 ||
		    pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
			continue;

		if (flags & I810_FRONT) {
			BEGIN_LP_RING(6);
			OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
			OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
			OUT_RING((height << 16) | width);
			OUT_RING(start);
			OUT_RING(clear_color);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}

		if (flags & I810_BACK) {
			BEGIN_LP_RING(6);
			OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
			OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
			OUT_RING((height << 16) | width);
			OUT_RING(dev_priv->back_offset + start);
			OUT_RING(clear_color);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}

		if (flags & I810_DEPTH) {
			BEGIN_LP_RING(6);
			OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
			OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
			OUT_RING((height << 16) | width);
			OUT_RING(dev_priv->depth_offset + start);
			OUT_RING(clear_zval);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}
}
743 | ||
/* Copy the back buffer to the front buffer (a SRC_COPY blit per clip
 * rectangle).  Source/destination offsets are chosen based on which
 * page is currently the front (current_page).
 */
static void i810_dma_dispatch_swap(struct drm_device * dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int pitch = dev_priv->pitch;
	int cpp = 2;		/* 16bpp */
	int i;
	RING_LOCALS;

	DRM_DEBUG("swapbuffers\n");

	i810_kernel_lost_context(dev);

	if (nbox > I810_NR_SAREA_CLIPRECTS)
		nbox = I810_NR_SAREA_CLIPRECTS;

	for (i = 0; i < nbox; i++, pbox++) {
		unsigned int w = pbox->x2 - pbox->x1;
		unsigned int h = pbox->y2 - pbox->y1;
		unsigned int dst = pbox->x1 * cpp + pbox->y1 * pitch;
		unsigned int start = dst;

		/* Skip degenerate or out-of-bounds rectangles. */
		if (pbox->x1 > pbox->x2 ||
		    pbox->y1 > pbox->y2 ||
		    pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
			continue;

		BEGIN_LP_RING(6);
		OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4);
		OUT_RING(pitch | (0xCC << 16));	/* 0xCC = copy ROP */
		OUT_RING((h << 16) | (w * cpp));
		/* Destination: whichever buffer is currently the front. */
		if (dev_priv->current_page == 0)
			OUT_RING(dev_priv->front_offset + start);
		else
			OUT_RING(dev_priv->back_offset + start);
		OUT_RING(pitch);
		/* Source: the other buffer. */
		if (dev_priv->current_page == 0)
			OUT_RING(dev_priv->back_offset + start);
		else
			OUT_RING(dev_priv->front_offset + start);
		ADVANCE_LP_RING();
	}
}
789 | ||
/* Dispatch a client vertex buffer as a protected batch buffer, once per
 * clip rectangle (with a scissor emitted for each).  If 'discard' is
 * set the buffer is handed to the hardware, which writes I810_BUF_FREE
 * into its status-page slot when the batch completes.
 */
static void i810_dma_dispatch_vertex(struct drm_device * dev,
				     struct drm_buf * buf, int discard, int used)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	struct drm_clip_rect *box = sarea_priv->boxes;
	int nbox = sarea_priv->nbox;
	unsigned long address = (unsigned long)buf->bus_address;
	unsigned long start = address - dev->agp->base;
	int i = 0;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	if (nbox > I810_NR_SAREA_CLIPRECTS)
		nbox = I810_NR_SAREA_CLIPRECTS;

	/* Reject oversized requests by dispatching nothing. */
	if (used > 4 * 1024)
		used = 0;

	if (sarea_priv->dirty)
		i810EmitState(dev);

	if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
		unsigned int prim = (sarea_priv->vertex_prim & PR_MASK);

		/* Write the primitive header into the first dword of
		 * the buffer; length field is in dwords minus 2.
		 */
		*(u32 *) buf_priv->kernel_virtual =
		    ((GFX_OP_PRIMITIVE | prim | ((used / 4) - 2)));

		/* Pad to a qword boundary if needed. */
		if (used & 4) {
			*(u32 *) ((char *) buf_priv->kernel_virtual + used) = 0;
			used += 4;
		}

		i810_unmap_buffer(buf);
	}

	if (used) {
		do {
			if (i < nbox) {
				/* Scissor to the current cliprect. */
				BEGIN_LP_RING(4);
				OUT_RING(GFX_OP_SCISSOR | SC_UPDATE_SCISSOR |
					 SC_ENABLE);
				OUT_RING(GFX_OP_SCISSOR_INFO);
				OUT_RING(box[i].x1 | (box[i].y1 << 16));
				OUT_RING((box[i].x2 -
					  1) | ((box[i].y2 - 1) << 16));
				ADVANCE_LP_RING();
			}

			BEGIN_LP_RING(4);
			OUT_RING(CMD_OP_BATCH_BUFFER);
			OUT_RING(start | BB1_PROTECTED);
			OUT_RING(start + used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();

		} while (++i < nbox);
	}

	if (discard) {
		dev_priv->counter++;

		/* Ownership passes to the hardware; it flips the slot
		 * back to FREE itself when the batch retires.
		 */
		(void)cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
			      I810_BUF_HARDWARE);

		BEGIN_LP_RING(8);
		OUT_RING(CMD_STORE_DWORD_IDX);
		OUT_RING(20);
		OUT_RING(dev_priv->counter);
		OUT_RING(CMD_STORE_DWORD_IDX);
		OUT_RING(buf_priv->my_use_idx);
		OUT_RING(I810_BUF_FREE);
		OUT_RING(CMD_REPORT_HEAD);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}
869 | ||
/* Emit the ring commands that flip the displayed page between the front
 * and back buffers.  Must be called with the HW lock held; toggles
 * current_page and mirrors it into the SAREA.
 */
static void i810_dma_dispatch_flip(struct drm_device * dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	int pitch = dev_priv->pitch;
	RING_LOCALS;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __FUNCTION__,
		  dev_priv->current_page,
		  dev_priv->sarea_priv->pf_current_page);

	i810_kernel_lost_context(dev);

	/* Flush caches before retargeting the display base address. */
	BEGIN_LP_RING(2);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);
	/* On i815 at least ASYNC is buggy */
	/* pitch<<5 is from 11.2.8 p158,
	   its the pitch / 8 then left shifted 8,
	   so (pitch >> 3) << 8 */
	OUT_RING(CMD_OP_FRONTBUFFER_INFO | (pitch << 5) /*| ASYNC_FLIP */ );
	/* Point the display at whichever buffer is not currently shown. */
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Wait for the plane A flip event before continuing. */
	BEGIN_LP_RING(2);
	OUT_RING(CMD_OP_WAIT_FOR_EVENT | WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Increment the frame counter. The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

}
916 | ||
/* Bring the hardware to a quiescent state: flush the map cache, request
 * a head-pointer report, then wait until the ring is nearly empty.
 */
static void i810_dma_quiescent(struct drm_device * dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	BEGIN_LP_RING(4);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(CMD_REPORT_HEAD);
	OUT_RING(0);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Ring size minus 8 == the most free space an active ring can show. */
	i810_wait_ring(dev, dev_priv->ring.Size - 8);
}
935 | ||
/* Drain the ring, then reclaim every buffer the hardware has finished
 * with (HARDWARE -> FREE).  Buffers still owned by a client are left
 * untouched.  Always returns 0.
 */
static int i810_flush_queue(struct drm_device * dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	int i, ret = 0;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	BEGIN_LP_RING(2);
	OUT_RING(CMD_REPORT_HEAD);
	OUT_RING(0);
	ADVANCE_LP_RING();

	i810_wait_ring(dev, dev_priv->ring.Size - 8);

	for (i = 0; i < dma->buf_count; i++) {
		struct drm_buf *buf = dma->buflist[i];
		drm_i810_buf_priv_t *buf_priv = buf->dev_private;

		/* Atomic hand-back: only flips HARDWARE-owned buffers. */
		int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
				   I810_BUF_FREE);

		if (used == I810_BUF_HARDWARE)
			DRM_DEBUG("reclaimed from HARDWARE\n");
		if (used == I810_BUF_CLIENT)
			DRM_DEBUG("still on client\n");
	}

	return ret;
}
969 | ||
/* Must be called with the lock held */
/* Release every buffer owned by @filp: flush the queue first, then flip
 * the client's buffers back to FREE and clear any stale mapping state.
 */
static void i810_reclaim_buffers(struct drm_device * dev, struct file *filp)
{
	struct drm_device_dma *dma = dev->dma;
	int i;

	/* Nothing to do if DMA or driver state was never initialized. */
	if (!dma)
		return;
	if (!dev->dev_private)
		return;
	if (!dma->buflist)
		return;

	i810_flush_queue(dev);

	for (i = 0; i < dma->buf_count; i++) {
		struct drm_buf *buf = dma->buflist[i];
		drm_i810_buf_priv_t *buf_priv = buf->dev_private;

		if (buf->filp == filp && buf_priv) {
			/* Only reclaim buffers this client still owns. */
			int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
					   I810_BUF_FREE);

			if (used == I810_BUF_CLIENT)
				DRM_DEBUG("reclaimed from client\n");
			if (buf_priv->currently_mapped == I810_BUF_MAPPED)
				buf_priv->currently_mapped = I810_BUF_UNMAPPED;
		}
	}
}
1000 | ||
c94f7029 DA |
1001 | static int i810_flush_ioctl(struct inode *inode, struct file *filp, |
1002 | unsigned int cmd, unsigned long arg) | |
1da177e4 | 1003 | { |
eddca551 DA |
1004 | struct drm_file *priv = filp->private_data; |
1005 | struct drm_device *dev = priv->head->dev; | |
1da177e4 LT |
1006 | |
1007 | LOCK_TEST_WITH_RETURN(dev, filp); | |
1008 | ||
b5e89ed5 DA |
1009 | i810_flush_queue(dev); |
1010 | return 0; | |
1da177e4 LT |
1011 | } |
1012 | ||
1da177e4 | 1013 | static int i810_dma_vertex(struct inode *inode, struct file *filp, |
b5e89ed5 | 1014 | unsigned int cmd, unsigned long arg) |
1da177e4 | 1015 | { |
eddca551 DA |
1016 | struct drm_file *priv = filp->private_data; |
1017 | struct drm_device *dev = priv->head->dev; | |
cdd55a29 | 1018 | struct drm_device_dma *dma = dev->dma; |
b5e89ed5 DA |
1019 | drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; |
1020 | u32 *hw_status = dev_priv->hw_status_page; | |
1021 | drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) | |
1022 | dev_priv->sarea_priv; | |
1da177e4 LT |
1023 | drm_i810_vertex_t vertex; |
1024 | ||
b5e89ed5 DA |
1025 | if (copy_from_user |
1026 | (&vertex, (drm_i810_vertex_t __user *) arg, sizeof(vertex))) | |
1da177e4 LT |
1027 | return -EFAULT; |
1028 | ||
1029 | LOCK_TEST_WITH_RETURN(dev, filp); | |
1030 | ||
1031 | DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n", | |
1032 | vertex.idx, vertex.used, vertex.discard); | |
1033 | ||
b5e89ed5 | 1034 | if (vertex.idx < 0 || vertex.idx > dma->buf_count) |
1da177e4 LT |
1035 | return -EINVAL; |
1036 | ||
b5e89ed5 DA |
1037 | i810_dma_dispatch_vertex(dev, |
1038 | dma->buflist[vertex.idx], | |
1039 | vertex.discard, vertex.used); | |
1da177e4 | 1040 | |
b5e89ed5 | 1041 | atomic_add(vertex.used, &dev->counts[_DRM_STAT_SECONDARY]); |
1da177e4 | 1042 | atomic_inc(&dev->counts[_DRM_STAT_DMA]); |
b5e89ed5 DA |
1043 | sarea_priv->last_enqueue = dev_priv->counter - 1; |
1044 | sarea_priv->last_dispatch = (int)hw_status[5]; | |
1da177e4 LT |
1045 | |
1046 | return 0; | |
1047 | } | |
1048 | ||
1da177e4 | 1049 | static int i810_clear_bufs(struct inode *inode, struct file *filp, |
b5e89ed5 | 1050 | unsigned int cmd, unsigned long arg) |
1da177e4 | 1051 | { |
eddca551 DA |
1052 | struct drm_file *priv = filp->private_data; |
1053 | struct drm_device *dev = priv->head->dev; | |
1da177e4 LT |
1054 | drm_i810_clear_t clear; |
1055 | ||
b5e89ed5 DA |
1056 | if (copy_from_user |
1057 | (&clear, (drm_i810_clear_t __user *) arg, sizeof(clear))) | |
1da177e4 LT |
1058 | return -EFAULT; |
1059 | ||
1060 | LOCK_TEST_WITH_RETURN(dev, filp); | |
1061 | ||
b5e89ed5 DA |
1062 | /* GH: Someone's doing nasty things... */ |
1063 | if (!dev->dev_private) { | |
1064 | return -EINVAL; | |
1065 | } | |
1da177e4 | 1066 | |
b5e89ed5 DA |
1067 | i810_dma_dispatch_clear(dev, clear.flags, |
1068 | clear.clear_color, clear.clear_depth); | |
1069 | return 0; | |
1da177e4 LT |
1070 | } |
1071 | ||
1072 | static int i810_swap_bufs(struct inode *inode, struct file *filp, | |
b5e89ed5 | 1073 | unsigned int cmd, unsigned long arg) |
1da177e4 | 1074 | { |
eddca551 DA |
1075 | struct drm_file *priv = filp->private_data; |
1076 | struct drm_device *dev = priv->head->dev; | |
1da177e4 LT |
1077 | |
1078 | DRM_DEBUG("i810_swap_bufs\n"); | |
1079 | ||
1080 | LOCK_TEST_WITH_RETURN(dev, filp); | |
1081 | ||
b5e89ed5 DA |
1082 | i810_dma_dispatch_swap(dev); |
1083 | return 0; | |
1da177e4 LT |
1084 | } |
1085 | ||
1086 | static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, | |
b5e89ed5 | 1087 | unsigned long arg) |
1da177e4 | 1088 | { |
eddca551 DA |
1089 | struct drm_file *priv = filp->private_data; |
1090 | struct drm_device *dev = priv->head->dev; | |
b5e89ed5 DA |
1091 | drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; |
1092 | u32 *hw_status = dev_priv->hw_status_page; | |
1093 | drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) | |
1094 | dev_priv->sarea_priv; | |
1095 | ||
1096 | sarea_priv->last_dispatch = (int)hw_status[5]; | |
1da177e4 LT |
1097 | return 0; |
1098 | } | |
1099 | ||
1100 | static int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, | |
b5e89ed5 | 1101 | unsigned long arg) |
1da177e4 | 1102 | { |
eddca551 DA |
1103 | struct drm_file *priv = filp->private_data; |
1104 | struct drm_device *dev = priv->head->dev; | |
b5e89ed5 DA |
1105 | int retcode = 0; |
1106 | drm_i810_dma_t d; | |
1107 | drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; | |
1108 | u32 *hw_status = dev_priv->hw_status_page; | |
1109 | drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) | |
1110 | dev_priv->sarea_priv; | |
1111 | ||
1112 | if (copy_from_user(&d, (drm_i810_dma_t __user *) arg, sizeof(d))) | |
1da177e4 LT |
1113 | return -EFAULT; |
1114 | ||
1115 | LOCK_TEST_WITH_RETURN(dev, filp); | |
1116 | ||
1117 | d.granted = 0; | |
1118 | ||
1119 | retcode = i810_dma_get_buffer(dev, &d, filp); | |
1120 | ||
1121 | DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", | |
1122 | current->pid, retcode, d.granted); | |
1123 | ||
eddca551 | 1124 | if (copy_to_user((void __user *) arg, &d, sizeof(d))) |
1da177e4 | 1125 | return -EFAULT; |
b5e89ed5 | 1126 | sarea_priv->last_dispatch = (int)hw_status[5]; |
1da177e4 LT |
1127 | |
1128 | return retcode; | |
1129 | } | |
1130 | ||
/* DRM_I810_COPY ioctl: historical no-op (the 2.4.x copy path is unneeded). */
static int i810_copybuf(struct inode *inode,
			struct file *filp, unsigned int cmd, unsigned long arg)
{
	return 0;
}
1137 | ||
/* DRM_I810_DOCOPY ioctl: historical no-op (the 2.4.x copy path is unneeded). */
static int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd,
		       unsigned long arg)
{
	return 0;
}
1144 | ||
/* Dispatch a client-built batch buffer (the root/master-only "mc" path)
 * and queue ring commands that mark the buffer FREE and store
 * @last_render into the status page once the hardware has consumed it.
 */
static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, int used,
				 unsigned int last_render)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned long address = (unsigned long)buf->bus_address;
	unsigned long start = address - dev->agp->base;	/* offset within the AGP aperture */
	int u;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	/* Take ownership of the buffer from the client. */
	u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE);
	if (u != I810_BUF_CLIENT) {
		DRM_DEBUG("MC found buffer that isn't mine!\n");
	}

	/* A "used" size beyond 4k is bogus; treat it as empty. */
	if (used > 4 * 1024)
		used = 0;

	sarea_priv->dirty = 0x7f;

	DRM_DEBUG("dispatch mc addr 0x%lx, used 0x%x\n", address, used);

	dev_priv->counter++;
	DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
	DRM_DEBUG("i810_dma_dispatch_mc\n");
	DRM_DEBUG("start : %lx\n", start);
	DRM_DEBUG("used : %d\n", used);
	DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);

	if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
		/* Pad to a qword boundary with a nop dword. */
		if (used & 4) {
			*(u32 *) ((char *) buf_priv->virtual + used) = 0;
			used += 4;
		}

		i810_unmap_buffer(buf);
	}
	BEGIN_LP_RING(4);
	OUT_RING(CMD_OP_BATCH_BUFFER);
	OUT_RING(start | BB1_PROTECTED);
	OUT_RING(start + used - 4);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Free the buffer and record last_render after the batch completes. */
	BEGIN_LP_RING(8);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(buf_priv->my_use_idx);
	OUT_RING(I810_BUF_FREE);
	OUT_RING(0);

	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(16);
	OUT_RING(last_render);
	OUT_RING(0);
	ADVANCE_LP_RING();
}
1204 | ||
1205 | static int i810_dma_mc(struct inode *inode, struct file *filp, | |
b5e89ed5 | 1206 | unsigned int cmd, unsigned long arg) |
1da177e4 | 1207 | { |
eddca551 DA |
1208 | struct drm_file *priv = filp->private_data; |
1209 | struct drm_device *dev = priv->head->dev; | |
cdd55a29 | 1210 | struct drm_device_dma *dma = dev->dma; |
b5e89ed5 | 1211 | drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; |
1da177e4 LT |
1212 | u32 *hw_status = dev_priv->hw_status_page; |
1213 | drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) | |
b5e89ed5 | 1214 | dev_priv->sarea_priv; |
1da177e4 LT |
1215 | drm_i810_mc_t mc; |
1216 | ||
b5e89ed5 | 1217 | if (copy_from_user(&mc, (drm_i810_mc_t __user *) arg, sizeof(mc))) |
1da177e4 LT |
1218 | return -EFAULT; |
1219 | ||
1220 | LOCK_TEST_WITH_RETURN(dev, filp); | |
1221 | ||
1222 | if (mc.idx >= dma->buf_count || mc.idx < 0) | |
1223 | return -EINVAL; | |
1224 | ||
1225 | i810_dma_dispatch_mc(dev, dma->buflist[mc.idx], mc.used, | |
b5e89ed5 | 1226 | mc.last_render); |
1da177e4 LT |
1227 | |
1228 | atomic_add(mc.used, &dev->counts[_DRM_STAT_SECONDARY]); | |
1229 | atomic_inc(&dev->counts[_DRM_STAT_DMA]); | |
b5e89ed5 DA |
1230 | sarea_priv->last_enqueue = dev_priv->counter - 1; |
1231 | sarea_priv->last_dispatch = (int)hw_status[5]; | |
1da177e4 LT |
1232 | |
1233 | return 0; | |
1234 | } | |
1235 | ||
1236 | static int i810_rstatus(struct inode *inode, struct file *filp, | |
1237 | unsigned int cmd, unsigned long arg) | |
1238 | { | |
eddca551 DA |
1239 | struct drm_file *priv = filp->private_data; |
1240 | struct drm_device *dev = priv->head->dev; | |
b5e89ed5 | 1241 | drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; |
1da177e4 | 1242 | |
b5e89ed5 | 1243 | return (int)(((u32 *) (dev_priv->hw_status_page))[4]); |
1da177e4 LT |
1244 | } |
1245 | ||
1246 | static int i810_ov0_info(struct inode *inode, struct file *filp, | |
b5e89ed5 | 1247 | unsigned int cmd, unsigned long arg) |
1da177e4 | 1248 | { |
eddca551 DA |
1249 | struct drm_file *priv = filp->private_data; |
1250 | struct drm_device *dev = priv->head->dev; | |
b5e89ed5 | 1251 | drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; |
1da177e4 LT |
1252 | drm_i810_overlay_t data; |
1253 | ||
1254 | data.offset = dev_priv->overlay_offset; | |
1255 | data.physical = dev_priv->overlay_physical; | |
b5e89ed5 DA |
1256 | if (copy_to_user |
1257 | ((drm_i810_overlay_t __user *) arg, &data, sizeof(data))) | |
1da177e4 LT |
1258 | return -EFAULT; |
1259 | return 0; | |
1260 | } | |
1261 | ||
1262 | static int i810_fstatus(struct inode *inode, struct file *filp, | |
1263 | unsigned int cmd, unsigned long arg) | |
1264 | { | |
eddca551 DA |
1265 | struct drm_file *priv = filp->private_data; |
1266 | struct drm_device *dev = priv->head->dev; | |
b5e89ed5 | 1267 | drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; |
1da177e4 LT |
1268 | |
1269 | LOCK_TEST_WITH_RETURN(dev, filp); | |
1270 | ||
1271 | return I810_READ(0x30008); | |
1272 | } | |
1273 | ||
1274 | static int i810_ov0_flip(struct inode *inode, struct file *filp, | |
b5e89ed5 | 1275 | unsigned int cmd, unsigned long arg) |
1da177e4 | 1276 | { |
eddca551 DA |
1277 | struct drm_file *priv = filp->private_data; |
1278 | struct drm_device *dev = priv->head->dev; | |
b5e89ed5 | 1279 | drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; |
1da177e4 LT |
1280 | |
1281 | LOCK_TEST_WITH_RETURN(dev, filp); | |
1282 | ||
1283 | //Tell the overlay to update | |
b5e89ed5 | 1284 | I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000); |
1da177e4 LT |
1285 | |
1286 | return 0; | |
1287 | } | |
1288 | ||
/* Not sure why this isn't set all the time:
 */
/* Enable page flipping: start with page 0 displayed and mirror that
 * state into the SAREA for clients.
 */
static void i810_do_init_pageflip(struct drm_device * dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);
	dev_priv->page_flipping = 1;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
1300 | ||
/* Disable page flipping.  If the back page is currently displayed, flip
 * one last time so page 0 is showing again before flipping is turned off.
 */
static int i810_do_cleanup_pageflip(struct drm_device * dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);
	if (dev_priv->current_page != 0)
		i810_dma_dispatch_flip(dev);

	dev_priv->page_flipping = 0;
	return 0;
}
1312 | ||
1313 | static int i810_flip_bufs(struct inode *inode, struct file *filp, | |
b5e89ed5 | 1314 | unsigned int cmd, unsigned long arg) |
1da177e4 | 1315 | { |
eddca551 DA |
1316 | struct drm_file *priv = filp->private_data; |
1317 | struct drm_device *dev = priv->head->dev; | |
1da177e4 LT |
1318 | drm_i810_private_t *dev_priv = dev->dev_private; |
1319 | ||
1320 | DRM_DEBUG("%s\n", __FUNCTION__); | |
1321 | ||
1322 | LOCK_TEST_WITH_RETURN(dev, filp); | |
1323 | ||
b5e89ed5 DA |
1324 | if (!dev_priv->page_flipping) |
1325 | i810_do_init_pageflip(dev); | |
1da177e4 | 1326 | |
b5e89ed5 DA |
1327 | i810_dma_dispatch_flip(dev); |
1328 | return 0; | |
1da177e4 LT |
1329 | } |
1330 | ||
/* Driver load hook: register the four i810-specific statistics counters
 * with the DRM core.
 */
int i810_driver_load(struct drm_device *dev, unsigned long flags)
{
	/* i810 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	return 0;
}
1342 | ||
/* Last-close hook: tear down the device's DMA state. */
void i810_driver_lastclose(struct drm_device * dev)
{
	i810_dma_cleanup(dev);
}
1347 | ||
/* Per-file preclose hook: if page flipping was active, finish it before
 * the file goes away.
 */
void i810_driver_preclose(struct drm_device * dev, DRMFILE filp)
{
	if (dev->dev_private) {
		drm_i810_private_t *dev_priv = dev->dev_private;
		if (dev_priv->page_flipping) {
			i810_do_cleanup_pageflip(dev);
		}
	}
}
1357 | ||
/* Locked reclaim hook: forward to i810_reclaim_buffers(), which requires
 * the HW lock the caller already holds.
 */
void i810_driver_reclaim_buffers_locked(struct drm_device * dev, struct file *filp)
{
	i810_reclaim_buffers(dev, filp);
}
1362 | ||
/* DRM quiescence hook: wait for the hardware to go idle; always succeeds. */
int i810_driver_dma_quiescent(struct drm_device * dev)
{
	i810_dma_quiescent(dev);
	return 0;
}
1368 | ||
/* Ioctl dispatch table, indexed by DRM_I810_* ioctl number.  Every entry
 * requires DRM authentication; INIT and MC are further restricted to the
 * master / root.
 */
drm_ioctl_desc_t i810_ioctls[] = {
	[DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I810_CLEAR)] = {i810_clear_bufs, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I810_FLUSH)] = {i810_flush_ioctl, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I810_GETAGE)] = {i810_getage, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I810_GETBUF)] = {i810_getbuf, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I810_SWAP)] = {i810_swap_bufs, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I810_COPY)] = {i810_copybuf, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I810_DOCOPY)] = {i810_docopy, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I810_OV0INFO)] = {i810_ov0_info, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I810_FSTATUS)] = {i810_fstatus, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I810_OV0FLIP)] = {i810_ov0_flip, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I810_MC)] = {i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I810_RSTATUS)] = {i810_rstatus, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I810_FLIP)] = {i810_flip_bufs, DRM_AUTH}
};

/* Number of entries in i810_ioctls[], exported to the DRM core. */
int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
cda17380 DA |
1388 | |
/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i810 is AGP.
 */
int i810_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}