/**
 * \file drmP.h
 * Private header for Direct Rendering Manager
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _DRM_P_H_
#define _DRM_P_H_

/* If you want the memory alloc debug functionality, change define below */
/* #define DEBUG_MEMORY */

#ifdef __KERNEL__
#ifdef __alpha__
/* add include of current.h so that "current" is defined
 * before static inline funcs in wait.h. Doing this so we
 * can build the DRM (part of PI DRI). 4/21/2000 S + B */
#include <asm/current.h>
#endif  /* __alpha__ */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/smp_lock.h>  /* For (un)lock_kernel */
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#if defined(__alpha__) || defined(__powerpc__)
#include <asm/pgtable.h>  /* For pte_wrprotect */
#endif
#include <asm/io.h>
#include <asm/mman.h>
#include <asm/uaccess.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
#include <linux/types.h>
#include <linux/agp_backend.h>
#endif
#include <linux/workqueue.h>
#include <linux/poll.h>
#include <asm/pgalloc.h>
#include "drm.h"

#include <linux/idr.h>

#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define __OS_HAS_MTRR (defined(CONFIG_MTRR))

struct drm_file;
struct drm_device;

#include "drm_os_linux.h"
#include "drm_hashtab.h"

/***********************************************************************/
/** \name DRM template customization defaults */
/*@{*/

/* driver capabilities and requirements mask */
#define DRIVER_USE_AGP     0x1
#define DRIVER_REQUIRE_AGP 0x2
#define DRIVER_USE_MTRR    0x4
#define DRIVER_PCI_DMA     0x8
#define DRIVER_SG          0x10
#define DRIVER_HAVE_DMA    0x20
#define DRIVER_HAVE_IRQ    0x40
#define DRIVER_IRQ_SHARED  0x80
#define DRIVER_IRQ_VBL     0x100
#define DRIVER_DMA_QUEUE   0x200
#define DRIVER_FB_DMA      0x400
#define DRIVER_IRQ_VBL2    0x800
#define DRIVER_GEM         0x1000
#define DRIVER_MODESET     0x2000

/***********************************************************************/
/** \name Begin the DRM... */
/*@{*/

#define DRM_DEBUG_CODE 2  /**< Include debugging code if > 1, then
                               also include looping detection. */

#define DRM_MAGIC_HASH_ORDER  4   /**< Size of key hash table. Must be power of 2. */
#define DRM_KERNEL_CONTEXT    0   /**< Change drm_resctx if changed */
#define DRM_RESERVED_CONTEXTS 1   /**< Change drm_resctx if changed */
#define DRM_LOOPING_LIMIT     5000000
#define DRM_TIME_SLICE        (HZ/20)  /**< Time slice for GLXContexts */
#define DRM_LOCK_SLICE        1   /**< Time slice for lock, in jiffies */

#define DRM_FLAG_DEBUG 0x01

#define DRM_MEM_DMA       0
#define DRM_MEM_SAREA     1
#define DRM_MEM_DRIVER    2
#define DRM_MEM_MAGIC     3
#define DRM_MEM_IOCTLS    4
#define DRM_MEM_MAPS      5
#define DRM_MEM_VMAS      6
#define DRM_MEM_BUFS      7
#define DRM_MEM_SEGS      8
#define DRM_MEM_PAGES     9
#define DRM_MEM_FILES    10
#define DRM_MEM_QUEUES   11
#define DRM_MEM_CMDS     12
#define DRM_MEM_MAPPINGS 13
#define DRM_MEM_BUFLISTS 14
#define DRM_MEM_AGPLISTS 15
#define DRM_MEM_TOTALAGP 16
#define DRM_MEM_BOUNDAGP 17
#define DRM_MEM_CTXBITMAP 18
#define DRM_MEM_STUB     19
#define DRM_MEM_SGLISTS  20
#define DRM_MEM_CTXLIST  21
#define DRM_MEM_MM       22
#define DRM_MEM_HASHTAB  23

#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
#define DRM_MAP_HASH_OFFSET 0x10000000

/*@}*/

/***********************************************************************/
/** \name Macros to make printk easier */
/*@{*/

/**
 * Error output.
 *
 * \param fmt printf() like format string.
 * \param arg arguments
 */
#define DRM_ERROR(fmt, arg...) \
    printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __func__ , ##arg)

/**
 * Memory error output.
 *
 * \param area memory area where the error occurred.
 * \param fmt printf() like format string.
 * \param arg arguments
 */
#define DRM_MEM_ERROR(area, fmt, arg...) \
    printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __func__, \
           drm_mem_stats[area].name , ##arg)

#define DRM_INFO(fmt, arg...)  printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg)

/**
 * Debug output.
 *
 * \param fmt printf() like format string.
 * \param arg arguments
 */
#if DRM_DEBUG_CODE
#define DRM_DEBUG(fmt, arg...)                          \
    do {                                                \
        if ( drm_debug )                                \
            printk(KERN_DEBUG                           \
                   "[" DRM_NAME ":%s] " fmt ,           \
                   __func__ , ##arg);                   \
    } while (0)
#else
#define DRM_DEBUG(fmt, arg...)  do { } while (0)
#endif

#define DRM_PROC_LIMIT (PAGE_SIZE-80)

#define DRM_PROC_PRINT(fmt, arg...)                                     \
   len += sprintf(&buf[len], fmt , ##arg);                              \
   if (len > DRM_PROC_LIMIT) { *eof = 1; return len - offset; }

#define DRM_PROC_PRINT_RET(ret, fmt, arg...)                            \
   len += sprintf(&buf[len], fmt , ##arg);                              \
   if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; }
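
/*
 * Illustrative sketch (not part of the original header): DRM_PROC_PRINT is
 * meant to be expanded inside a classic /proc read handler that supplies the
 * local names it references (buf, len, offset and eof).  The handler below is
 * hypothetical and only shows the expected shape, assuming the conventional
 * (buf, start, offset, request, eof, data) read_proc signature.
 *
 *    static int foo_proc_info(char *buf, char **start, off_t offset,
 *                             int request, int *eof, void *data)
 *    {
 *        int len = 0;
 *
 *        DRM_PROC_PRINT("example value: %d\n", 42);
 *        *eof = 1;
 *        return len - offset;
 *    }
 */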

/*@}*/

/***********************************************************************/
/** \name Internal types and structures */
/*@{*/

#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x)

#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)

#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
/**
 * Get the private SAREA mapping.
 *
 * \param _dev DRM device.
 * \param _ctx context number.
 * \param _map output mapping.
 */
#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do {       \
    (_map) = (_dev)->context_sareas[_ctx];              \
} while(0)

/**
 * Test that the hardware lock is held by the caller, returning otherwise.
 *
 * \param dev DRM device.
 * \param file_priv DRM file private of the caller.
 */
#define LOCK_TEST_WITH_RETURN( dev, file_priv )                         \
do {                                                                    \
    if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock) ||    \
        file_priv->master->lock.file_priv != file_priv) {               \
        DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
                   __func__, _DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock),\
                   file_priv->master->lock.file_priv, file_priv);       \
        return -EINVAL;                                                 \
    }                                                                   \
} while (0)
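
/*
 * Illustrative sketch (not part of the original header): a driver ioctl
 * handler normally invokes LOCK_TEST_WITH_RETURN before touching the
 * hardware, so callers that do not hold the heavyweight lock get -EINVAL.
 * The handler name below is hypothetical.
 *
 *    static int foo_dma_flush(struct drm_device *dev, void *data,
 *                             struct drm_file *file_priv)
 *    {
 *        LOCK_TEST_WITH_RETURN(dev, file_priv);
 *        ... safe to program the engine here ...
 *        return 0;
 *    }
 */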

/**
 * Copy an IOCTL return string to user space.
 */
#define DRM_COPY( name, value )                                         \
    len = strlen( value );                                              \
    if ( len > name##_len ) len = name##_len;                           \
    name##_len = strlen( value );                                       \
    if ( len && name ) {                                                \
        if ( copy_to_user( name, value, len ) )                         \
            return -EFAULT;                                             \
    }

/**
 * Ioctl function type.
 *
 * \param dev DRM device.
 * \param data pointer to the kernel-space copy of the ioctl argument.
 * \param file_priv DRM file private pointer.
 */
typedef int drm_ioctl_t(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);

typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
                               unsigned long arg);

#define DRM_AUTH          0x1
#define DRM_MASTER        0x2
#define DRM_ROOT_ONLY     0x4
#define DRM_CONTROL_ALLOW 0x8

struct drm_ioctl_desc {
    unsigned int cmd;
    drm_ioctl_t *func;
    int flags;
};

/**
 * Creates a driver or general drm_ioctl_desc array entry for the given
 * ioctl, for use by drm_ioctl().
 */
#define DRM_IOCTL_DEF(ioctl, func, flags) \
    [DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags}
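
/*
 * Illustrative sketch (not part of the original header): a driver builds its
 * ioctl table as a designated-initializer array of drm_ioctl_desc entries and
 * points drm_driver::ioctls / num_ioctls at it.  The ioctl number and handler
 * below are hypothetical.
 *
 *    struct drm_ioctl_desc foo_ioctls[] = {
 *        DRM_IOCTL_DEF(DRM_IOCTL_FOO_INIT, foo_init_ioctl,
 *                      DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
 *    };
 *    int foo_max_ioctl = DRM_ARRAY_SIZE(foo_ioctls);
 */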

struct drm_magic_entry {
    struct list_head head;
    struct drm_hash_item hash_item;
    struct drm_file *priv;
};

struct drm_vma_entry {
    struct list_head head;
    struct vm_area_struct *vma;
    pid_t pid;
};

/**
 * DMA buffer.
 */
struct drm_buf {
    int idx;                    /**< Index into master buflist */
    int total;                  /**< Buffer size */
    int order;                  /**< log-base-2(total) */
    int used;                   /**< Amount of buffer in use (for DMA) */
    unsigned long offset;       /**< Byte offset (used internally) */
    void *address;              /**< Address of buffer */
    unsigned long bus_address;  /**< Bus address of buffer */
    struct drm_buf *next;       /**< Kernel-only: used for free list */
    __volatile__ int waiting;   /**< On kernel DMA queue */
    __volatile__ int pending;   /**< On hardware DMA queue */
    wait_queue_head_t dma_wait; /**< Processes waiting */
    struct drm_file *file_priv; /**< Private of holding file descr */
    int context;                /**< Kernel queue for this buffer */
    int while_locked;           /**< Dispatch this buffer while locked */
    enum {
        DRM_LIST_NONE = 0,
        DRM_LIST_FREE = 1,
        DRM_LIST_WAIT = 2,
        DRM_LIST_PEND = 3,
        DRM_LIST_PRIO = 4,
        DRM_LIST_RECLAIM = 5
    } list;                     /**< Which list we're on */

    int dev_priv_size;          /**< Size of buffer private storage */
    void *dev_private;          /**< Per-buffer private storage */
};

/** bufs is one longer than it has to be */
struct drm_waitlist {
    int count;                  /**< Number of possible buffers */
    struct drm_buf **bufs;      /**< List of pointers to buffers */
    struct drm_buf **rp;        /**< Read pointer */
    struct drm_buf **wp;        /**< Write pointer */
    struct drm_buf **end;       /**< End pointer */
    spinlock_t read_lock;
    spinlock_t write_lock;
};

struct drm_freelist {
    int initialized;            /**< Freelist in use */
    atomic_t count;             /**< Number of free buffers */
    struct drm_buf *next;       /**< End pointer */

    wait_queue_head_t waiting;  /**< Processes waiting on free bufs */
    int low_mark;               /**< Low water mark */
    int high_mark;              /**< High water mark */
    atomic_t wfh;               /**< If waiting for high mark */
    spinlock_t lock;
};

typedef struct drm_dma_handle {
    dma_addr_t busaddr;
    void *vaddr;
    size_t size;
} drm_dma_handle_t;

/**
 * Buffer entry.  There is one of these for each buffer size order.
 */
struct drm_buf_entry {
    int buf_size;               /**< size */
    int buf_count;              /**< number of buffers */
    struct drm_buf *buflist;    /**< buffer list */
    int seg_count;
    int page_order;
    struct drm_dma_handle **seglist;

    struct drm_freelist freelist;
};

/** File private data */
struct drm_file {
    int authenticated;
    pid_t pid;
    uid_t uid;
    drm_magic_t magic;
    unsigned long ioctl_count;
    struct list_head lhead;
    struct drm_minor *minor;
    unsigned long lock_count;

    /** Mapping of mm object handles to object pointers. */
    struct idr object_idr;
    /** Lock for synchronization of access to object_idr. */
    spinlock_t table_lock;

    struct file *filp;
    void *driver_priv;

    int is_master; /* this file private is a master for a minor */
    struct drm_master *master; /* master this node is currently associated with
                                  N.B. not always minor->master */
    struct list_head fbs;
};

/** Wait queue */
struct drm_queue {
    atomic_t use_count;            /**< Outstanding uses (+1) */
    atomic_t finalization;         /**< Finalization in progress */
    atomic_t block_count;          /**< Count of processes waiting */
    atomic_t block_read;           /**< Queue blocked for reads */
    wait_queue_head_t read_queue;  /**< Processes waiting on block_read */
    atomic_t block_write;          /**< Queue blocked for writes */
    wait_queue_head_t write_queue; /**< Processes waiting on block_write */
    atomic_t total_queued;         /**< Total queued statistic */
    atomic_t total_flushed;        /**< Total flushes statistic */
    atomic_t total_locks;          /**< Total locks statistics */
    enum drm_ctx_flags flags;      /**< Context preserving and 2D-only */
    struct drm_waitlist waitlist;  /**< Pending buffers */
    wait_queue_head_t flush_queue; /**< Processes waiting until flush */
};

/**
 * Lock data.
 */
struct drm_lock_data {
    struct drm_hw_lock *hw_lock;  /**< Hardware lock */
    /** Private of lock holder's file (NULL=kernel) */
    struct drm_file *file_priv;
    wait_queue_head_t lock_queue; /**< Queue of blocked processes */
    unsigned long lock_time;      /**< Time of last lock in jiffies */
    spinlock_t spinlock;
    uint32_t kernel_waiters;
    uint32_t user_waiters;
    int idle_has_lock;
};

/**
 * DMA data.
 */
struct drm_device_dma {

    struct drm_buf_entry bufs[DRM_MAX_ORDER + 1];  /**< buffers, grouped by their size order */
    int buf_count;              /**< total number of buffers */
    struct drm_buf **buflist;   /**< Vector of pointers into drm_device_dma::bufs */
    int seg_count;
    int page_count;             /**< number of pages */
    unsigned long *pagelist;    /**< page list */
    unsigned long byte_count;
    enum {
        _DRM_DMA_USE_AGP = 0x01,
        _DRM_DMA_USE_SG = 0x02,
        _DRM_DMA_USE_FB = 0x04,
        _DRM_DMA_USE_PCI_RO = 0x08
    } flags;

};

/**
 * AGP memory entry.  Stored as a doubly linked list.
 */
struct drm_agp_mem {
    unsigned long handle;       /**< handle */
    DRM_AGP_MEM *memory;
    unsigned long bound;        /**< address */
    int pages;
    struct list_head head;
};

/**
 * AGP data.
 *
 * \sa drm_agp_init() and drm_device::agp.
 */
struct drm_agp_head {
    DRM_AGP_KERN agp_info;      /**< AGP device information */
    struct list_head memory;
    unsigned long mode;         /**< AGP mode */
    struct agp_bridge_data *bridge;
    int enabled;                /**< whether the AGP bus has been enabled */
    int acquired;               /**< whether the AGP device has been acquired */
    unsigned long base;
    int agp_mtrr;
    int cant_use_aperture;
    unsigned long page_mask;
};

/**
 * Scatter-gather memory.
 */
struct drm_sg_mem {
    unsigned long handle;
    void *virtual;
    int pages;
    struct page **pagelist;
    dma_addr_t *busaddr;
};

struct drm_sigdata {
    int context;
    struct drm_hw_lock *lock;
};


/*
 * Generic memory manager structs
 */

struct drm_mm_node {
    struct list_head fl_entry;
    struct list_head ml_entry;
    int free;
    unsigned long start;
    unsigned long size;
    struct drm_mm *mm;
    void *private;
};

struct drm_mm {
    struct list_head fl_entry;
    struct list_head ml_entry;
};


/**
 * Mappings list
 */
struct drm_map_list {
    struct list_head head;      /**< list head */
    struct drm_hash_item hash;
    struct drm_map *map;        /**< mapping */
    uint64_t user_token;
    struct drm_master *master;
    struct drm_mm_node *file_offset_node;  /**< fake offset */
};

typedef struct drm_map drm_local_map_t;

/**
 * Context handle list
 */
struct drm_ctx_list {
    struct list_head head;      /**< list head */
    drm_context_t handle;       /**< context handle */
    struct drm_file *tag;       /**< associated fd private data */
};

/* location of GART table */
#define DRM_ATI_GART_MAIN 1
#define DRM_ATI_GART_FB   2

#define DRM_ATI_GART_PCI  1
#define DRM_ATI_GART_PCIE 2
#define DRM_ATI_GART_IGP  3

struct drm_ati_pcigart_info {
    int gart_table_location;
    int gart_reg_if;
    void *addr;
    dma_addr_t bus_addr;
    dma_addr_t table_mask;
    struct drm_dma_handle *table_handle;
    drm_local_map_t mapping;
    int table_size;
};

/**
 * GEM specific mm private for tracking GEM objects
 */
struct drm_gem_mm {
    struct drm_mm offset_manager;      /**< Offset mgmt for buffer objects */
    struct drm_open_hash offset_hash;  /**< User token hash table for maps */
};

/**
 * This structure defines the GEM memory object, which will be used by the
 * DRM for its buffer objects.
 */
struct drm_gem_object {
    /** Reference count of this object */
    struct kref refcount;

    /** Handle count of this object. Each handle also holds a reference */
    struct kref handlecount;

    /** Related drm device */
    struct drm_device *dev;

    /** File representing the shmem storage */
    struct file *filp;

    /* Mapping info for this object */
    struct drm_map_list map_list;

    /**
     * Size of the object, in bytes.  Immutable over the object's
     * lifetime.
     */
    size_t size;

    /**
     * Global name for this object, starts at 1. 0 means unnamed.
     * Access is covered by the object_name_lock in the related drm_device
     */
    int name;

    /**
     * Memory domains. These monitor which caches contain read/write data
     * related to the object. When transitioning from one set of domains
     * to another, the driver is called to ensure that caches are suitably
     * flushed and invalidated
     */
    uint32_t read_domains;
    uint32_t write_domain;

    /**
     * While validating an exec operation, the
     * new read/write domain values are computed here.
     * They will be transferred to the above values
     * at the point that any cache flushing occurs
     */
    uint32_t pending_read_domains;
    uint32_t pending_write_domain;

    void *driver_private;
};

#include "drm_crtc.h"

/* per-master structure */
struct drm_master {

    struct kref refcount; /* refcount for this master */

    struct list_head head;    /**< each minor contains a list of masters */
    struct drm_minor *minor;  /**< link back to minor we are a master for */

    char *unique;             /**< Unique identifier: e.g., busid */
    int unique_len;           /**< Length of unique field */
    int unique_size;          /**< amount allocated */

    int blocked;              /**< Blocked due to VC switch? */

    /** \name Authentication */
    /*@{ */
    struct drm_open_hash magiclist;
    struct list_head magicfree;
    /*@} */

    struct drm_lock_data lock; /**< Information on hardware lock */

    void *driver_priv;        /**< Private structure for driver to use */
};

/**
 * DRM driver structure. This structure represents the common code for
 * a family of cards. There will be one drm_device for each card present
 * in this family
 */
struct drm_driver {
    int (*load) (struct drm_device *, unsigned long flags);
    int (*firstopen) (struct drm_device *);
    int (*open) (struct drm_device *, struct drm_file *);
    void (*preclose) (struct drm_device *, struct drm_file *file_priv);
    void (*postclose) (struct drm_device *, struct drm_file *);
    void (*lastclose) (struct drm_device *);
    int (*unload) (struct drm_device *);
    int (*suspend) (struct drm_device *, pm_message_t state);
    int (*resume) (struct drm_device *);
    int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
    void (*dma_ready) (struct drm_device *);
    int (*dma_quiescent) (struct drm_device *);
    int (*context_ctor) (struct drm_device *dev, int context);
    int (*context_dtor) (struct drm_device *dev, int context);
    int (*kernel_context_switch) (struct drm_device *dev, int old,
                                  int new);
    void (*kernel_context_switch_unlock) (struct drm_device *dev);
    int (*dri_library_name) (struct drm_device *dev, char *buf);

    /**
     * get_vblank_counter - get raw hardware vblank counter
     * @dev: DRM device
     * @crtc: counter to fetch
     *
     * Driver callback for fetching a raw hardware vblank counter
     * for @crtc.  If a device doesn't have a hardware counter, the
     * driver can simply return the value of drm_vblank_count and
     * make the enable_vblank() and disable_vblank() hooks into no-ops,
     * leaving interrupts enabled at all times.
     *
     * Wraparound handling and loss of events due to modesetting is dealt
     * with in the DRM core code.
     *
     * RETURNS
     * Raw vblank counter value.
     */
    u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);

    /**
     * enable_vblank - enable vblank interrupt events
     * @dev: DRM device
     * @crtc: which irq to enable
     *
     * Enable vblank interrupts for @crtc.  If the device doesn't have
     * a hardware vblank counter, this routine should be a no-op, since
     * interrupts will have to stay on to keep the count accurate.
     *
     * RETURNS
     * Zero on success, appropriate errno if the given @crtc's vblank
     * interrupt cannot be enabled.
     */
    int (*enable_vblank) (struct drm_device *dev, int crtc);

    /**
     * disable_vblank - disable vblank interrupt events
     * @dev: DRM device
     * @crtc: which irq to disable
     *
     * Disable vblank interrupts for @crtc.  If the device doesn't have
     * a hardware vblank counter, this routine should be a no-op, since
     * interrupts will have to stay on to keep the count accurate.
     */
    void (*disable_vblank) (struct drm_device *dev, int crtc);

    /**
     * Called by \c drm_device_is_agp.  Typically used to determine if a
     * card is really attached to AGP or not.
     *
     * \param dev  DRM device handle
     *
     * \returns
     * One of three values is returned depending on whether or not the
     * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
     * (return of 1), or may or may not be AGP (return of 2).
     */
    int (*device_is_agp) (struct drm_device *dev);

    /* these have to be filled in */

    irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
    void (*irq_preinstall) (struct drm_device *dev);
    int (*irq_postinstall) (struct drm_device *dev);
    void (*irq_uninstall) (struct drm_device *dev);
    void (*reclaim_buffers) (struct drm_device *dev,
                             struct drm_file * file_priv);
    void (*reclaim_buffers_locked) (struct drm_device *dev,
                                    struct drm_file *file_priv);
    void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
                                        struct drm_file *file_priv);
    unsigned long (*get_map_ofs) (struct drm_map * map);
    unsigned long (*get_reg_ofs) (struct drm_device *dev);
    void (*set_version) (struct drm_device *dev,
                         struct drm_set_version *sv);

    /* Master routines */
    int (*master_create)(struct drm_device *dev, struct drm_master *master);
    void (*master_destroy)(struct drm_device *dev, struct drm_master *master);

    int (*proc_init)(struct drm_minor *minor);
    void (*proc_cleanup)(struct drm_minor *minor);

    /**
     * Driver-specific constructor for drm_gem_objects, to set up
     * obj->driver_private.
     *
     * Returns 0 on success.
     */
    int (*gem_init_object) (struct drm_gem_object *obj);
    void (*gem_free_object) (struct drm_gem_object *obj);

    /* Driver private ops for this object */
    struct vm_operations_struct *gem_vm_ops;

    int major;
    int minor;
    int patchlevel;
    char *name;
    char *desc;
    char *date;

    u32 driver_features;
    int dev_priv_size;
    struct drm_ioctl_desc *ioctls;
    int num_ioctls;
    struct file_operations fops;
    struct pci_driver pci_driver;
    /* List of devices hanging off this driver */
    struct list_head device_list;
};
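
/*
 * Illustrative sketch (not part of the original header): a driver describes
 * itself with a static struct drm_driver whose driver_features is an OR of
 * the DRIVER_* capability flags above, and hands it to drm_init() from its
 * module init.  Every identifier beginning with "foo_" below is hypothetical,
 * and fields such as pci_driver.id_table are omitted for brevity.
 *
 *    static struct drm_driver foo_driver = {
 *        .driver_features = DRIVER_USE_AGP | DRIVER_HAVE_IRQ |
 *                           DRIVER_IRQ_SHARED | DRIVER_HAVE_DMA,
 *        .load = foo_driver_load,
 *        .unload = foo_driver_unload,
 *        .irq_handler = foo_irq_handler,
 *        .ioctls = foo_ioctls,
 *        .fops = {
 *            .owner = THIS_MODULE,
 *            .open = drm_open,
 *            .release = drm_release,
 *            .ioctl = drm_ioctl,
 *            .mmap = drm_mmap,
 *            .poll = drm_poll,
 *            .fasync = drm_fasync,
 *        },
 *        .name = "foo",
 *        .desc = "Hypothetical example device",
 *        .major = 1,
 *        .minor = 0,
 *    };
 *
 *    static int __init foo_init(void)
 *    {
 *        return drm_init(&foo_driver);
 *    }
 */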

#define DRM_MINOR_UNASSIGNED 0
#define DRM_MINOR_LEGACY 1
#define DRM_MINOR_CONTROL 2
#define DRM_MINOR_RENDER 3

/**
 * DRM minor structure. This structure represents a drm minor number.
 */
struct drm_minor {
    int index;                  /**< Minor device number */
    int type;                   /**< Control or render */
    dev_t device;               /**< Device number for mknod */
    struct device kdev;         /**< Linux device */
    struct drm_device *dev;
    struct proc_dir_entry *dev_root;  /**< proc directory entry */
    struct drm_master *master;  /* currently active master for this node */
    struct list_head master_list;
    struct drm_mode_group mode_group;
};

/**
 * DRM device structure. This structure represents a complete card that
 * may contain multiple heads.
 */
struct drm_device {
    struct list_head driver_item;  /**< list of devices per driver */
    char *devname;              /**< For /proc/interrupts */
    int if_version;             /**< Highest interface version set */

    /** \name Locks */
    /*@{ */
    spinlock_t count_lock;      /**< For inuse, drm_device::open_count, drm_device::buf_use */
    struct mutex struct_mutex;  /**< For others */
    /*@} */

    /** \name Usage Counters */
    /*@{ */
    int open_count;             /**< Outstanding files open */
    atomic_t ioctl_count;       /**< Outstanding IOCTLs pending */
    atomic_t vma_count;         /**< Outstanding vma areas open */
    int buf_use;                /**< Buffers in use -- cannot alloc */
    atomic_t buf_alloc;         /**< Buffer allocation in progress */
    /*@} */

    /** \name Performance counters */
    /*@{ */
    unsigned long counters;
    enum drm_stat_type types[15];
    atomic_t counts[15];
    /*@} */

    struct list_head filelist;

    /** \name Memory management */
    /*@{ */
    struct list_head maplist;   /**< Linked list of regions */
    int map_count;              /**< Number of mappable regions */
    struct drm_open_hash map_hash;  /**< User token hash table for maps */

    /** \name Context handle management */
    /*@{ */
    struct list_head ctxlist;   /**< Linked list of context handles */
    int ctx_count;              /**< Number of context handles */
    struct mutex ctxlist_mutex; /**< For ctxlist */

    struct idr ctx_idr;

    struct list_head vmalist;   /**< List of vmas (for debugging) */

    /*@} */

    /** \name DMA queues (contexts) */
    /*@{ */
    int queue_count;            /**< Number of active DMA queues */
    int queue_reserved;         /**< Number of reserved DMA queues */
    int queue_slots;            /**< Actual length of queuelist */
    struct drm_queue **queuelist;  /**< Vector of pointers to DMA queues */
    struct drm_device_dma *dma;    /**< Optional pointer for DMA support */
    /*@} */

    /** \name Context support */
    /*@{ */
    int irq_enabled;            /**< True if irq handler is enabled */
    __volatile__ long context_flag;    /**< Context swapping flag */
    __volatile__ long interrupt_flag;  /**< Interruption handler flag */
    __volatile__ long dma_flag;        /**< DMA dispatch flag */
    struct timer_list timer;    /**< Timer for delaying ctx switch */
    wait_queue_head_t context_wait;  /**< Processes waiting on ctx switch */
    int last_checked;           /**< Last context checked for DMA */
    int last_context;           /**< Last current context */
    unsigned long last_switch;  /**< jiffies at last context switch */
    /*@} */

    struct work_struct work;
    /** \name VBLANK IRQ support */
    /*@{ */

    /*
     * At load time, disabling the vblank interrupt won't be allowed since
     * old clients may not call the modeset ioctl and therefore misbehave.
     * Once the modeset ioctl *has* been called though, we can safely
     * disable them when unused.
     */
    int vblank_disable_allowed;

    wait_queue_head_t *vbl_queue;  /**< VBLANK wait queue */
    atomic_t *_vblank_count;    /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
    spinlock_t vbl_lock;
    atomic_t *vblank_refcount;  /* number of users of vblank interrupts per crtc */
    u32 *last_vblank;           /* protected by dev->vbl_lock, used */
                                /* for wraparound handling */
    int *vblank_enabled;        /* so we don't call enable more than
                                   once per disable */
    int *vblank_inmodeset;      /* Display driver is setting mode */
    u32 *last_vblank_wait;      /* Last vblank seqno waited per CRTC */
    struct timer_list vblank_disable_timer;

    u32 max_vblank_count;       /**< size of vblank counter register */

    /*@} */
    cycles_t ctx_start;
    cycles_t lck_start;

    struct fasync_struct *buf_async;  /**< Processes waiting for SIGIO */
    wait_queue_head_t buf_readers;    /**< Processes waiting to read */
    wait_queue_head_t buf_writers;    /**< Processes waiting to ctx switch */

    struct drm_agp_head *agp;   /**< AGP data */

    struct pci_dev *pdev;       /**< PCI device structure */
    int pci_vendor;             /**< PCI vendor id */
    int pci_device;             /**< PCI device id */
#ifdef __alpha__
    struct pci_controller *hose;
#endif
    struct drm_sg_mem *sg;      /**< Scatter gather memory */
    int num_crtcs;              /**< Number of CRTCs on this device */
    void *dev_private;          /**< device private data */
    void *mm_private;
    struct address_space *dev_mapping;
    struct drm_sigdata sigdata; /**< For block_all_signals */
    sigset_t sigmask;

    struct drm_driver *driver;
    drm_local_map_t *agp_buffer_map;
    unsigned int agp_buffer_token;
    struct drm_minor *control;  /**< Control node for card */
    struct drm_minor *primary;  /**< render type primary screen head */

    /** \name Drawable information */
    /*@{ */
    spinlock_t drw_lock;
    struct idr drw_idr;
    /*@} */

    struct drm_mode_config mode_config;  /**< Current mode config */

    /** \name GEM information */
    /*@{ */
    spinlock_t object_name_lock;
    struct idr object_name_idr;
    atomic_t object_count;
    atomic_t object_memory;
    atomic_t pin_count;
    atomic_t pin_memory;
    atomic_t gtt_count;
    atomic_t gtt_memory;
    uint32_t gtt_total;
    uint32_t invalidate_domains;  /* domains pending invalidation */
    uint32_t flush_domains;       /* domains pending flush */
    /*@} */

};

static inline int drm_dev_to_irq(struct drm_device *dev)
{
    return dev->pdev->irq;
}

static __inline__ int drm_core_check_feature(struct drm_device *dev,
                                             int feature)
{
    return ((dev->driver->driver_features & feature) ? 1 : 0);
}
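
/*
 * Illustrative sketch (not part of the original header): core and driver code
 * gate optional paths on the DRIVER_* feature bits via
 * drm_core_check_feature(), e.g.
 *
 *    if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
 *        return -EINVAL;
 */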

#ifdef __alpha__
#define drm_get_pci_domain(dev) dev->hose->index
#else
#define drm_get_pci_domain(dev) 0
#endif

#if __OS_HAS_AGP
static inline int drm_core_has_AGP(struct drm_device *dev)
{
    return drm_core_check_feature(dev, DRIVER_USE_AGP);
}
#else
#define drm_core_has_AGP(dev) (0)
#endif

#if __OS_HAS_MTRR
static inline int drm_core_has_MTRR(struct drm_device *dev)
{
    return drm_core_check_feature(dev, DRIVER_USE_MTRR);
}

#define DRM_MTRR_WC MTRR_TYPE_WRCOMB

static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
                               unsigned int flags)
{
    return mtrr_add(offset, size, flags, 1);
}

static inline int drm_mtrr_del(int handle, unsigned long offset,
                               unsigned long size, unsigned int flags)
{
    return mtrr_del(handle, offset, size);
}

#else
#define drm_core_has_MTRR(dev) (0)

#define DRM_MTRR_WC 0

static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
                               unsigned int flags)
{
    return 0;
}

static inline int drm_mtrr_del(int handle, unsigned long offset,
                               unsigned long size, unsigned int flags)
{
    return 0;
}
#endif

/******************************************************************/
/** \name Internal function definitions */
/*@{*/

    /* Driver support (drm_drv.h) */
extern int drm_init(struct drm_driver *driver);
extern void drm_exit(struct drm_driver *driver);
extern int drm_ioctl(struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long arg);
extern long drm_compat_ioctl(struct file *filp,
                             unsigned int cmd, unsigned long arg);
extern int drm_lastclose(struct drm_device *dev);

    /* Device support (drm_fops.h) */
extern int drm_open(struct inode *inode, struct file *filp);
extern int drm_stub_open(struct inode *inode, struct file *filp);
extern int drm_fasync(int fd, struct file *filp, int on);
extern int drm_release(struct inode *inode, struct file *filp);

    /* Mapping support (drm_vm.h) */
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
extern void drm_vm_open_locked(struct vm_area_struct *vma);
extern unsigned long drm_core_get_map_ofs(struct drm_map * map);
extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);

    /* Memory management support (drm_memory.h) */
#include "drm_memory.h"
extern void drm_mem_init(void);
extern int drm_mem_info(char *buf, char **start, off_t offset,
                        int request, int *eof, void *data);
extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);

extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
                                       struct page **pages,
                                       unsigned long num_pages,
                                       uint32_t gtt_offset,
                                       uint32_t type);
extern int drm_unbind_agp(DRM_AGP_MEM * handle);

    /* Misc. IOCTL support (drm_ioctl.h) */
extern int drm_irq_by_busid(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
extern int drm_getunique(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
extern int drm_setunique(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
extern int drm_getmap(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
extern int drm_getclient(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
extern int drm_getstats(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
extern int drm_setversion(struct drm_device *dev, void *data,
                          struct drm_file *file_priv);
extern int drm_noop(struct drm_device *dev, void *data,
                    struct drm_file *file_priv);

    /* Context IOCTL support (drm_context.h) */
extern int drm_resctx(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
extern int drm_addctx(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
extern int drm_modctx(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
extern int drm_getctx(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
extern int drm_switchctx(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
extern int drm_newctx(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
extern int drm_rmctx(struct drm_device *dev, void *data,
                     struct drm_file *file_priv);

extern int drm_ctxbitmap_init(struct drm_device *dev);
extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);

extern int drm_setsareactx(struct drm_device *dev, void *data,
                           struct drm_file *file_priv);
extern int drm_getsareactx(struct drm_device *dev, void *data,
                           struct drm_file *file_priv);

    /* Drawable IOCTL support (drm_drawable.h) */
extern int drm_adddraw(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
extern int drm_rmdraw(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
extern int drm_update_drawable_info(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev,
                                                       drm_drawable_t id);
extern void drm_drawable_free_all(struct drm_device *dev);

    /* Authentication IOCTL support (drm_auth.h) */
extern int drm_getmagic(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
extern int drm_authmagic(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);

/* Cache management (drm_cache.c) */
void drm_clflush_pages(struct page *pages[], unsigned long num_pages);

    /* Locking IOCTL support (drm_lock.h) */
extern int drm_lock(struct drm_device *dev, void *data,
                    struct drm_file *file_priv);
extern int drm_unlock(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
extern void drm_idlelock_take(struct drm_lock_data *lock_data);
extern void drm_idlelock_release(struct drm_lock_data *lock_data);

/*
 * These are exported to drivers so that they can implement fencing using
 * DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
 */

extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv);

    /* Buffer management support (drm_bufs.h) */
extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request);
extern int drm_addmap(struct drm_device *dev, unsigned int offset,
                      unsigned int size, enum drm_map_type type,
                      enum drm_map_flags flags, drm_local_map_t ** map_ptr);
extern int drm_addmap_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
extern int drm_rmmap(struct drm_device *dev, drm_local_map_t *map);
extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map);
extern int drm_rmmap_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv);
extern int drm_addbufs(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
extern int drm_infobufs(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
extern int drm_markbufs(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
extern int drm_freebufs(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
extern int drm_mapbufs(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
extern int drm_order(unsigned long size);
extern unsigned long drm_get_resource_start(struct drm_device *dev,
                                            unsigned int resource);
extern unsigned long drm_get_resource_len(struct drm_device *dev,
                                          unsigned int resource);

    /* DMA support (drm_dma.h) */
extern int drm_dma_setup(struct drm_device *dev);
extern void drm_dma_takedown(struct drm_device *dev);
extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
extern void drm_core_reclaim_buffers(struct drm_device *dev,
                                     struct drm_file *filp);

    /* IRQ support (drm_irq.h) */
extern int drm_control(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
extern int drm_irq_install(struct drm_device *dev);
extern int drm_irq_uninstall(struct drm_device *dev);
extern void drm_driver_irq_preinstall(struct drm_device *dev);
extern void drm_driver_irq_postinstall(struct drm_device *dev);
extern void drm_driver_irq_uninstall(struct drm_device *dev);

extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
extern int drm_wait_vblank(struct drm_device *dev, void *data,
                           struct drm_file *filp);
extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
extern void drm_handle_vblank(struct drm_device *dev, int crtc);
extern int drm_vblank_get(struct drm_device *dev, int crtc);
extern void drm_vblank_put(struct drm_device *dev, int crtc);
extern void drm_vblank_cleanup(struct drm_device *dev);
/* Modesetting support */
extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
extern int drm_modeset_ctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv);

    /* AGP/GART support (drm_agpsupport.h) */
extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
extern int drm_agp_acquire(struct drm_device *dev);
extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv);
extern int drm_agp_release(struct drm_device *dev);
extern int drm_agp_release_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv);
extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
extern int drm_agp_info_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_priv);
extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
extern int drm_agp_free_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type);
extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
extern void drm_agp_chipset_flush(struct drm_device *dev);

    /* Stub support (drm_stub.h) */
extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_priv);
extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
struct drm_master *drm_master_create(struct drm_minor *minor);
extern struct drm_master *drm_master_get(struct drm_master *master);
extern void drm_master_put(struct drm_master **master);
extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
                       struct drm_driver *driver);
extern int drm_put_dev(struct drm_device *dev);
extern int drm_put_minor(struct drm_minor **minor);
extern unsigned int drm_debug;

extern struct class *drm_class;
extern struct proc_dir_entry *drm_proc_root;

extern struct idr drm_minors_idr;

extern drm_local_map_t *drm_getsarea(struct drm_device *dev);

    /* Proc support (drm_proc.h) */
extern int drm_proc_init(struct drm_minor *minor, int minor_id,
                         struct proc_dir_entry *root);
extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root);

    /* Scatter Gather Support (drm_scatter.h) */
extern void drm_sg_cleanup(struct drm_sg_mem * entry);
extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request);
extern int drm_sg_free(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);

    /* ATI PCIGART support (ati_pcigart.h) */
extern int drm_ati_pcigart_init(struct drm_device *dev,
                                struct drm_ati_pcigart_info * gart_info);
extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
                                   struct drm_ati_pcigart_info * gart_info);

extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
                                       size_t align, dma_addr_t maxaddr);
extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);

    /* sysfs support (drm_sysfs.c) */
struct drm_sysfs_class;
extern struct class *drm_sysfs_create(struct module *owner, char *name);
extern void drm_sysfs_destroy(void);
extern int drm_sysfs_device_add(struct drm_minor *minor);
extern void drm_sysfs_hotplug_event(struct drm_device *dev);
extern void drm_sysfs_device_remove(struct drm_minor *minor);
extern char *drm_get_connector_status_name(enum drm_connector_status status);
extern int drm_sysfs_connector_add(struct drm_connector *connector);
extern void drm_sysfs_connector_remove(struct drm_connector *connector);

/*
 * Basic memory manager support (drm_mm.c)
 */
extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
                                            unsigned long size,
                                            unsigned alignment);
extern void drm_mm_put_block(struct drm_mm_node * cur);
extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
                                              unsigned alignment, int best_match);
extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
extern void drm_mm_takedown(struct drm_mm *mm);
extern int drm_mm_clean(struct drm_mm *mm);
extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);

/* Graphics Execution Manager library functions (drm_gem.c) */
int drm_gem_init(struct drm_device *dev);
void drm_gem_destroy(struct drm_device *dev);
void drm_gem_object_free(struct kref *kref);
struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
                                            size_t size);
void drm_gem_object_handle_free(struct kref *kref);
void drm_gem_vm_open(struct vm_area_struct *vma);
void drm_gem_vm_close(struct vm_area_struct *vma);
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);

static inline void
drm_gem_object_reference(struct drm_gem_object *obj)
{
    kref_get(&obj->refcount);
}

static inline void
drm_gem_object_unreference(struct drm_gem_object *obj)
{
    if (obj == NULL)
        return;

    kref_put(&obj->refcount, drm_gem_object_free);
}

int drm_gem_handle_create(struct drm_file *file_priv,
                          struct drm_gem_object *obj,
                          int *handlep);

static inline void
drm_gem_object_handle_reference(struct drm_gem_object *obj)
{
    drm_gem_object_reference(obj);
    kref_get(&obj->handlecount);
}

static inline void
drm_gem_object_handle_unreference(struct drm_gem_object *obj)
{
    if (obj == NULL)
        return;

    /*
     * Must bump handle count first as this may be the last
     * ref, in which case the object would disappear before we
     * checked for a name
     */
    kref_put(&obj->handlecount, drm_gem_object_handle_free);
    drm_gem_object_unreference(obj);
}
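
/*
 * Illustrative sketch (not part of the original header): the usual object
 * lifetime in a GEM-aware driver is to allocate an object, expose it to
 * userspace through a handle, then drop the creation references so the handle
 * keeps the object alive.  Error handling is trimmed; this only shows the
 * shape, roughly following the create-ioctl pattern of GEM drivers.
 *
 *    struct drm_gem_object *obj;
 *    int handle, ret;
 *
 *    obj = drm_gem_object_alloc(dev, size);
 *    if (obj == NULL)
 *        return -ENOMEM;
 *    ret = drm_gem_handle_create(file_priv, obj, &handle);
 *    mutex_lock(&dev->struct_mutex);
 *    drm_gem_object_handle_unreference(obj);
 *    mutex_unlock(&dev->struct_mutex);
 *    if (ret)
 *        return ret;
 *    ... return "handle" to userspace ...
 */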

struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
                                             struct drm_file *filp,
                                             int handle);
int drm_gem_close_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int drm_gem_open_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);

extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);

static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
                                                   unsigned int token)
{
    struct drm_map_list *_entry;
    list_for_each_entry(_entry, &dev->maplist, head)
        if (_entry->user_token == token)
            return _entry->map;
    return NULL;
}
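
/*
 * Illustrative sketch (not part of the original header): drivers translate a
 * user-supplied map token (for example an offset passed in an init ioctl)
 * back into the kernel-side mapping with drm_core_findmap().  "dev_priv" and
 * "init" below are hypothetical driver-local names.
 *
 *    dev_priv->sarea = drm_core_findmap(dev, init->sarea_priv_offset);
 *    if (!dev_priv->sarea)
 *        return -EINVAL;
 */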

static __inline__ int drm_device_is_agp(struct drm_device *dev)
{
    if (dev->driver->device_is_agp != NULL) {
        int err = (*dev->driver->device_is_agp) (dev);

        if (err != 2) {
            return err;
        }
    }

    return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
}

static __inline__ int drm_device_is_pcie(struct drm_device *dev)
{
    return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
}

static __inline__ void drm_core_dropmap(struct drm_map *map)
{
}

#ifndef DEBUG_MEMORY
/** Wrapper around kmalloc() */
static __inline__ void *drm_alloc(size_t size, int area)
{
    return kmalloc(size, GFP_KERNEL);
}

/** Wrapper around kfree() */
static __inline__ void drm_free(void *pt, size_t size, int area)
{
    kfree(pt);
}

/** Wrapper around kcalloc() */
static __inline__ void *drm_calloc(size_t nmemb, size_t size, int area)
{
    return kcalloc(nmemb, size, GFP_KERNEL);
}
#else
extern void *drm_alloc(size_t size, int area);
extern void drm_free(void *pt, size_t size, int area);
extern void *drm_calloc(size_t nmemb, size_t size, int area);
#endif
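
/*
 * Illustrative sketch (not part of the original header): callers tag
 * allocations with one of the DRM_MEM_* areas so a DEBUG_MEMORY build can
 * account for them; with DEBUG_MEMORY off, the area argument is ignored.
 *
 *    buf = drm_alloc(count * sizeof(*buf), DRM_MEM_DRIVER);
 *    ...
 *    drm_free(buf, count * sizeof(*buf), DRM_MEM_DRIVER);
 */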

/*@}*/

#endif  /* __KERNEL__ */
#endif