Commit | Line | Data |
---|---|---|
6ee73861 BS |
1 | /* |
2 | * Copyright 2005-2006 Stephane Marchesin | |
3 | * All Rights Reserved. | |
4 | * | |
5 | * Permission is hereby granted, free of charge, to any person obtaining a | |
6 | * copy of this software and associated documentation files (the "Software"), | |
7 | * to deal in the Software without restriction, including without limitation | |
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
9 | * and/or sell copies of the Software, and to permit persons to whom the | |
10 | * Software is furnished to do so, subject to the following conditions: | |
11 | * | |
12 | * The above copyright notice and this permission notice (including the next | |
13 | * paragraph) shall be included in all copies or substantial portions of the | |
14 | * Software. | |
15 | * | |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
19 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | |
22 | * DEALINGS IN THE SOFTWARE. | |
23 | */ | |
24 | ||
25 | #include "drmP.h" | |
26 | #include "drm.h" | |
27 | #include "nouveau_drv.h" | |
28 | #include "nouveau_drm.h" | |
29 | #include "nouveau_dma.h" | |
30 | ||
31 | static int | |
32 | nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) | |
33 | { | |
34 | struct drm_device *dev = chan->dev; | |
35 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
36 | struct nouveau_bo *pb = chan->pushbuf_bo; | |
37 | struct nouveau_gpuobj *pushbuf = NULL; | |
6ee73861 BS |
38 | int ret; |
39 | ||
d87897d4 BS |
40 | if (dev_priv->card_type >= NV_50) { |
41 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, | |
7f4a195f BS |
42 | dev_priv->vm_end, NV_MEM_ACCESS_RO, |
43 | NV_MEM_TARGET_VM, &pushbuf); | |
d87897d4 BS |
44 | chan->pushbuf_base = pb->bo.offset; |
45 | } else | |
6ee73861 | 46 | if (pb->bo.mem.mem_type == TTM_PL_TT) { |
7f4a195f BS |
47 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, |
48 | dev_priv->gart_info.aper_size, | |
49 | NV_MEM_ACCESS_RO, | |
50 | NV_MEM_TARGET_GART, &pushbuf); | |
d961db75 | 51 | chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; |
6ee73861 BS |
52 | } else |
53 | if (dev_priv->card_type != NV_04) { | |
54 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, | |
55 | dev_priv->fb_available_size, | |
7f4a195f BS |
56 | NV_MEM_ACCESS_RO, |
57 | NV_MEM_TARGET_VRAM, &pushbuf); | |
d961db75 | 58 | chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; |
6ee73861 BS |
59 | } else { |
60 | /* NV04 cmdbuf hack, from original ddx.. not sure of it's | |
61 | * exact reason for existing :) PCI access to cmdbuf in | |
62 | * VRAM. | |
63 | */ | |
64 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, | |
7f4a195f | 65 | pci_resource_start(dev->pdev, 1), |
6ee73861 | 66 | dev_priv->fb_available_size, |
7f4a195f BS |
67 | NV_MEM_ACCESS_RO, |
68 | NV_MEM_TARGET_PCI, &pushbuf); | |
d961db75 | 69 | chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; |
6ee73861 BS |
70 | } |
71 | ||
a8eaebc6 BS |
72 | nouveau_gpuobj_ref(pushbuf, &chan->pushbuf); |
73 | nouveau_gpuobj_ref(NULL, &pushbuf); | |
6ee73861 BS |
74 | return 0; |
75 | } | |
76 | ||
77 | static struct nouveau_bo * | |
78 | nouveau_channel_user_pushbuf_alloc(struct drm_device *dev) | |
79 | { | |
80 | struct nouveau_bo *pushbuf = NULL; | |
81 | int location, ret; | |
82 | ||
83 | if (nouveau_vram_pushbuf) | |
84 | location = TTM_PL_FLAG_VRAM; | |
85 | else | |
86 | location = TTM_PL_FLAG_TT; | |
87 | ||
88 | ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false, | |
89 | true, &pushbuf); | |
90 | if (ret) { | |
91 | NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret); | |
92 | return NULL; | |
93 | } | |
94 | ||
95 | ret = nouveau_bo_pin(pushbuf, location); | |
96 | if (ret) { | |
97 | NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret); | |
98 | nouveau_bo_ref(NULL, &pushbuf); | |
99 | return NULL; | |
100 | } | |
101 | ||
102 | return pushbuf; | |
103 | } | |
104 | ||
/* allocates and initializes a fifo for user space consumption */
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
		      struct drm_file *file_priv,
		      uint32_t vram_handle, uint32_t gart_handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_channel *chan;
	unsigned long flags;
	int ret;

	/* allocate and lock channel structure */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->dev = dev;
	chan->file_priv = file_priv;
	chan->vram_handle = vram_handle;
	chan->gart_handle = gart_handle;

	/* two refcounts: chan->ref keeps the structure alive, while
	 * chan->users counts logical owners; the channel is torn down
	 * when users drops to zero (see nouveau_channel_put_unlocked)
	 */
	kref_init(&chan->ref);
	atomic_set(&chan->users, 1);
	mutex_init(&chan->mutex);
	mutex_lock(&chan->mutex);

	/* allocate hw channel id */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
		if (!dev_priv->channels.ptr[chan->id]) {
			nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
			break;
		}
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	/* no free slot found; the channel was never published, so it can
	 * simply be freed here
	 */
	if (chan->id == pfifo->channels) {
		mutex_unlock(&chan->mutex);
		kfree(chan);
		return -ENODEV;
	}

	NV_DEBUG(dev, "initialising channel %d\n", chan->id);
	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
	INIT_LIST_HEAD(&chan->nvsw.flip);
	INIT_LIST_HEAD(&chan->fence.pending);

	/* Allocate DMA push buffer */
	chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
	if (!chan->pushbuf_bo) {
		ret = -ENOMEM;
		NV_ERROR(dev, "pushbuf %d\n", ret);
		/* from here on the channel is in the table, so all error
		 * paths go through nouveau_channel_put() for full teardown
		 */
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_dma_pre_init(chan);
	chan->user_put = 0x40;
	chan->user_get = 0x44;

	/* Allocate space for per-channel fixed notifier memory */
	ret = nouveau_notifier_init_channel(chan);
	if (ret) {
		NV_ERROR(dev, "ntfy %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Setup channel's default objects */
	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
	if (ret) {
		NV_ERROR(dev, "gpuobj %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Create a dma object for the push buffer */
	ret = nouveau_channel_pushbuf_ctxdma_init(chan);
	if (ret) {
		NV_ERROR(dev, "pbctxdma %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* disable the fifo caches */
	pfifo->reassign(dev, false);

	/* Create a graphics context for new channel */
	if (dev_priv->card_type < NV_50) {
		ret = pgraph->create_context(chan);
		if (ret) {
			nouveau_channel_put(&chan);
			return ret;
		}
	}

	/* Construct inital RAMFC for new channel */
	ret = pfifo->create_context(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	pfifo->reassign(dev, true);

	ret = nouveau_dma_init(chan);
	if (!ret)
		ret = nouveau_fence_channel_init(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_debugfs_channel_init(chan);

	/* returned with chan->mutex held and users == 1 */
	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
	*chan_ret = chan;
	return 0;
}
225 | ||
feeb0aec FJ |
226 | struct nouveau_channel * |
227 | nouveau_channel_get_unlocked(struct nouveau_channel *ref) | |
228 | { | |
f091a3d4 | 229 | struct nouveau_channel *chan = NULL; |
feeb0aec | 230 | |
f091a3d4 FJ |
231 | if (likely(ref && atomic_inc_not_zero(&ref->users))) |
232 | nouveau_channel_ref(ref, &chan); | |
233 | ||
234 | return chan; | |
feeb0aec FJ |
235 | } |
236 | ||
cff5c133 BS |
237 | struct nouveau_channel * |
238 | nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id) | |
239 | { | |
240 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
feeb0aec | 241 | struct nouveau_channel *chan; |
cff5c133 BS |
242 | unsigned long flags; |
243 | ||
244 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | |
feeb0aec FJ |
245 | chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]); |
246 | spin_unlock_irqrestore(&dev_priv->channels.lock, flags); | |
cff5c133 | 247 | |
feeb0aec | 248 | if (unlikely(!chan)) |
cff5c133 | 249 | return ERR_PTR(-EINVAL); |
cff5c133 | 250 | |
feeb0aec FJ |
251 | if (unlikely(file_priv && chan->file_priv != file_priv)) { |
252 | nouveau_channel_put_unlocked(&chan); | |
cff5c133 BS |
253 | return ERR_PTR(-EINVAL); |
254 | } | |
255 | ||
cff5c133 BS |
256 | mutex_lock(&chan->mutex); |
257 | return chan; | |
258 | } | |
259 | ||
6ee73861 | 260 | void |
feeb0aec | 261 | nouveau_channel_put_unlocked(struct nouveau_channel **pchan) |
6ee73861 | 262 | { |
cff5c133 | 263 | struct nouveau_channel *chan = *pchan; |
6ee73861 BS |
264 | struct drm_device *dev = chan->dev; |
265 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
6ee73861 | 266 | struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; |
cff5c133 | 267 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; |
bd2e597d | 268 | struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt; |
6ee73861 | 269 | unsigned long flags; |
6ee73861 | 270 | |
cff5c133 | 271 | /* decrement the refcount, and we're done if there's still refs */ |
f091a3d4 FJ |
272 | if (likely(!atomic_dec_and_test(&chan->users))) { |
273 | nouveau_channel_ref(NULL, pchan); | |
cff5c133 BS |
274 | return; |
275 | } | |
6ee73861 | 276 | |
cff5c133 BS |
277 | /* noone wants the channel anymore */ |
278 | NV_DEBUG(dev, "freeing channel %d\n", chan->id); | |
6ee73861 BS |
279 | nouveau_debugfs_channel_fini(chan); |
280 | ||
cff5c133 | 281 | /* give it chance to idle */ |
6dccd311 | 282 | nouveau_channel_idle(chan); |
6ee73861 | 283 | |
cff5c133 | 284 | /* ensure all outstanding fences are signaled. they should be if the |
6ee73861 BS |
285 | * above attempts at idling were OK, but if we failed this'll tell TTM |
286 | * we're done with the buffers. | |
287 | */ | |
2730723b | 288 | nouveau_fence_channel_fini(chan); |
6ee73861 | 289 | |
cff5c133 | 290 | /* boot it off the hardware */ |
6ee73861 BS |
291 | pfifo->reassign(dev, false); |
292 | ||
3945e475 FJ |
293 | /* We want to give pgraph a chance to idle and get rid of all |
294 | * potential errors. We need to do this without the context | |
295 | * switch lock held, otherwise the irq handler is unable to | |
296 | * process them. | |
ff9e5279 MM |
297 | */ |
298 | if (pgraph->channel(dev) == chan) | |
299 | nouveau_wait_for_idle(dev); | |
300 | ||
3945e475 | 301 | /* destroy the engine specific contexts */ |
6ee73861 | 302 | pfifo->destroy_context(chan); |
3945e475 | 303 | pgraph->destroy_context(chan); |
bd2e597d BS |
304 | if (pcrypt->destroy_context) |
305 | pcrypt->destroy_context(chan); | |
6ee73861 BS |
306 | |
307 | pfifo->reassign(dev, true); | |
308 | ||
cff5c133 BS |
309 | /* aside from its resources, the channel should now be dead, |
310 | * remove it from the channel list | |
311 | */ | |
312 | spin_lock_irqsave(&dev_priv->channels.lock, flags); | |
f091a3d4 | 313 | nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]); |
cff5c133 BS |
314 | spin_unlock_irqrestore(&dev_priv->channels.lock, flags); |
315 | ||
316 | /* destroy any resources the channel owned */ | |
a8eaebc6 | 317 | nouveau_gpuobj_ref(NULL, &chan->pushbuf); |
6ee73861 | 318 | if (chan->pushbuf_bo) { |
9d59e8a1 | 319 | nouveau_bo_unmap(chan->pushbuf_bo); |
6ee73861 BS |
320 | nouveau_bo_unpin(chan->pushbuf_bo); |
321 | nouveau_bo_ref(NULL, &chan->pushbuf_bo); | |
322 | } | |
323 | nouveau_gpuobj_channel_takedown(chan); | |
324 | nouveau_notifier_takedown_channel(chan); | |
6ee73861 | 325 | |
f091a3d4 | 326 | nouveau_channel_ref(NULL, pchan); |
6ee73861 BS |
327 | } |
328 | ||
feeb0aec FJ |
329 | void |
330 | nouveau_channel_put(struct nouveau_channel **pchan) | |
331 | { | |
332 | mutex_unlock(&(*pchan)->mutex); | |
333 | nouveau_channel_put_unlocked(pchan); | |
334 | } | |
335 | ||
f091a3d4 FJ |
336 | static void |
337 | nouveau_channel_del(struct kref *ref) | |
338 | { | |
339 | struct nouveau_channel *chan = | |
340 | container_of(ref, struct nouveau_channel, ref); | |
341 | ||
f091a3d4 FJ |
342 | kfree(chan); |
343 | } | |
344 | ||
345 | void | |
346 | nouveau_channel_ref(struct nouveau_channel *chan, | |
347 | struct nouveau_channel **pchan) | |
348 | { | |
349 | if (chan) | |
350 | kref_get(&chan->ref); | |
351 | ||
352 | if (*pchan) | |
353 | kref_put(&(*pchan)->ref, nouveau_channel_del); | |
354 | ||
355 | *pchan = chan; | |
356 | } | |
357 | ||
6dccd311 FJ |
358 | void |
359 | nouveau_channel_idle(struct nouveau_channel *chan) | |
360 | { | |
361 | struct drm_device *dev = chan->dev; | |
362 | struct nouveau_fence *fence = NULL; | |
363 | int ret; | |
364 | ||
365 | nouveau_fence_update(chan); | |
366 | ||
367 | if (chan->fence.sequence != chan->fence.sequence_ack) { | |
368 | ret = nouveau_fence_new(chan, &fence, true); | |
369 | if (!ret) { | |
370 | ret = nouveau_fence_wait(fence, false, false); | |
371 | nouveau_fence_unref(&fence); | |
372 | } | |
373 | ||
374 | if (ret) | |
375 | NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id); | |
376 | } | |
377 | } | |
378 | ||
6ee73861 BS |
379 | /* cleans up all the fifos from file_priv */ |
380 | void | |
381 | nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv) | |
382 | { | |
383 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
384 | struct nouveau_engine *engine = &dev_priv->engine; | |
cff5c133 | 385 | struct nouveau_channel *chan; |
6ee73861 BS |
386 | int i; |
387 | ||
388 | NV_DEBUG(dev, "clearing FIFO enables from file_priv\n"); | |
389 | for (i = 0; i < engine->fifo.channels; i++) { | |
cff5c133 BS |
390 | chan = nouveau_channel_get(dev, file_priv, i); |
391 | if (IS_ERR(chan)) | |
392 | continue; | |
6ee73861 | 393 | |
f091a3d4 | 394 | atomic_dec(&chan->users); |
cff5c133 | 395 | nouveau_channel_put(&chan); |
6ee73861 BS |
396 | } |
397 | } | |
398 | ||
6ee73861 BS |
399 | |
400 | /*********************************** | |
401 | * ioctls wrapping the functions | |
402 | ***********************************/ | |
403 | ||
/* DRM_NOUVEAU_CHANNEL_ALLOC ioctl: create a new channel for the
 * calling client and fill 'init' with everything userspace needs to
 * drive it (channel id, pushbuf placement, subchannel bindings and a
 * GEM handle for the notifier area).
 */
static int
nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_channel_alloc *init = data;
	struct nouveau_channel *chan;
	int ret;

	if (dev_priv->engine.graph.accel_blocked)
		return -ENODEV;

	/* ~0 handles are reserved as invalid */
	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
		return -EINVAL;

	ret = nouveau_channel_alloc(dev, &chan, file_priv,
				    init->fb_ctxdma_handle,
				    init->tt_ctxdma_handle);
	if (ret)
		return ret;
	init->channel  = chan->id;

	/* report where userspace may place pushbuf data; IB-mode
	 * channels can fetch from either domain
	 */
	if (chan->dma.ib_max)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
					NOUVEAU_GEM_DOMAIN_GART;
	else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
	else
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

	/* pre-bound subchannels: M2MF class differs pre/post NV50 */
	init->subchan[0].handle = NvM2MF;
	if (dev_priv->card_type < NV_50)
		init->subchan[0].grclass = 0x0039;
	else
		init->subchan[0].grclass = 0x5039;
	init->subchan[1].handle = NvSw;
	init->subchan[1].grclass = NV_SW;
	init->nr_subchan = 2;

	/* Named memory object area */
	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
				    &init->notifier_handle);

	/* on success, hand a user reference to userspace (released by
	 * the CHANNEL_FREE ioctl or file cleanup); our own lookup ref
	 * is dropped either way
	 */
	if (ret == 0)
		atomic_inc(&chan->users); /* userspace reference */
	nouveau_channel_put(&chan);
	return ret;
}
452 | ||
453 | static int | |
454 | nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, | |
455 | struct drm_file *file_priv) | |
456 | { | |
cff5c133 | 457 | struct drm_nouveau_channel_free *req = data; |
6ee73861 BS |
458 | struct nouveau_channel *chan; |
459 | ||
cff5c133 BS |
460 | chan = nouveau_channel_get(dev, file_priv, req->channel); |
461 | if (IS_ERR(chan)) | |
462 | return PTR_ERR(chan); | |
6ee73861 | 463 | |
f091a3d4 | 464 | atomic_dec(&chan->users); |
cff5c133 | 465 | nouveau_channel_put(&chan); |
6ee73861 BS |
466 | return 0; |
467 | } | |
468 | ||
469 | /*********************************** | |
470 | * finally, the ioctl table | |
471 | ***********************************/ | |
472 | ||
/* driver-private ioctl table; every entry requires an authenticated
 * client (DRM_AUTH) and runs without the BKL-era drm global lock
 * (DRM_UNLOCKED).  SETPARAM is additionally restricted to root/master.
 */
struct drm_ioctl_desc nouveau_ioctls[] = {
	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
};

int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);