/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <linux/export.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

static pgprot_t drm_io_prot(struct drm_local_map *map,
                            struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
        if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
                tmp = pgprot_noncached(tmp);
        else
                tmp = pgprot_writecombine(tmp);
#elif defined(__powerpc__)
        pgprot_val(tmp) |= _PAGE_NO_CACHE;
        if (map->type == _DRM_REGISTERS)
                pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
        if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
        tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}

static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
        /* Use pgprot_val() as in drm_io_prot(); pgprot_t is not an integer
         * type when STRICT_MM_TYPECHECKS is enabled. */
        pgprot_val(tmp) |= _PAGE_NO_CACHE;
#endif
        return tmp;
}
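
/*
 * Usage note (informational): the mapping paths below apply these helpers to
 * vma->vm_page_prot before populating the range. A minimal sketch of the
 * I/O-mapping case, mirroring drm_mmap_locked() further down (pfn and size
 * stand in for the values computed there):
 *
 *	vma->vm_page_prot = drm_io_prot(map, vma);
 *	io_remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 */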

/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it in \p vmf->page.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_local_map *map = NULL;
        struct drm_map_list *r_list;
        struct drm_hash_item *hash;

        /*
         * Find the right map
         */
        if (!drm_core_has_AGP(dev))
                goto vm_fault_error;

        if (!dev->agp || !dev->agp->cant_use_aperture)
                goto vm_fault_error;

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
                goto vm_fault_error;

        r_list = drm_hash_entry(hash, struct drm_map_list, hash);
        map = r_list->map;

        if (map && map->type == _DRM_AGP) {
                /*
                 * Using vm_pgoff as a selector forces us to use this unusual
                 * addressing scheme.
                 */
                resource_size_t offset = (unsigned long)vmf->virtual_address -
                        vma->vm_start;
                resource_size_t baddr = map->offset + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#ifdef __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                list_for_each_entry(agpmem, &dev->agp->memory, head) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                if (&agpmem->head == &dev->agp->memory)
                        goto vm_fault_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                page = agpmem->memory->pages[offset];
                get_page(page);
                vmf->page = page;

                DRM_DEBUG
                    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
                     (unsigned long long)baddr,
                     agpmem->memory->pages[offset],
                     (unsigned long long)offset,
                     page_count(page));
                return 0;
        }
vm_fault_error:
        return VM_FAULT_SIGBUS; /* Disallow mremap */
}
#else                           /* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
#endif                          /* __OS_HAS_AGP */
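
/*
 * Note (informational): each drm_do_vm_*_fault() helper below follows the
 * same contract as the AGP handler above: on success it takes a reference on
 * the resolved page with get_page(), stores it in vmf->page and returns zero;
 * any failure returns VM_FAULT_SIGBUS, which also keeps userspace from
 * growing the mapping via mremap().
 */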

/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it in \p vmf->page.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_local_map *map = vma->vm_private_data;
        unsigned long offset;
        unsigned long i;
        struct page *page;

        if (!map)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = vmalloc_to_page((void *)i);
        if (!page)
                return VM_FAULT_SIGBUS;
        get_page(page);
        vmf->page = page;

        DRM_DEBUG("shm_fault 0x%lx\n", offset);
        return 0;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last person to close a mapping and
 * it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *pt, *temp;
        struct drm_local_map *map;
        struct drm_map_list *r_list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        map = vma->vm_private_data;

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma->vm_private_data == map)
                        found_maps++;
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        kfree(pt);
                }
        }

        /* We were the only map that was found */
        if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
                /* Check to see if we are in the maplist; if we are not,
                 * then we delete this mapping's information.
                 */
                found_maps = 0;
                list_for_each_entry(r_list, &dev->maplist, head) {
                        if (r_list->map == map)
                                found_maps++;
                }

                if (!found_maps) {
                        drm_dma_handle_t dmah;

                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
                                if (drm_core_has_MTRR(dev))
                                        arch_phys_wc_del(map->mtrr);
                                iounmap(map->handle);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        case _DRM_CONSISTENT:
                                dmah.vaddr = map->handle;
                                dmah.busaddr = map->offset;
                                dmah.size = map->size;
                                __drm_pci_free(dev, &dmah);
                                break;
                        case _DRM_GEM:
                                DRM_ERROR("tried to rmmap GEM object\n");
                                break;
                        }
                        kfree(map);
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from
 * drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_device_dma *dma = dev->dma;
        unsigned long offset;
        unsigned long page_nr;
        struct page *page;

        if (!dma)
                return VM_FAULT_SIGBUS; /* Error */
        if (!dma->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;   /* vm_[pg]off[set] should be 0 */
        page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
        page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

        get_page(page);
        vmf->page = page;

        DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
        return 0;
}
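
/*
 * Worked example of the arithmetic above (informational): with a 4 KiB
 * PAGE_SIZE, a fault at vm_start + 0x2010 gives offset = 0x2010 and
 * page_nr = 2, so the page backing dma->pagelist[2] is resolved and
 * returned.
 */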

/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from
 * drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_local_map *map = vma->vm_private_data;
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_sg_mem *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)
                return VM_FAULT_SIGBUS; /* Error */
        if (!entry->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        map_offset = map->offset - (unsigned long)dev->sg->virtual;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);
        vmf->page = page;

        return 0;
}
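
/*
 * Note (informational): the lookup above combines two offsets. 'offset'
 * locates the fault within this VMA, while 'map_offset' locates the map
 * within the scatter-gather region, so together they index
 * entry->pagelist[] in units of whole pages.
 */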

static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_sg_fault(vma, vmf);
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
        .fault = drm_vm_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
        .fault = drm_vm_shm_fault,
        .open = drm_vm_open,
        .close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
        .fault = drm_vm_dma_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
        .fault = drm_vm_sg_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/**
 * \c open helper called for all virtual memory types.
 *
 * \param dev DRM device.
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist. The caller must hold
 * drm_device::struct_mutex.
 */
void drm_vm_open_locked(struct drm_device *dev,
                struct vm_area_struct *vma)
{
        struct drm_vma_entry *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_inc(&dev->vma_count);

        vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
        if (vma_entry) {
                vma_entry->vma = vma;
                vma_entry->pid = current->pid;
                list_add(&vma_entry->head, &dev->vmalist);
        }
}
EXPORT_SYMBOL_GPL(drm_vm_open_locked);

static void drm_vm_open(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_open_locked(dev, vma);
        mutex_unlock(&dev->struct_mutex);
}

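/**
 * \c close helper shared by all virtual memory types.
 *
 * \param dev DRM device.
 * \param vma virtual memory area.
 *
 * Search the \p vma entry in drm_device::vmalist, unlink it, and free it.
 * The caller must hold drm_device::struct_mutex.
 */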
void drm_vm_close_locked(struct drm_device *dev,
                struct vm_area_struct *vma)
{
        struct drm_vma_entry *pt, *temp;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        kfree(pt);
                        break;
                }
        }
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_close_locked(dev, vma);
        mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops and
 * calls drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev;
        struct drm_device_dma *dma;
        unsigned long length = vma->vm_end - vma->vm_start;

        dev = priv->minor->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) &&
            (dma->flags & _DRM_DMA_USE_PCI_RO)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        vma->vm_ops = &drm_vm_dma_ops;

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

        drm_vm_open_locked(dev, vma);
        return 0;
}

static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
        return dev->hose->dense_mem_base;
#else
        return 0;
#endif
}

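/*
 * Dispatch summary for drm_mmap_locked() below (informational):
 *
 *	_DRM_AGP (cant_use_aperture)	-> drm_vm_ops; pages resolved in fault()
 *	_DRM_AGP/_DRM_FRAME_BUFFER/
 *	_DRM_REGISTERS			-> io_remap_pfn_range() + drm_vm_ops
 *	_DRM_CONSISTENT			-> remap_pfn_range(), then drm_vm_shm_ops
 *	_DRM_SHM			-> drm_vm_shm_ops; pages resolved in fault()
 *	_DRM_SCATTER_GATHER		-> drm_vm_sg_ops; pages resolved in fault()
 */
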
/**
 * mmap DRM memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls drm_mmap_dma(). Otherwise searches for the map in
 * drm_device::maplist, checks that the restricted flag is not set, sets the
 * virtual memory operations according to the mapping type and remaps the
 * pages (see the dispatch summary above). Finally calls drm_vm_open_locked().
 */
int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_local_map *map = NULL;
        resource_size_t offset = 0;
        struct drm_hash_item *hash;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        if (!priv->authenticated)
                return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */
        if (!vma->vm_pgoff
#if __OS_HAS_AGP
            && (!dev->agp
                || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return drm_mmap_dma(filp, vma);

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
                DRM_ERROR("Could not find map\n");
                return -EINVAL;
        }

        map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
#if !defined(__arm__)
        case _DRM_AGP:
                if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms the CPU can't access the bus DMA
                         * address directly, so for memory of type _DRM_AGP we
                         * sort out the real physical pages and mappings in
                         * fault().
                         */
#if defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
                        vma->vm_ops = &drm_vm_ops;
                        break;
                }
                /* fall through to _DRM_FRAME_BUFFER... */
#endif
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                offset = drm_core_get_reg_ofs(dev);
                vma->vm_page_prot = drm_io_prot(map, vma);
                if (io_remap_pfn_range(vma, vma->vm_start,
                                       (map->offset + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
                        return -EAGAIN;
                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%llx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

                vma->vm_ops = &drm_vm_ops;
                break;
        case _DRM_CONSISTENT:
                /* Consistent memory is really like shared memory. But
                 * it's allocated in a different way, so avoid fault */
                /* Set the page protection first so remap_pfn_range() uses it. */
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
                if (remap_pfn_range(vma, vma->vm_start,
                    page_to_pfn(virt_to_page(map->handle)),
                    vma->vm_end - vma->vm_start, vma->vm_page_prot))
                        return -EAGAIN;
        /* fall through to _DRM_SHM */
        case _DRM_SHM:
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &drm_vm_sg_ops;
                vma->vm_private_data = (void *)map;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

        drm_vm_open_locked(dev, vma);
        return 0;
}

int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        int ret;

        if (drm_device_is_unplugged(dev))
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);
        ret = drm_mmap_locked(filp, vma);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
EXPORT_SYMBOL(drm_mmap);
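
/*
 * Usage sketch (informational): drivers wire drm_mmap() up as the .mmap
 * handler in their file_operations. The driver name below is a placeholder,
 * not an actual driver:
 *
 *	static const struct file_operations foo_driver_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.unlocked_ioctl = drm_ioctl,
 *		.mmap = drm_mmap,
 *	};
 */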