Commit | Line | Data |
---|---|---|
bcb877e4 | 1 | /* |
1da177e4 LT |
2 | * \author Rickard E. (Rik) Faith <faith@valinux.com> |
3 | * \author Daryll Strauss <daryll@valinux.com> | |
4 | * \author Gareth Hughes <gareth@valinux.com> | |
5 | */ | |
6 | ||
7 | /* | |
8 | * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com | |
9 | * | |
10 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. | |
11 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | |
12 | * All Rights Reserved. | |
13 | * | |
14 | * Permission is hereby granted, free of charge, to any person obtaining a | |
15 | * copy of this software and associated documentation files (the "Software"), | |
16 | * to deal in the Software without restriction, including without limitation | |
17 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
18 | * and/or sell copies of the Software, and to permit persons to whom the | |
19 | * Software is furnished to do so, subject to the following conditions: | |
20 | * | |
21 | * The above copyright notice and this permission notice (including the next | |
22 | * paragraph) shall be included in all copies or substantial portions of the | |
23 | * Software. | |
24 | * | |
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
26 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
27 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
28 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
29 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
30 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
31 | * OTHER DEALINGS IN THE SOFTWARE. | |
32 | */ | |
33 | ||
4748aa16 | 34 | #include <linux/anon_inodes.h> |
0500c04e | 35 | #include <linux/dma-fence.h> |
4748aa16 | 36 | #include <linux/file.h> |
0500c04e SR |
37 | #include <linux/module.h> |
38 | #include <linux/pci.h> | |
1da177e4 | 39 | #include <linux/poll.h> |
5a0e3ad6 | 40 | #include <linux/slab.h> |
a8f8b1d9 | 41 | |
c76f0f7c | 42 | #include <drm/drm_client.h> |
0500c04e | 43 | #include <drm/drm_drv.h> |
a8f8b1d9 | 44 | #include <drm/drm_file.h> |
0500c04e | 45 | #include <drm/drm_print.h> |
a8f8b1d9 | 46 | |
81065548 | 47 | #include "drm_crtc_internal.h" |
0500c04e SR |
48 | #include "drm_internal.h" |
49 | #include "drm_legacy.h" | |
1da177e4 | 50 | |
b1823416 THV |
51 | #if defined(CONFIG_MMU) && defined(CONFIG_TRANSPARENT_HUGEPAGE) |
52 | #include <uapi/asm/mman.h> | |
53 | #include <drm/drm_vma_manager.h> | |
54 | #endif | |
55 | ||
0d639883 | 56 | /* from BKL pushdown */ |
58374713 AB |
57 | DEFINE_MUTEX(drm_global_mutex); |
58 | ||
4017ad7b DV |
59 | bool drm_dev_needs_global_mutex(struct drm_device *dev) |
60 | { | |
61 | /* | |
62 | * Legacy drivers rely on all kinds of BKL locking semantics, don't | |
63 | * bother. They also still need BKL locking for their ioctls, so better | |
64 | * safe than sorry. | |
65 | */ | |
66 | if (drm_core_check_feature(dev, DRIVER_LEGACY)) | |
67 | return true; | |
68 | ||
69 | /* | |
70 | * The deprecated ->load callback must be called after the driver is | |
71 | * already registered. This means such drivers rely on the BKL to make | |
72 | * sure an open can't proceed until the driver is actually fully set up. | |
73 | * Similar hilarity holds for the unload callback. | |
74 | */ | |
75 | if (dev->driver->load || dev->driver->unload) | |
76 | return true; | |
77 | ||
78 | /* | |
79 | * Drivers with the lastclose callback assume that it's synchronized | |
80 | * against concurrent opens, which again needs the BKL. The proper fix | |
81 | * is to use the drm_client infrastructure with proper locking for each | |
82 | * client. | |
83 | */ | |
84 | if (dev->driver->lastclose) | |
85 | return true; | |
86 | ||
87 | return false; | |
88 | } | |
89 | ||
bcb877e4 DV |
90 | /** |
91 | * DOC: file operations | |
92 | * | |
93 | * Drivers must define the file operations structure that forms the DRM | |
94 | * userspace API entry point, even though most of those operations are | |
b93658f8 DV |
95 | * implemented in the DRM core. The resulting &struct file_operations must be |
96 | * stored in the &drm_driver.fops field. The mandatory functions are drm_open(), | |
 * drm_read(), drm_ioctl() and drm_compat_ioctl() if CONFIG_COMPAT is enabled.
b93658f8 DV |
98 | * Note that drm_compat_ioctl will be NULL if CONFIG_COMPAT=n, so there's no |
99 | * need to sprinkle #ifdef into the code. Drivers which implement private ioctls | |
100 | * that require 32/64 bit compatibility support must provide their own | |
101 | * &file_operations.compat_ioctl handler that processes private ioctls and calls | |
102 | * drm_compat_ioctl() for core ioctls. | |
bcb877e4 DV |
103 | * |
104 | * In addition drm_read() and drm_poll() provide support for DRM events. DRM | |
105 | * events are a generic and extensible means to send asynchronous events to | |
106 | * userspace through the file descriptor. They are used to send vblank event and | |
107 | * page flip completions by the KMS API. But drivers can also use it for their | |
108 | * own needs, e.g. to signal completion of rendering. | |
109 | * | |
b93658f8 DV |
110 | * For the driver-side event interface see drm_event_reserve_init() and |
111 | * drm_send_event() as the main starting points. | |
112 | * | |
bcb877e4 DV |
113 | * The memory mapping implementation will vary depending on how the driver |
114 | * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap() | |
115 | * function, modern drivers should use one of the provided memory-manager | |
b93658f8 DV |
116 | * specific implementations. For GEM-based drivers this is drm_gem_mmap(), and |
117 | * for drivers which use the CMA GEM helpers it's drm_gem_cma_mmap(). | |
bcb877e4 DV |
118 | * |
119 | * No other file operations are supported by the DRM userspace API. Overall the | |
bb2eaba6 | 120 | * following is an example &file_operations structure:: |
bcb877e4 DV |
121 | * |
122 | * static const example_drm_fops = { | |
123 | * .owner = THIS_MODULE, | |
124 | * .open = drm_open, | |
125 | * .release = drm_release, | |
126 | * .unlocked_ioctl = drm_ioctl, | |
55edf41b | 127 | * .compat_ioctl = drm_compat_ioctl, // NULL if CONFIG_COMPAT=n |
bcb877e4 DV |
128 | * .poll = drm_poll, |
129 | * .read = drm_read, | |
130 | * .llseek = no_llseek, | |
131 | * .mmap = drm_gem_mmap, | |
132 | * }; | |
b93658f8 | 133 | * |
f42e1819 DV |
134 | * For plain GEM based drivers there is the DEFINE_DRM_GEM_FOPS() macro, and for |
135 | * CMA based drivers there is the DEFINE_DRM_GEM_CMA_FOPS() macro to make this | |
136 | * simpler. | |
bb2eaba6 DV |
137 | * |
138 | * The driver's &file_operations must be stored in &drm_driver.fops. | |
139 | * | |
140 | * For driver-private IOCTL handling see the more detailed discussion in | |
141 | * :ref:`IOCTL support in the userland interfaces chapter<drm_driver_ioctl>`. | |
bcb877e4 DV |
142 | */ |
143 | ||
1572042a DH |
144 | /** |
145 | * drm_file_alloc - allocate file context | |
146 | * @minor: minor to allocate on | |
147 | * | |
148 | * This allocates a new DRM file context. It is not linked into any context and | |
149 | * can be used by the caller freely. Note that the context keeps a pointer to | |
150 | * @minor, so it must be freed before @minor is. | |
151 | * | |
152 | * RETURNS: | |
153 | * Pointer to newly allocated context, ERR_PTR on failure. | |
154 | */ | |
155 | struct drm_file *drm_file_alloc(struct drm_minor *minor) | |
156 | { | |
157 | struct drm_device *dev = minor->dev; | |
158 | struct drm_file *file; | |
159 | int ret; | |
160 | ||
161 | file = kzalloc(sizeof(*file), GFP_KERNEL); | |
162 | if (!file) | |
163 | return ERR_PTR(-ENOMEM); | |
164 | ||
165 | file->pid = get_pid(task_pid(current)); | |
166 | file->minor = minor; | |
167 | ||
168 | /* for compatibility root is always authenticated */ | |
169 | file->authenticated = capable(CAP_SYS_ADMIN); | |
1572042a DH |
170 | |
171 | INIT_LIST_HEAD(&file->lhead); | |
172 | INIT_LIST_HEAD(&file->fbs); | |
173 | mutex_init(&file->fbs_lock); | |
174 | INIT_LIST_HEAD(&file->blobs); | |
175 | INIT_LIST_HEAD(&file->pending_event_list); | |
176 | INIT_LIST_HEAD(&file->event_list); | |
177 | init_waitqueue_head(&file->event_wait); | |
178 | file->event_space = 4096; /* set aside 4k for event buffer */ | |
179 | ||
180 | mutex_init(&file->event_read_lock); | |
181 | ||
182 | if (drm_core_check_feature(dev, DRIVER_GEM)) | |
183 | drm_gem_open(dev, file); | |
184 | ||
185 | if (drm_core_check_feature(dev, DRIVER_SYNCOBJ)) | |
186 | drm_syncobj_open(file); | |
187 | ||
ae75f836 | 188 | drm_prime_init_file_private(&file->prime); |
1572042a DH |
189 | |
190 | if (dev->driver->open) { | |
191 | ret = dev->driver->open(dev, file); | |
192 | if (ret < 0) | |
193 | goto out_prime_destroy; | |
194 | } | |
195 | ||
1572042a DH |
196 | return file; |
197 | ||
1572042a | 198 | out_prime_destroy: |
ae75f836 | 199 | drm_prime_destroy_file_private(&file->prime); |
1572042a DH |
200 | if (drm_core_check_feature(dev, DRIVER_SYNCOBJ)) |
201 | drm_syncobj_release(file); | |
202 | if (drm_core_check_feature(dev, DRIVER_GEM)) | |
203 | drm_gem_release(dev, file); | |
204 | put_pid(file->pid); | |
205 | kfree(file); | |
206 | ||
207 | return ERR_PTR(ret); | |
208 | } | |
209 | ||
210 | static void drm_events_release(struct drm_file *file_priv) | |
211 | { | |
212 | struct drm_device *dev = file_priv->minor->dev; | |
213 | struct drm_pending_event *e, *et; | |
214 | unsigned long flags; | |
215 | ||
216 | spin_lock_irqsave(&dev->event_lock, flags); | |
217 | ||
218 | /* Unlink pending events */ | |
219 | list_for_each_entry_safe(e, et, &file_priv->pending_event_list, | |
220 | pending_link) { | |
221 | list_del(&e->pending_link); | |
222 | e->file_priv = NULL; | |
223 | } | |
224 | ||
225 | /* Remove unconsumed events */ | |
226 | list_for_each_entry_safe(e, et, &file_priv->event_list, link) { | |
227 | list_del(&e->link); | |
228 | kfree(e); | |
229 | } | |
230 | ||
231 | spin_unlock_irqrestore(&dev->event_lock, flags); | |
232 | } | |
233 | ||
234 | /** | |
235 | * drm_file_free - free file context | |
236 | * @file: context to free, or NULL | |
237 | * | |
238 | * This destroys and deallocates a DRM file context previously allocated via | |
239 | * drm_file_alloc(). The caller must make sure to unlink it from any contexts | |
240 | * before calling this. | |
241 | * | |
242 | * If NULL is passed, this is a no-op. | |
243 | * | |
244 | * RETURNS: | |
245 | * 0 on success, or error code on failure. | |
246 | */ | |
247 | void drm_file_free(struct drm_file *file) | |
248 | { | |
249 | struct drm_device *dev; | |
250 | ||
251 | if (!file) | |
252 | return; | |
253 | ||
254 | dev = file->minor->dev; | |
255 | ||
5a2ba992 EV |
256 | DRM_DEBUG("comm=\"%s\", pid=%d, dev=0x%lx, open_count=%d\n", |
257 | current->comm, task_pid_nr(current), | |
1572042a | 258 | (long)old_encode_dev(file->minor->kdev->devt), |
7e13ad89 | 259 | atomic_read(&dev->open_count)); |
1572042a DH |
260 | |
261 | if (drm_core_check_feature(dev, DRIVER_LEGACY) && | |
262 | dev->driver->preclose) | |
263 | dev->driver->preclose(dev, file); | |
264 | ||
265 | if (drm_core_check_feature(dev, DRIVER_LEGACY)) | |
266 | drm_legacy_lock_release(dev, file->filp); | |
267 | ||
268 | if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) | |
269 | drm_legacy_reclaim_buffers(dev, file); | |
270 | ||
271 | drm_events_release(file); | |
272 | ||
273 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | |
274 | drm_fb_release(file); | |
275 | drm_property_destroy_user_blobs(dev, file); | |
276 | } | |
277 | ||
278 | if (drm_core_check_feature(dev, DRIVER_SYNCOBJ)) | |
279 | drm_syncobj_release(file); | |
280 | ||
281 | if (drm_core_check_feature(dev, DRIVER_GEM)) | |
282 | drm_gem_release(dev, file); | |
283 | ||
284 | drm_legacy_ctxbitmap_flush(dev, file); | |
285 | ||
286 | if (drm_is_primary_client(file)) | |
287 | drm_master_release(file); | |
288 | ||
289 | if (dev->driver->postclose) | |
290 | dev->driver->postclose(dev, file); | |
291 | ||
ae75f836 | 292 | drm_prime_destroy_file_private(&file->prime); |
1572042a DH |
293 | |
294 | WARN_ON(!list_empty(&file->event_list)); | |
295 | ||
296 | put_pid(file->pid); | |
297 | kfree(file); | |
298 | } | |
299 | ||
e21710a8 EV |
300 | static void drm_close_helper(struct file *filp) |
301 | { | |
302 | struct drm_file *file_priv = filp->private_data; | |
303 | struct drm_device *dev = file_priv->minor->dev; | |
304 | ||
305 | mutex_lock(&dev->filelist_mutex); | |
306 | list_del(&file_priv->lhead); | |
307 | mutex_unlock(&dev->filelist_mutex); | |
308 | ||
309 | drm_file_free(file_priv); | |
310 | } | |
311 | ||
bcb877e4 | 312 | /* |
d985c108 DA |
313 | * Check whether DRI will run on this CPU. |
314 | * | |
315 | * \return non-zero if the DRI will run on this CPU, or zero otherwise. | |
316 | */ | |
317 | static int drm_cpu_valid(void) | |
318 | { | |
d985c108 DA |
319 | #if defined(__sparc__) && !defined(__sparc_v9__) |
320 | return 0; /* No cmpxchg before v9 sparc. */ | |
321 | #endif | |
322 | return 1; | |
323 | } | |
324 | ||
bcb877e4 | 325 | /* |
85dce7ff | 326 | * Called whenever a process opens a drm node |
d985c108 | 327 | * |
d985c108 | 328 | * \param filp file pointer. |
f4aede2e | 329 | * \param minor acquired minor-object. |
d985c108 DA |
330 | * \return zero on success or a negative number on failure. |
331 | * | |
332 | * Creates and initializes a drm_file structure for the file private data in \p | |
333 | * filp and add it into the double linked list in \p dev. | |
334 | */ | |
1dcc0ceb | 335 | static int drm_open_helper(struct file *filp, struct drm_minor *minor) |
d985c108 | 336 | { |
f4aede2e | 337 | struct drm_device *dev = minor->dev; |
84b1fd10 | 338 | struct drm_file *priv; |
7eeaeb90 | 339 | int ret; |
d985c108 DA |
340 | |
341 | if (filp->f_flags & O_EXCL) | |
342 | return -EBUSY; /* No exclusive opens */ | |
343 | if (!drm_cpu_valid()) | |
344 | return -EINVAL; | |
17ee1eb6 EV |
345 | if (dev->switch_power_state != DRM_SWITCH_POWER_ON && |
346 | dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF) | |
5bcf719b | 347 | return -EINVAL; |
d985c108 | 348 | |
5a2ba992 EV |
349 | DRM_DEBUG("comm=\"%s\", pid=%d, minor=%d\n", current->comm, |
350 | task_pid_nr(current), minor->index); | |
d985c108 | 351 | |
1572042a DH |
352 | priv = drm_file_alloc(minor); |
353 | if (IS_ERR(priv)) | |
354 | return PTR_ERR(priv); | |
d985c108 | 355 | |
7eeaeb90 NT |
356 | if (drm_is_primary_client(priv)) { |
357 | ret = drm_master_open(priv); | |
358 | if (ret) { | |
359 | drm_file_free(priv); | |
360 | return ret; | |
361 | } | |
362 | } | |
363 | ||
d985c108 | 364 | filp->private_data = priv; |
76ef6b28 | 365 | filp->f_mode |= FMODE_UNSIGNED_OFFSET; |
6c340eac | 366 | priv->filp = filp; |
bd1b331f | 367 | |
1d2ac403 | 368 | mutex_lock(&dev->filelist_mutex); |
bd1b331f | 369 | list_add(&priv->lhead, &dev->filelist); |
1d2ac403 | 370 | mutex_unlock(&dev->filelist_mutex); |
d985c108 DA |
371 | |
372 | #ifdef __alpha__ | |
373 | /* | |
374 | * Default the hose | |
375 | */ | |
376 | if (!dev->hose) { | |
377 | struct pci_dev *pci_dev; | |
948de842 | 378 | |
d985c108 DA |
379 | pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL); |
380 | if (pci_dev) { | |
381 | dev->hose = pci_dev->sysdata; | |
382 | pci_dev_put(pci_dev); | |
383 | } | |
384 | if (!dev->hose) { | |
59c1ad3b YW |
385 | struct pci_bus *b = list_entry(pci_root_buses.next, |
386 | struct pci_bus, node); | |
d985c108 DA |
387 | if (b) |
388 | dev->hose = b->sysdata; | |
389 | } | |
390 | } | |
391 | #endif | |
392 | ||
393 | return 0; | |
c9a9c5e0 KH |
394 | } |
395 | ||
094aa54f DV |
396 | /** |
397 | * drm_open - open method for DRM file | |
398 | * @inode: device inode | |
399 | * @filp: file pointer. | |
400 | * | |
401 | * This function must be used by drivers as their &file_operations.open method. | |
402 | * It looks up the correct DRM device and instantiates all the per-file | |
403 | * resources for it. It also calls the &drm_driver.open driver callback. | |
404 | * | |
405 | * RETURNS: | |
406 | * | |
407 | * 0 on success or negative errno value on falure. | |
408 | */ | |
409 | int drm_open(struct inode *inode, struct file *filp) | |
410 | { | |
411 | struct drm_device *dev; | |
412 | struct drm_minor *minor; | |
413 | int retcode; | |
414 | int need_setup = 0; | |
415 | ||
416 | minor = drm_minor_acquire(iminor(inode)); | |
417 | if (IS_ERR(minor)) | |
418 | return PTR_ERR(minor); | |
419 | ||
420 | dev = minor->dev; | |
4017ad7b DV |
421 | if (drm_dev_needs_global_mutex(dev)) |
422 | mutex_lock(&drm_global_mutex); | |
423 | ||
7e13ad89 | 424 | if (!atomic_fetch_inc(&dev->open_count)) |
094aa54f DV |
425 | need_setup = 1; |
426 | ||
427 | /* share address_space across all char-devs of a single device */ | |
428 | filp->f_mapping = dev->anon_inode->i_mapping; | |
429 | ||
430 | retcode = drm_open_helper(filp, minor); | |
431 | if (retcode) | |
432 | goto err_undo; | |
433 | if (need_setup) { | |
434 | retcode = drm_legacy_setup(dev); | |
435 | if (retcode) { | |
436 | drm_close_helper(filp); | |
437 | goto err_undo; | |
438 | } | |
439 | } | |
591a2abf | 440 | |
4017ad7b DV |
441 | if (drm_dev_needs_global_mutex(dev)) |
442 | mutex_unlock(&drm_global_mutex); | |
591a2abf | 443 | |
094aa54f DV |
444 | return 0; |
445 | ||
446 | err_undo: | |
7e13ad89 | 447 | atomic_dec(&dev->open_count); |
4017ad7b DV |
448 | if (drm_dev_needs_global_mutex(dev)) |
449 | mutex_unlock(&drm_global_mutex); | |
094aa54f DV |
450 | drm_minor_release(minor); |
451 | return retcode; | |
452 | } | |
453 | EXPORT_SYMBOL(drm_open); | |
454 | ||
68dfbeba | 455 | void drm_lastclose(struct drm_device * dev) |
1c8887dd | 456 | { |
1c8887dd DH |
457 | DRM_DEBUG("\n"); |
458 | ||
459 | if (dev->driver->lastclose) | |
460 | dev->driver->lastclose(dev); | |
461 | DRM_DEBUG("driver lastclose completed\n"); | |
462 | ||
fa538645 | 463 | if (drm_core_check_feature(dev, DRIVER_LEGACY)) |
68dfbeba | 464 | drm_legacy_dev_reinit(dev); |
c76f0f7c NT |
465 | |
466 | drm_client_dev_restore(dev); | |
1c8887dd DH |
467 | } |
468 | ||
1da177e4 | 469 | /** |
bcb877e4 DV |
470 | * drm_release - release method for DRM file |
471 | * @inode: device inode | |
472 | * @filp: file pointer. | |
1da177e4 | 473 | * |
b93658f8 DV |
474 | * This function must be used by drivers as their &file_operations.release |
475 | * method. It frees any resources associated with the open file, and calls the | |
45c3d213 DV |
476 | * &drm_driver.postclose driver callback. If this is the last open file for the |
477 | * DRM device also proceeds to call the &drm_driver.lastclose driver callback. | |
1da177e4 | 478 | * |
bcb877e4 DV |
479 | * RETURNS: |
480 | * | |
481 | * Always succeeds and returns 0. | |
1da177e4 | 482 | */ |
b5e89ed5 | 483 | int drm_release(struct inode *inode, struct file *filp) |
1da177e4 | 484 | { |
6c340eac | 485 | struct drm_file *file_priv = filp->private_data; |
1616c525 DH |
486 | struct drm_minor *minor = file_priv->minor; |
487 | struct drm_device *dev = minor->dev; | |
1da177e4 | 488 | |
4017ad7b DV |
489 | if (drm_dev_needs_global_mutex(dev)) |
490 | mutex_lock(&drm_global_mutex); | |
1da177e4 | 491 | |
7e13ad89 | 492 | DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count)); |
1da177e4 | 493 | |
e21710a8 | 494 | drm_close_helper(filp); |
1da177e4 | 495 | |
7e13ad89 | 496 | if (atomic_dec_and_test(&dev->open_count)) |
68dfbeba | 497 | drm_lastclose(dev); |
1ee57d4d | 498 | |
4017ad7b DV |
499 | if (drm_dev_needs_global_mutex(dev)) |
500 | mutex_unlock(&drm_global_mutex); | |
1da177e4 | 501 | |
1616c525 DH |
502 | drm_minor_release(minor); |
503 | ||
68dfbeba | 504 | return 0; |
1da177e4 LT |
505 | } |
506 | EXPORT_SYMBOL(drm_release); | |
507 | ||
7a2c65dd CW |
508 | /** |
509 | * drm_release_noglobal - release method for DRM file | |
510 | * @inode: device inode | |
511 | * @filp: file pointer. | |
512 | * | |
513 | * This function may be used by drivers as their &file_operations.release | |
514 | * method. It frees any resources associated with the open file prior to taking | |
515 | * the drm_global_mutex, which then calls the &drm_driver.postclose driver | |
516 | * callback. If this is the last open file for the DRM device also proceeds to | |
517 | * call the &drm_driver.lastclose driver callback. | |
518 | * | |
519 | * RETURNS: | |
520 | * | |
521 | * Always succeeds and returns 0. | |
522 | */ | |
523 | int drm_release_noglobal(struct inode *inode, struct file *filp) | |
524 | { | |
525 | struct drm_file *file_priv = filp->private_data; | |
526 | struct drm_minor *minor = file_priv->minor; | |
527 | struct drm_device *dev = minor->dev; | |
528 | ||
529 | drm_close_helper(filp); | |
530 | ||
7e13ad89 | 531 | if (atomic_dec_and_mutex_lock(&dev->open_count, &drm_global_mutex)) { |
7a2c65dd | 532 | drm_lastclose(dev); |
7e13ad89 CW |
533 | mutex_unlock(&drm_global_mutex); |
534 | } | |
7a2c65dd CW |
535 | |
536 | drm_minor_release(minor); | |
537 | ||
538 | return 0; | |
539 | } | |
540 | EXPORT_SYMBOL(drm_release_noglobal); | |
541 | ||
bcb877e4 DV |
542 | /** |
543 | * drm_read - read method for DRM file | |
544 | * @filp: file pointer | |
545 | * @buffer: userspace destination pointer for the read | |
546 | * @count: count in bytes to read | |
547 | * @offset: offset to read | |
548 | * | |
b93658f8 | 549 | * This function must be used by drivers as their &file_operations.read |
bcb877e4 DV |
550 | * method iff they use DRM events for asynchronous signalling to userspace. |
551 | * Since events are used by the KMS API for vblank and page flip completion this | |
552 | * means all modern display drivers must use it. | |
553 | * | |
b93658f8 DV |
554 | * @offset is ignored, DRM events are read like a pipe. Therefore drivers also |
555 | * must set the &file_operation.llseek to no_llseek(). Polling support is | |
bcb877e4 DV |
556 | * provided by drm_poll(). |
557 | * | |
558 | * This function will only ever read a full event. Therefore userspace must | |
559 | * supply a big enough buffer to fit any event to ensure forward progress. Since | |
560 | * the maximum event space is currently 4K it's recommended to just use that for | |
561 | * safety. | |
562 | * | |
563 | * RETURNS: | |
564 | * | |
565 | * Number of bytes read (always aligned to full events, and can be 0) or a | |
566 | * negative error code on failure. | |
567 | */ | |
cdd1cf79 CW |
568 | ssize_t drm_read(struct file *filp, char __user *buffer, |
569 | size_t count, loff_t *offset) | |
c9a9c5e0 | 570 | { |
cdd1cf79 | 571 | struct drm_file *file_priv = filp->private_data; |
c9a9c5e0 | 572 | struct drm_device *dev = file_priv->minor->dev; |
9b2c0b7f | 573 | ssize_t ret; |
c9a9c5e0 | 574 | |
9b2c0b7f CW |
575 | ret = mutex_lock_interruptible(&file_priv->event_read_lock); |
576 | if (ret) | |
577 | return ret; | |
578 | ||
cdd1cf79 | 579 | for (;;) { |
83eb64c8 CW |
580 | struct drm_pending_event *e = NULL; |
581 | ||
582 | spin_lock_irq(&dev->event_lock); | |
583 | if (!list_empty(&file_priv->event_list)) { | |
584 | e = list_first_entry(&file_priv->event_list, | |
585 | struct drm_pending_event, link); | |
586 | file_priv->event_space += e->event->length; | |
587 | list_del(&e->link); | |
588 | } | |
589 | spin_unlock_irq(&dev->event_lock); | |
590 | ||
591 | if (e == NULL) { | |
cdd1cf79 CW |
592 | if (ret) |
593 | break; | |
c9a9c5e0 | 594 | |
cdd1cf79 CW |
595 | if (filp->f_flags & O_NONBLOCK) { |
596 | ret = -EAGAIN; | |
597 | break; | |
598 | } | |
c9a9c5e0 | 599 | |
9b2c0b7f | 600 | mutex_unlock(&file_priv->event_read_lock); |
cdd1cf79 CW |
601 | ret = wait_event_interruptible(file_priv->event_wait, |
602 | !list_empty(&file_priv->event_list)); | |
9b2c0b7f CW |
603 | if (ret >= 0) |
604 | ret = mutex_lock_interruptible(&file_priv->event_read_lock); | |
605 | if (ret) | |
606 | return ret; | |
cdd1cf79 | 607 | } else { |
83eb64c8 CW |
608 | unsigned length = e->event->length; |
609 | ||
610 | if (length > count - ret) { | |
611 | put_back_event: | |
612 | spin_lock_irq(&dev->event_lock); | |
613 | file_priv->event_space -= length; | |
614 | list_add(&e->link, &file_priv->event_list); | |
615 | spin_unlock_irq(&dev->event_lock); | |
87189b78 KL |
616 | wake_up_interruptible_poll(&file_priv->event_wait, |
617 | EPOLLIN | EPOLLRDNORM); | |
cdd1cf79 | 618 | break; |
83eb64c8 | 619 | } |
cdd1cf79 | 620 | |
83eb64c8 | 621 | if (copy_to_user(buffer + ret, e->event, length)) { |
cdd1cf79 CW |
622 | if (ret == 0) |
623 | ret = -EFAULT; | |
83eb64c8 | 624 | goto put_back_event; |
cdd1cf79 | 625 | } |
c9a9c5e0 | 626 | |
83eb64c8 | 627 | ret += length; |
1b47aaf9 | 628 | kfree(e); |
c9a9c5e0 | 629 | } |
c9a9c5e0 | 630 | } |
9b2c0b7f | 631 | mutex_unlock(&file_priv->event_read_lock); |
c9a9c5e0 | 632 | |
cdd1cf79 | 633 | return ret; |
c9a9c5e0 KH |
634 | } |
635 | EXPORT_SYMBOL(drm_read); | |
636 | ||
bcb877e4 DV |
637 | /** |
638 | * drm_poll - poll method for DRM file | |
639 | * @filp: file pointer | |
640 | * @wait: poll waiter table | |
641 | * | |
b93658f8 DV |
642 | * This function must be used by drivers as their &file_operations.read method |
643 | * iff they use DRM events for asynchronous signalling to userspace. Since | |
644 | * events are used by the KMS API for vblank and page flip completion this means | |
645 | * all modern display drivers must use it. | |
bcb877e4 DV |
646 | * |
647 | * See also drm_read(). | |
648 | * | |
649 | * RETURNS: | |
650 | * | |
651 | * Mask of POLL flags indicating the current status of the file. | |
652 | */ | |
afc9a42b | 653 | __poll_t drm_poll(struct file *filp, struct poll_table_struct *wait) |
1da177e4 | 654 | { |
c9a9c5e0 | 655 | struct drm_file *file_priv = filp->private_data; |
afc9a42b | 656 | __poll_t mask = 0; |
c9a9c5e0 KH |
657 | |
658 | poll_wait(filp, &file_priv->event_wait, wait); | |
659 | ||
660 | if (!list_empty(&file_priv->event_list)) | |
a9a08845 | 661 | mask |= EPOLLIN | EPOLLRDNORM; |
c9a9c5e0 KH |
662 | |
663 | return mask; | |
1da177e4 | 664 | } |
b5e89ed5 | 665 | EXPORT_SYMBOL(drm_poll); |
2dd500f1 DV |
666 | |
667 | /** | |
4020b220 | 668 | * drm_event_reserve_init_locked - init a DRM event and reserve space for it |
2dd500f1 DV |
669 | * @dev: DRM device |
670 | * @file_priv: DRM file private data | |
671 | * @p: tracking structure for the pending event | |
672 | * @e: actual event data to deliver to userspace | |
673 | * | |
674 | * This function prepares the passed in event for eventual delivery. If the event | |
675 | * doesn't get delivered (because the IOCTL fails later on, before queuing up | |
676 | * anything) then the even must be cancelled and freed using | |
fb740cf2 DV |
677 | * drm_event_cancel_free(). Successfully initialized events should be sent out |
678 | * using drm_send_event() or drm_send_event_locked() to signal completion of the | |
679 | * asynchronous event to userspace. | |
2dd500f1 DV |
680 | * |
681 | * If callers embedded @p into a larger structure it must be allocated with | |
682 | * kmalloc and @p must be the first member element. | |
683 | * | |
4020b220 | 684 | * This is the locked version of drm_event_reserve_init() for callers which |
ef40cbf9 | 685 | * already hold &drm_device.event_lock. |
4020b220 | 686 | * |
2dd500f1 DV |
687 | * RETURNS: |
688 | * | |
689 | * 0 on success or a negative error code on failure. | |
690 | */ | |
4020b220 DV |
691 | int drm_event_reserve_init_locked(struct drm_device *dev, |
692 | struct drm_file *file_priv, | |
693 | struct drm_pending_event *p, | |
694 | struct drm_event *e) | |
2dd500f1 | 695 | { |
4020b220 DV |
696 | if (file_priv->event_space < e->length) |
697 | return -ENOMEM; | |
2dd500f1 DV |
698 | |
699 | file_priv->event_space -= e->length; | |
700 | ||
701 | p->event = e; | |
681047b4 | 702 | list_add(&p->pending_link, &file_priv->pending_event_list); |
2dd500f1 DV |
703 | p->file_priv = file_priv; |
704 | ||
4020b220 DV |
705 | return 0; |
706 | } | |
707 | EXPORT_SYMBOL(drm_event_reserve_init_locked); | |
708 | ||
709 | /** | |
710 | * drm_event_reserve_init - init a DRM event and reserve space for it | |
711 | * @dev: DRM device | |
712 | * @file_priv: DRM file private data | |
713 | * @p: tracking structure for the pending event | |
714 | * @e: actual event data to deliver to userspace | |
715 | * | |
716 | * This function prepares the passed in event for eventual delivery. If the event | |
717 | * doesn't get delivered (because the IOCTL fails later on, before queuing up | |
718 | * anything) then the even must be cancelled and freed using | |
719 | * drm_event_cancel_free(). Successfully initialized events should be sent out | |
720 | * using drm_send_event() or drm_send_event_locked() to signal completion of the | |
721 | * asynchronous event to userspace. | |
722 | * | |
723 | * If callers embedded @p into a larger structure it must be allocated with | |
724 | * kmalloc and @p must be the first member element. | |
725 | * | |
ef40cbf9 | 726 | * Callers which already hold &drm_device.event_lock should use |
20c9ca4f | 727 | * drm_event_reserve_init_locked() instead. |
4020b220 DV |
728 | * |
729 | * RETURNS: | |
730 | * | |
731 | * 0 on success or a negative error code on failure. | |
732 | */ | |
733 | int drm_event_reserve_init(struct drm_device *dev, | |
734 | struct drm_file *file_priv, | |
735 | struct drm_pending_event *p, | |
736 | struct drm_event *e) | |
737 | { | |
738 | unsigned long flags; | |
739 | int ret; | |
740 | ||
741 | spin_lock_irqsave(&dev->event_lock, flags); | |
742 | ret = drm_event_reserve_init_locked(dev, file_priv, p, e); | |
2dd500f1 | 743 | spin_unlock_irqrestore(&dev->event_lock, flags); |
4020b220 | 744 | |
2dd500f1 DV |
745 | return ret; |
746 | } | |
747 | EXPORT_SYMBOL(drm_event_reserve_init); | |
748 | ||
749 | /** | |
1e55a53a | 750 | * drm_event_cancel_free - free a DRM event and release its space |
2dd500f1 DV |
751 | * @dev: DRM device |
752 | * @p: tracking structure for the pending event | |
753 | * | |
754 | * This function frees the event @p initialized with drm_event_reserve_init() | |
b93658f8 DV |
755 | * and releases any allocated space. It is used to cancel an event when the |
756 | * nonblocking operation could not be submitted and needed to be aborted. | |
2dd500f1 DV |
757 | */ |
758 | void drm_event_cancel_free(struct drm_device *dev, | |
759 | struct drm_pending_event *p) | |
760 | { | |
761 | unsigned long flags; | |
948de842 | 762 | |
2dd500f1 | 763 | spin_lock_irqsave(&dev->event_lock, flags); |
681047b4 DV |
764 | if (p->file_priv) { |
765 | p->file_priv->event_space += p->event->length; | |
766 | list_del(&p->pending_link); | |
767 | } | |
2dd500f1 | 768 | spin_unlock_irqrestore(&dev->event_lock, flags); |
838de39f GP |
769 | |
770 | if (p->fence) | |
f54d1867 | 771 | dma_fence_put(p->fence); |
838de39f | 772 | |
1b47aaf9 | 773 | kfree(p); |
2dd500f1 DV |
774 | } |
775 | EXPORT_SYMBOL(drm_event_cancel_free); | |
fb740cf2 DV |
776 | |
/**
 * drm_send_event_locked - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. Callers must already hold
 * &drm_device.event_lock, see drm_send_event() for the unlocked version.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
	assert_spin_locked(&dev->event_lock);

	/* Wake any synchronous waiter first, then drop our reference on the
	 * completion through the event's release hook and clear the pointer
	 * so it cannot be used again once the waiter has freed it.
	 */
	if (e->completion) {
		complete_all(e->completion);
		e->completion_release(e->completion);
		e->completion = NULL;
	}

	/* Signal and release an optional fence attached to the event. */
	if (e->fence) {
		dma_fence_signal(e->fence);
		dma_fence_put(e->fence);
	}

	/* A disarmed event (its DRM file already closed) has no reader left;
	 * just free it instead of queueing it for delivery.
	 */
	if (!e->file_priv) {
		kfree(e);
		return;
	}

	/* Move the event from the file's pending list to its ready list and
	 * wake anyone sleeping in poll()/read() on that file.
	 */
	list_del(&e->pending_link);
	list_add_tail(&e->link,
		      &e->file_priv->event_list);
	wake_up_interruptible_poll(&e->file_priv->event_wait,
				   EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL(drm_send_event_locked);
818 | ||
819 | /** | |
820 | * drm_send_event - send DRM event to file descriptor | |
821 | * @dev: DRM device | |
822 | * @e: DRM event to deliver | |
823 | * | |
824 | * This function sends the event @e, initialized with drm_event_reserve_init(), | |
ef40cbf9 DV |
825 | * to its associated userspace DRM file. This function acquires |
826 | * &drm_device.event_lock, see drm_send_event_locked() for callers which already | |
827 | * hold this lock. | |
681047b4 DV |
828 | * |
829 | * Note that the core will take care of unlinking and disarming events when the | |
830 | * corresponding DRM file is closed. Drivers need not worry about whether the | |
831 | * DRM file for this event still exists and can call this function upon | |
832 | * completion of the asynchronous work unconditionally. | |
fb740cf2 DV |
833 | */ |
834 | void drm_send_event(struct drm_device *dev, struct drm_pending_event *e) | |
835 | { | |
836 | unsigned long irqflags; | |
837 | ||
838 | spin_lock_irqsave(&dev->event_lock, irqflags); | |
839 | drm_send_event_locked(dev, e); | |
840 | spin_unlock_irqrestore(&dev->event_lock, irqflags); | |
841 | } | |
842 | EXPORT_SYMBOL(drm_send_event); | |
4748aa16 CW |
843 | |
844 | /** | |
845 | * mock_drm_getfile - Create a new struct file for the drm device | |
846 | * @minor: drm minor to wrap (e.g. #drm_device.primary) | |
847 | * @flags: file creation mode (O_RDWR etc) | |
848 | * | |
849 | * This create a new struct file that wraps a DRM file context around a | |
850 | * DRM minor. This mimicks userspace opening e.g. /dev/dri/card0, but without | |
851 | * invoking userspace. The struct file may be operated on using its f_op | |
852 | * (the drm_device.driver.fops) to mimick userspace operations, or be supplied | |
853 | * to userspace facing functions as an internal/anonymous client. | |
854 | * | |
855 | * RETURNS: | |
856 | * Pointer to newly created struct file, ERR_PTR on failure. | |
857 | */ | |
858 | struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags) | |
859 | { | |
860 | struct drm_device *dev = minor->dev; | |
861 | struct drm_file *priv; | |
862 | struct file *file; | |
863 | ||
864 | priv = drm_file_alloc(minor); | |
865 | if (IS_ERR(priv)) | |
866 | return ERR_CAST(priv); | |
867 | ||
868 | file = anon_inode_getfile("drm", dev->driver->fops, priv, flags); | |
869 | if (IS_ERR(file)) { | |
870 | drm_file_free(priv); | |
871 | return file; | |
872 | } | |
873 | ||
874 | /* Everyone shares a single global address space */ | |
875 | file->f_mapping = dev->anon_inode->i_mapping; | |
876 | ||
877 | drm_dev_get(dev); | |
878 | priv->filp = file; | |
879 | ||
880 | return file; | |
881 | } | |
882 | EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile); | |
b1823416 THV |
883 | |
884 | #ifdef CONFIG_MMU | |
885 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | |
/*
 * drm_addr_inflate() attempts to construct an aligned area by inflating
 * the area size and skipping the unaligned start of the area.
 * adapted from shmem_get_unmapped_area()
 *
 * On any failure it falls back to returning the original @addr unchanged.
 */
static unsigned long drm_addr_inflate(unsigned long addr,
				      unsigned long len,
				      unsigned long pgoff,
				      unsigned long flags,
				      unsigned long huge_size)
{
	unsigned long offset, inflated_len;
	unsigned long inflated_addr;
	unsigned long inflated_offset;

	/* Byte offset of @pgoff within a huge_size-aligned region. */
	offset = (pgoff << PAGE_SHIFT) & (huge_size - 1);
	/* Area too small to ever contain a full aligned huge region. */
	if (offset && offset + len < 2 * huge_size)
		return addr;
	/* Suggested address already carries the desired alignment. */
	if ((addr & (huge_size - 1)) == offset)
		return addr;

	/* Over-allocate so an aligned sub-range of @len bytes must exist
	 * somewhere inside the inflated area.
	 */
	inflated_len = len + huge_size - PAGE_SIZE;
	if (inflated_len > TASK_SIZE)
		return addr;
	if (inflated_len < len)	/* arithmetic overflow */
		return addr;

	inflated_addr = current->mm->get_unmapped_area(NULL, 0, inflated_len,
						       0, flags);
	if (IS_ERR_VALUE(inflated_addr))
		return addr;
	if (inflated_addr & ~PAGE_MASK)
		return addr;

	/* Advance within the inflated area to the first address whose
	 * low bits match @offset, wrapping forward a full huge page if
	 * the adjustment would otherwise move backwards.
	 */
	inflated_offset = inflated_addr & (huge_size - 1);
	inflated_addr += offset - inflated_offset;
	if (inflated_offset > offset)
		inflated_addr += huge_size;

	if (inflated_addr > TASK_SIZE - len)
		return addr;

	return inflated_addr;
}
930 | ||
/**
 * drm_get_unmapped_area() - Get an unused user-space virtual memory area
 * suitable for huge page table entries.
 * @file: The struct file representing the address space being mmap()'d.
 * @uaddr: Start address suggested by user-space.
 * @len: Length of the area.
 * @pgoff: The page offset into the address space.
 * @flags: mmap flags
 * @mgr: The address space manager used by the drm driver. This argument can
 * probably be removed at some point when all drivers use the same
 * address space manager.
 *
 * This function attempts to find an unused user-space virtual memory area
 * that can accommodate the size we want to map, and that is properly
 * aligned to facilitate huge page table entries matching actual
 * huge pages or huge page aligned memory in buffer objects. Buffer objects
 * are assumed to start at huge page boundary pfns (io memory) or be
 * populated by huge pages aligned to the start of the buffer object
 * (system- or coherent memory). Adapted from shmem_get_unmapped_area.
 *
 * Return: aligned user-space address.
 */
unsigned long drm_get_unmapped_area(struct file *file,
				    unsigned long uaddr, unsigned long len,
				    unsigned long pgoff, unsigned long flags,
				    struct drm_vma_offset_manager *mgr)
{
	unsigned long addr;
	unsigned long inflated_addr;
	struct drm_vma_offset_node *node;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/*
	 * @pgoff is the file page-offset the huge page boundaries of
	 * which typically aligns to physical address huge page boundaries.
	 * That's not true for DRM, however, where physical address huge
	 * page boundaries instead are aligned with the offset from
	 * buffer object start. So adjust @pgoff to be the offset from
	 * buffer object start.
	 */
	drm_vma_offset_lock_lookup(mgr);
	node = drm_vma_offset_lookup_locked(mgr, pgoff, 1);
	if (node)
		pgoff -= node->vm_node.start;
	drm_vma_offset_unlock_lookup(mgr);

	/* First ask the mm for a plain area; if that fails, is unaligned
	 * to a page, or leaves no room for @len, hand the result straight
	 * back without attempting any huge-page alignment.
	 */
	addr = current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;
	if (addr & ~PAGE_MASK)
		return addr;
	if (addr > TASK_SIZE - len)
		return addr;

	/* Huge alignment is pointless for short mappings and forbidden
	 * for MAP_FIXED, where the address is non-negotiable.
	 */
	if (len < HPAGE_PMD_SIZE)
		return addr;
	if (flags & MAP_FIXED)
		return addr;
	/*
	 * Our priority is to support MAP_SHARED mapped hugely;
	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
	 * But if caller specified an address hint, respect that as before.
	 */
	if (uaddr)
		return addr;

	/* Try PMD-level alignment first, then upgrade to PUD-level when
	 * the architecture supports it and the mapping is large enough.
	 */
	inflated_addr = drm_addr_inflate(addr, len, pgoff, flags,
					 HPAGE_PMD_SIZE);

	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    len >= HPAGE_PUD_SIZE)
		inflated_addr = drm_addr_inflate(inflated_addr, len, pgoff,
						 flags, HPAGE_PUD_SIZE);
	return inflated_addr;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* Without THP there is nothing to align for; defer to the mm directly. */
unsigned long drm_get_unmapped_area(struct file *file,
				    unsigned long uaddr, unsigned long len,
				    unsigned long pgoff, unsigned long flags,
				    struct drm_vma_offset_manager *mgr)
{
	return current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
EXPORT_SYMBOL_GPL(drm_get_unmapped_area);
#endif /* CONFIG_MMU */