// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

#include "dma-buf-sysfs-stats.h"

static inline int is_dma_buf_file(struct file *);
35
b89e3563
SS
36struct dma_buf_list {
37 struct list_head head;
38 struct mutex lock;
39};
40
41static struct dma_buf_list db_list;
42
static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	size_t ret = 0;

	dmabuf = dentry->d_fsdata;
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
	spin_unlock(&dmabuf->name_lock);

	return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}
58
static void dma_buf_release(struct dentry *dentry)
{
	struct dma_buf *dmabuf;

	dmabuf = dentry->d_fsdata;
	if (unlikely(!dmabuf))
		return;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * Any fences that a dma-buf poll can wait on should be signaled
	 * before releasing dma-buf. This is the responsibility of each
	 * driver that uses the reservation objects.
	 *
	 * If you hit this BUG() it means someone dropped their ref to the
	 * dma-buf while still having pending operations on the buffer.
	 */
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

	dma_buf_stats_teardown(dmabuf);
	dmabuf->ops->release(dmabuf);

	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
		dma_resv_fini(dmabuf->resv);

	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
}
89
05cd8469
CTR
90static int dma_buf_file_release(struct inode *inode, struct file *file)
91{
92 struct dma_buf *dmabuf;
93
94 if (!is_dma_buf_file(file))
95 return -EINVAL;
96
97 dmabuf = file->private_data;
98
99 mutex_lock(&db_list.lock);
100 list_del(&dmabuf->list_node);
101 mutex_unlock(&db_list.lock);
102
103 return 0;
104}
105
4ab59c3c
SS
106static const struct dentry_operations dma_buf_dentry_ops = {
107 .d_dname = dmabuffs_dname,
108 .d_release = dma_buf_release,
109};
110
111static struct vfsmount *dma_buf_mnt;
112
113static int dma_buf_fs_init_context(struct fs_context *fc)
114{
115 struct pseudo_fs_context *ctx;
116
117 ctx = init_pseudo(fc, DMA_BUF_MAGIC);
118 if (!ctx)
119 return -ENOMEM;
120 ctx->dops = &dma_buf_dentry_ops;
d15bd7ee
SS
121 return 0;
122}
123
4ab59c3c
SS
124static struct file_system_type dma_buf_fs_type = {
125 .name = "dmabuf",
126 .init_fs_context = dma_buf_fs_init_context,
127 .kill_sb = kill_anon_super,
128};
129
static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}
150
static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer,
	   but also allow SEEK_SET to maintain the idiomatic
	   SEEK_END(0), SEEK_CUR(0) pattern */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}
176
/**
 * DOC: implicit fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of
 *   the most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen. A short
 * userspace poll() sketch follows this comment.
 */
198
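/*
 * Illustrative userspace sketch of the implicit fence polling described above
 * (not part of this file; dmabuf_fd and the error handling are assumptions)::
 *
 *	struct pollfd pfd = {
 *		.fd = dmabuf_fd,	// fd obtained via dma_buf_fd()
 *		.events = POLLIN,	// wait for the most recent write/exclusive fence
 *	};
 *
 *	// Use POLLOUT instead to wait for all attached fences, shared and
 *	// exclusive ones.
 *	if (poll(&pfd, 1, -1) < 0)
 *		perror("poll");
 */
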
f54d1867 199static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
9b495a58
ML
200{
201 struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
202 unsigned long flags;
203
204 spin_lock_irqsave(&dcb->poll->lock, flags);
205 wake_up_locked_poll(dcb->poll, dcb->active);
206 dcb->active = 0;
207 spin_unlock_irqrestore(&dcb->poll->lock, flags);
208}
209
afc9a42b 210static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
9b495a58
ML
211{
212 struct dma_buf *dmabuf;
52791eee
CK
213 struct dma_resv *resv;
214 struct dma_resv_list *fobj;
f54d1867 215 struct dma_fence *fence_excl;
01699437 216 __poll_t events;
b016cd6e 217 unsigned shared_count, seq;
9b495a58
ML
218
219 dmabuf = file->private_data;
220 if (!dmabuf || !dmabuf->resv)
a9a08845 221 return EPOLLERR;
9b495a58
ML
222
223 resv = dmabuf->resv;
224
225 poll_wait(file, &dmabuf->poll, poll);
226
a9a08845 227 events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
9b495a58
ML
228 if (!events)
229 return 0;
230
b016cd6e
CW
231retry:
232 seq = read_seqcount_begin(&resv->seq);
3c3b177a 233 rcu_read_lock();
b016cd6e
CW
234
235 fobj = rcu_dereference(resv->fence);
236 if (fobj)
237 shared_count = fobj->shared_count;
238 else
239 shared_count = 0;
6edbd6ab 240 fence_excl = dma_resv_excl_fence(resv);
b016cd6e
CW
241 if (read_seqcount_retry(&resv->seq, seq)) {
242 rcu_read_unlock();
243 goto retry;
244 }
245
a9a08845 246 if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
9b495a58 247 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
a9a08845 248 __poll_t pevents = EPOLLIN;
9b495a58 249
04a5faa8 250 if (shared_count == 0)
a9a08845 251 pevents |= EPOLLOUT;
9b495a58
ML
252
253 spin_lock_irq(&dmabuf->poll.lock);
254 if (dcb->active) {
255 dcb->active |= pevents;
256 events &= ~pevents;
257 } else
258 dcb->active = pevents;
259 spin_unlock_irq(&dmabuf->poll.lock);
260
261 if (events & pevents) {
f54d1867 262 if (!dma_fence_get_rcu(fence_excl)) {
3c3b177a
ML
263 /* force a recheck */
264 events &= ~pevents;
265 dma_buf_poll_cb(NULL, &dcb->cb);
f54d1867
CW
266 } else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
267 dma_buf_poll_cb)) {
9b495a58 268 events &= ~pevents;
f54d1867 269 dma_fence_put(fence_excl);
04a5faa8 270 } else {
9b495a58
ML
271 /*
272 * No callback queued, wake up any additional
273 * waiters.
274 */
f54d1867 275 dma_fence_put(fence_excl);
9b495a58 276 dma_buf_poll_cb(NULL, &dcb->cb);
04a5faa8 277 }
9b495a58
ML
278 }
279 }
280
a9a08845 281 if ((events & EPOLLOUT) && shared_count > 0) {
9b495a58
ML
282 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
283 int i;
284
285 /* Only queue a new callback if no event has fired yet */
286 spin_lock_irq(&dmabuf->poll.lock);
287 if (dcb->active)
a9a08845 288 events &= ~EPOLLOUT;
9b495a58 289 else
a9a08845 290 dcb->active = EPOLLOUT;
9b495a58
ML
291 spin_unlock_irq(&dmabuf->poll.lock);
292
a9a08845 293 if (!(events & EPOLLOUT))
9b495a58
ML
294 goto out;
295
04a5faa8 296 for (i = 0; i < shared_count; ++i) {
f54d1867 297 struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
04a5faa8 298
f54d1867 299 if (!dma_fence_get_rcu(fence)) {
3c3b177a
ML
300 /*
301 * fence refcount dropped to zero, this means
302 * that fobj has been freed
303 *
304 * call dma_buf_poll_cb and force a recheck!
305 */
a9a08845 306 events &= ~EPOLLOUT;
3c3b177a
ML
307 dma_buf_poll_cb(NULL, &dcb->cb);
308 break;
309 }
f54d1867
CW
310 if (!dma_fence_add_callback(fence, &dcb->cb,
311 dma_buf_poll_cb)) {
312 dma_fence_put(fence);
a9a08845 313 events &= ~EPOLLOUT;
9b495a58
ML
314 break;
315 }
f54d1867 316 dma_fence_put(fence);
04a5faa8 317 }
9b495a58
ML
318
319 /* No callback queued, wake up any additional waiters. */
04a5faa8 320 if (i == shared_count)
9b495a58
ML
321 dma_buf_poll_cb(NULL, &dcb->cb);
322 }
323
324out:
3c3b177a 325 rcu_read_unlock();
9b495a58
ML
326 return events;
327}
328
/**
 * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
 * The name of the dma-buf buffer can only be set when the dma-buf is not
 * attached to any devices. It could theoretically support changing the
 * name of the dma-buf if the same piece of memory is used for multiple
 * purposes between different devices.
 *
 * @dmabuf:	[in]	dmabuf buffer that will be renamed.
 * @buf:	[in]	A piece of userspace memory that contains the name of
 *			the dma-buf.
 *
 * Returns 0 on success. If the dma-buf buffer is already attached to
 * devices, return -EBUSY.
 *
 */
344static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
345{
346 char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
347 long ret = 0;
348
349 if (IS_ERR(name))
350 return PTR_ERR(name);
351
15fd552d 352 dma_resv_lock(dmabuf->resv, NULL);
bb2bb903
GH
353 if (!list_empty(&dmabuf->attachments)) {
354 ret = -EBUSY;
355 kfree(name);
356 goto out_unlock;
357 }
6348dd29 358 spin_lock(&dmabuf->name_lock);
bb2bb903
GH
359 kfree(dmabuf->name);
360 dmabuf->name = name;
6348dd29 361 spin_unlock(&dmabuf->name_lock);
bb2bb903
GH
362
363out_unlock:
15fd552d 364 dma_resv_unlock(dmabuf->resv);
bb2bb903
GH
365 return ret;
366}
367
c11e391d
DV
368static long dma_buf_ioctl(struct file *file,
369 unsigned int cmd, unsigned long arg)
370{
371 struct dma_buf *dmabuf;
372 struct dma_buf_sync sync;
373 enum dma_data_direction direction;
18b862dc 374 int ret;
c11e391d
DV
375
376 dmabuf = file->private_data;
377
378 switch (cmd) {
379 case DMA_BUF_IOCTL_SYNC:
380 if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
381 return -EFAULT;
382
383 if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
384 return -EINVAL;
385
386 switch (sync.flags & DMA_BUF_SYNC_RW) {
387 case DMA_BUF_SYNC_READ:
388 direction = DMA_FROM_DEVICE;
389 break;
390 case DMA_BUF_SYNC_WRITE:
391 direction = DMA_TO_DEVICE;
392 break;
393 case DMA_BUF_SYNC_RW:
394 direction = DMA_BIDIRECTIONAL;
395 break;
396 default:
397 return -EINVAL;
398 }
399
400 if (sync.flags & DMA_BUF_SYNC_END)
18b862dc 401 ret = dma_buf_end_cpu_access(dmabuf, direction);
c11e391d 402 else
18b862dc 403 ret = dma_buf_begin_cpu_access(dmabuf, direction);
c11e391d 404
18b862dc 405 return ret;
bb2bb903 406
a5bff92e
DV
407 case DMA_BUF_SET_NAME_A:
408 case DMA_BUF_SET_NAME_B:
bb2bb903
GH
409 return dma_buf_set_name(dmabuf, (const char __user *)arg);
410
c11e391d
DV
411 default:
412 return -ENOTTY;
413 }
414}
415
bcc07111
GH
416static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
417{
418 struct dma_buf *dmabuf = file->private_data;
419
420 seq_printf(m, "size:\t%zu\n", dmabuf->size);
421 /* Don't count the temporary reference taken inside procfs seq_show */
422 seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
423 seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
6348dd29 424 spin_lock(&dmabuf->name_lock);
bcc07111
GH
425 if (dmabuf->name)
426 seq_printf(m, "name:\t%s\n", dmabuf->name);
6348dd29 427 spin_unlock(&dmabuf->name_lock);
bcc07111
GH
428}
429
d15bd7ee 430static const struct file_operations dma_buf_fops = {
05cd8469 431 .release = dma_buf_file_release,
4c78513e 432 .mmap = dma_buf_mmap_internal,
19e8697b 433 .llseek = dma_buf_llseek,
9b495a58 434 .poll = dma_buf_poll,
c11e391d 435 .unlocked_ioctl = dma_buf_ioctl,
1832f2d8 436 .compat_ioctl = compat_ptr_ioctl,
bcc07111 437 .show_fdinfo = dma_buf_show_fdinfo,
d15bd7ee
SS
438};
439
440/*
441 * is_dma_buf_file - Check if struct file* is associated with dma_buf
442 */
443static inline int is_dma_buf_file(struct file *file)
444{
445 return file->f_op == &dma_buf_fops;
446}
447
ed63bb1d
GH
448static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
449{
450 struct file *file;
451 struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
452
453 if (IS_ERR(inode))
454 return ERR_CAST(inode);
455
456 inode->i_size = dmabuf->size;
457 inode_set_bytes(inode, dmabuf->size);
458
459 file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
460 flags, &dma_buf_fops);
461 if (IS_ERR(file))
462 goto err_alloc_file;
463 file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
464 file->private_data = dmabuf;
bb2bb903 465 file->f_path.dentry->d_fsdata = dmabuf;
ed63bb1d
GH
466
467 return file;
468
469err_alloc_file:
470 iput(inode);
471 return file;
472}
473
/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple:
 *
 * 1. The exporter defines its exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd(). A minimal exporter-side
 *    sketch is given right after this comment.
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to share with: First the file descriptor is converted to a &dma_buf using
 *    dma_buf_get(). Then the buffer is attached to the device using
 *    dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops.
 */
504
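/*
 * Minimal exporter-side sketch for step 1 above (illustrative only; the
 * my_buf structure, my_dma_buf_ops and the missing error handling are
 * assumptions made for this sketch)::
 *
 *	static struct dma_buf *my_export(struct my_buf *buf)
 *	{
 *		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *
 *		exp_info.ops = &my_dma_buf_ops;	// must provide map/unmap/release
 *		exp_info.size = buf->size;
 *		exp_info.flags = O_CLOEXEC;
 *		exp_info.priv = buf;
 *
 *		return dma_buf_export(&exp_info);
 *	}
 *
 * The returned &dma_buf can then be handed to userspace as a file descriptor
 * with dma_buf_fd(dmabuf, O_CLOEXEC).
 */
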
/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * Additionally, provide a name string for exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created struct dma_buf object, which wraps the
 * supplied private data and operations for struct dma_buf_ops. On either
 * missing ops, or error in allocating struct dma_buf, will return negative
 * error.
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
 */
d8fbe341 523struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
d15bd7ee
SS
524{
525 struct dma_buf *dmabuf;
52791eee 526 struct dma_resv *resv = exp_info->resv;
d15bd7ee 527 struct file *file;
3aac4502 528 size_t alloc_size = sizeof(struct dma_buf);
a026df4c 529 int ret;
5136629d 530
d8fbe341 531 if (!exp_info->resv)
52791eee 532 alloc_size += sizeof(struct dma_resv);
3aac4502
ML
533 else
534 /* prevent &dma_buf[1] == dma_buf->resv */
535 alloc_size += 1;
d15bd7ee 536
d8fbe341
SS
537 if (WARN_ON(!exp_info->priv
538 || !exp_info->ops
539 || !exp_info->ops->map_dma_buf
540 || !exp_info->ops->unmap_dma_buf
e3a9d6c5 541 || !exp_info->ops->release)) {
d15bd7ee
SS
542 return ERR_PTR(-EINVAL);
543 }
544
15fd552d 545 if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
bd2275ee 546 (exp_info->ops->pin || exp_info->ops->unpin)))
15fd552d
CK
547 return ERR_PTR(-EINVAL);
548
bd2275ee 549 if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
15fd552d
CK
550 return ERR_PTR(-EINVAL);
551
9abdffe2
SS
552 if (!try_module_get(exp_info->owner))
553 return ERR_PTR(-ENOENT);
554
3aac4502 555 dmabuf = kzalloc(alloc_size, GFP_KERNEL);
9abdffe2 556 if (!dmabuf) {
a026df4c
CW
557 ret = -ENOMEM;
558 goto err_module;
9abdffe2 559 }
d15bd7ee 560
d8fbe341
SS
561 dmabuf->priv = exp_info->priv;
562 dmabuf->ops = exp_info->ops;
563 dmabuf->size = exp_info->size;
564 dmabuf->exp_name = exp_info->exp_name;
9abdffe2 565 dmabuf->owner = exp_info->owner;
6348dd29 566 spin_lock_init(&dmabuf->name_lock);
9b495a58
ML
567 init_waitqueue_head(&dmabuf->poll);
568 dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
569 dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
570
3aac4502 571 if (!resv) {
52791eee
CK
572 resv = (struct dma_resv *)&dmabuf[1];
573 dma_resv_init(resv);
3aac4502
ML
574 }
575 dmabuf->resv = resv;
d15bd7ee 576
ed63bb1d 577 file = dma_buf_getfile(dmabuf, exp_info->flags);
9022e24e 578 if (IS_ERR(file)) {
a026df4c
CW
579 ret = PTR_ERR(file);
580 goto err_dmabuf;
9022e24e 581 }
19e8697b
CJHR
582
583 file->f_mode |= FMODE_LSEEK;
d15bd7ee
SS
584 dmabuf->file = file;
585
bdb8d06d
HV
586 ret = dma_buf_stats_setup(dmabuf);
587 if (ret)
588 goto err_sysfs;
589
d15bd7ee
SS
590 mutex_init(&dmabuf->lock);
591 INIT_LIST_HEAD(&dmabuf->attachments);
592
b89e3563
SS
593 mutex_lock(&db_list.lock);
594 list_add(&dmabuf->list_node, &db_list.head);
595 mutex_unlock(&db_list.lock);
596
d15bd7ee 597 return dmabuf;
a026df4c 598
bdb8d06d
HV
599err_sysfs:
600 /*
601 * Set file->f_path.dentry->d_fsdata to NULL so that when
602 * dma_buf_release() gets invoked by dentry_ops, it exits
603 * early before calling the release() dma_buf op.
604 */
605 file->f_path.dentry->d_fsdata = NULL;
606 fput(file);
a026df4c
CW
607err_dmabuf:
608 kfree(dmabuf);
609err_module:
610 module_put(exp_info->owner);
611 return ERR_PTR(ret);
d15bd7ee 612}
d8fbe341 613EXPORT_SYMBOL_GPL(dma_buf_export);
d15bd7ee
SS
614
/**
 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
55c1c4ca 622int dma_buf_fd(struct dma_buf *dmabuf, int flags)
d15bd7ee 623{
f5e097f0 624 int fd;
d15bd7ee
SS
625
626 if (!dmabuf || !dmabuf->file)
627 return -EINVAL;
628
f5e097f0
BP
629 fd = get_unused_fd_flags(flags);
630 if (fd < 0)
631 return fd;
d15bd7ee
SS
632
633 fd_install(fd, dmabuf->file);
634
635 return fd;
636}
637EXPORT_SYMBOL_GPL(dma_buf_fd);
638
/**
 * dma_buf_get - returns the struct dma_buf related to an fd
 * @fd:	[in]	fd associated with the struct dma_buf to be returned
 *
 * On success, returns the struct dma_buf associated with an fd; uses
 * file's refcounting done by fget to increase refcount. returns ERR_PTR
 * otherwise.
 */
647struct dma_buf *dma_buf_get(int fd)
648{
649 struct file *file;
650
651 file = fget(fd);
652
653 if (!file)
654 return ERR_PTR(-EBADF);
655
656 if (!is_dma_buf_file(file)) {
657 fput(file);
658 return ERR_PTR(-EINVAL);
659 }
660
661 return file->private_data;
662}
663EXPORT_SYMBOL_GPL(dma_buf_get);
664
/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
675void dma_buf_put(struct dma_buf *dmabuf)
676{
677 if (WARN_ON(!dmabuf || !dmabuf->file))
678 return;
679
680 fput(dmabuf->file);
681}
682EXPORT_SYMBOL_GPL(dma_buf_put);
683
84335675
DV
684static void mangle_sg_table(struct sg_table *sg_table)
685{
686#ifdef CONFIG_DMABUF_DEBUG
687 int i;
688 struct scatterlist *sg;
689
690 /* To catch abuse of the underlying struct page by importers mix
691 * up the bits, but take care to preserve the low SG_ bits to
692 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
693 * before passing the sgt back to the exporter. */
694 for_each_sgtable_sg(sg_table, sg, i)
695 sg->page_link ^= ~0xffUL;
696#endif
697
698}
699static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
700 enum dma_data_direction direction)
701{
702 struct sg_table *sg_table;
703
704 sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
705
706 if (!IS_ERR_OR_NULL(sg_table))
707 mangle_sg_table(sg_table);
708
709 return sg_table;
710}
711
/**
 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
 * @dmabuf:		[in]	buffer to attach device to.
 * @dev:		[in]	device to be attached.
 * @importer_ops:	[in]	importer operations for the attachment
 * @importer_priv:	[in]	importer private pointer for the attachment
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
 * functionality.
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
 */
15fd552d
CK
734struct dma_buf_attachment *
735dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
bb42df46
CK
736 const struct dma_buf_attach_ops *importer_ops,
737 void *importer_priv)
d15bd7ee
SS
738{
739 struct dma_buf_attachment *attach;
740 int ret;
741
d1aa06a1 742 if (WARN_ON(!dmabuf || !dev))
d15bd7ee
SS
743 return ERR_PTR(-EINVAL);
744
4981cdb0
CK
745 if (WARN_ON(importer_ops && !importer_ops->move_notify))
746 return ERR_PTR(-EINVAL);
747
db7942b6 748 attach = kzalloc(sizeof(*attach), GFP_KERNEL);
34d84ec4 749 if (!attach)
a9fbc3b7 750 return ERR_PTR(-ENOMEM);
d15bd7ee 751
d15bd7ee
SS
752 attach->dev = dev;
753 attach->dmabuf = dmabuf;
09606b54
CK
754 if (importer_ops)
755 attach->peer2peer = importer_ops->allow_peer2peer;
bb42df46
CK
756 attach->importer_ops = importer_ops;
757 attach->importer_priv = importer_priv;
2ed9201b 758
d15bd7ee 759 if (dmabuf->ops->attach) {
a19741e5 760 ret = dmabuf->ops->attach(dmabuf, attach);
d15bd7ee
SS
761 if (ret)
762 goto err_attach;
763 }
15fd552d 764 dma_resv_lock(dmabuf->resv, NULL);
d15bd7ee 765 list_add(&attach->node, &dmabuf->attachments);
15fd552d 766 dma_resv_unlock(dmabuf->resv);
d15bd7ee 767
15fd552d
CK
768 /* When either the importer or the exporter can't handle dynamic
769 * mappings we cache the mapping here to avoid issues with the
770 * reservation object lock.
771 */
772 if (dma_buf_attachment_is_dynamic(attach) !=
773 dma_buf_is_dynamic(dmabuf)) {
774 struct sg_table *sgt;
775
bb42df46 776 if (dma_buf_is_dynamic(attach->dmabuf)) {
15fd552d 777 dma_resv_lock(attach->dmabuf->resv, NULL);
7e008b02 778 ret = dmabuf->ops->pin(attach);
bb42df46
CK
779 if (ret)
780 goto err_unlock;
781 }
15fd552d 782
84335675 783 sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
15fd552d
CK
784 if (!sgt)
785 sgt = ERR_PTR(-ENOMEM);
786 if (IS_ERR(sgt)) {
787 ret = PTR_ERR(sgt);
bb42df46 788 goto err_unpin;
15fd552d
CK
789 }
790 if (dma_buf_is_dynamic(attach->dmabuf))
791 dma_resv_unlock(attach->dmabuf->resv);
792 attach->sgt = sgt;
793 attach->dir = DMA_BIDIRECTIONAL;
794 }
795
d15bd7ee
SS
796 return attach;
797
d15bd7ee
SS
798err_attach:
799 kfree(attach);
d15bd7ee 800 return ERR_PTR(ret);
15fd552d 801
bb42df46
CK
802err_unpin:
803 if (dma_buf_is_dynamic(attach->dmabuf))
7e008b02 804 dmabuf->ops->unpin(attach);
bb42df46 805
15fd552d
CK
806err_unlock:
807 if (dma_buf_is_dynamic(attach->dmabuf))
808 dma_resv_unlock(attach->dmabuf->resv);
809
810 dma_buf_detach(dmabuf, attach);
811 return ERR_PTR(ret);
812}
813EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
814
815/**
816 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
817 * @dmabuf: [in] buffer to attach device to.
818 * @dev: [in] device to be attached.
819 *
820 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
821 * mapping.
822 */
823struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
824 struct device *dev)
825{
bb42df46 826 return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
d15bd7ee
SS
827}
828EXPORT_SYMBOL_GPL(dma_buf_attach);
829
84335675
DV
830static void __unmap_dma_buf(struct dma_buf_attachment *attach,
831 struct sg_table *sg_table,
832 enum dma_data_direction direction)
833{
834 /* uses XOR, hence this unmangles */
835 mangle_sg_table(sg_table);
836
837 attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
838}
839
/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is free'd after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 *
 * Optionally this calls &dma_buf_ops.detach for device-specific detach.
 */
849void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
850{
d1aa06a1 851 if (WARN_ON(!dmabuf || !attach))
d15bd7ee
SS
852 return;
853
15fd552d
CK
854 if (attach->sgt) {
855 if (dma_buf_is_dynamic(attach->dmabuf))
856 dma_resv_lock(attach->dmabuf->resv, NULL);
857
84335675 858 __unmap_dma_buf(attach, attach->sgt, attach->dir);
f13e143e 859
bb42df46 860 if (dma_buf_is_dynamic(attach->dmabuf)) {
7e008b02 861 dmabuf->ops->unpin(attach);
15fd552d 862 dma_resv_unlock(attach->dmabuf->resv);
bb42df46 863 }
15fd552d
CK
864 }
865
15fd552d 866 dma_resv_lock(dmabuf->resv, NULL);
d15bd7ee 867 list_del(&attach->node);
15fd552d 868 dma_resv_unlock(dmabuf->resv);
d15bd7ee
SS
869 if (dmabuf->ops->detach)
870 dmabuf->ops->detach(dmabuf, attach);
871
d15bd7ee
SS
872 kfree(attach);
873}
874EXPORT_SYMBOL_GPL(dma_buf_detach);
875
/**
 * dma_buf_pin - Lock down the DMA-buf
 * @attach:	[in]	attachment which should be pinned
 *
 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
 * call this, and only for limited use cases like scanout and not for temporary
 * pin operations. It is not permitted to allow userspace to pin arbitrary
 * amounts of buffers through this interface.
 *
 * Buffers must be unpinned by calling dma_buf_unpin().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
890int dma_buf_pin(struct dma_buf_attachment *attach)
891{
892 struct dma_buf *dmabuf = attach->dmabuf;
893 int ret = 0;
894
c545781e
DV
895 WARN_ON(!dma_buf_attachment_is_dynamic(attach));
896
bb42df46
CK
897 dma_resv_assert_held(dmabuf->resv);
898
899 if (dmabuf->ops->pin)
900 ret = dmabuf->ops->pin(attach);
901
902 return ret;
903}
904EXPORT_SYMBOL_GPL(dma_buf_pin);
905
/**
 * dma_buf_unpin - Unpin a DMA-buf
 * @attach:	[in]	attachment which should be unpinned
 *
 * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
 * any mapping of @attach again and inform the importer through
 * &dma_buf_attach_ops.move_notify.
 */
914void dma_buf_unpin(struct dma_buf_attachment *attach)
915{
916 struct dma_buf *dmabuf = attach->dmabuf;
917
c545781e
DV
918 WARN_ON(!dma_buf_attachment_is_dynamic(attach));
919
bb42df46
CK
920 dma_resv_assert_held(dmabuf->resv);
921
922 if (dmabuf->ops->unpin)
923 dmabuf->ops->unpin(attach);
924}
925EXPORT_SYMBOL_GPL(dma_buf_unpin);
926
/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 *
 * On success, the DMA addresses and lengths in the returned scatterlist are
 * PAGE_SIZE aligned.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts of
 * time.
 *
 * Important: Dynamic importers must wait for the exclusive fence of the struct
 * dma_resv attached to the DMA-BUF first.
 */
948struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
949 enum dma_data_direction direction)
950{
531beb06 951 struct sg_table *sg_table;
bb42df46 952 int r;
d15bd7ee
SS
953
954 might_sleep();
955
d1aa06a1 956 if (WARN_ON(!attach || !attach->dmabuf))
d15bd7ee
SS
957 return ERR_PTR(-EINVAL);
958
15fd552d
CK
959 if (dma_buf_attachment_is_dynamic(attach))
960 dma_resv_assert_held(attach->dmabuf->resv);
961
f13e143e
CK
962 if (attach->sgt) {
963 /*
964 * Two mappings with different directions for the same
965 * attachment are not allowed.
966 */
967 if (attach->dir != direction &&
968 attach->dir != DMA_BIDIRECTIONAL)
969 return ERR_PTR(-EBUSY);
970
971 return attach->sgt;
972 }
973
bb42df46 974 if (dma_buf_is_dynamic(attach->dmabuf)) {
15fd552d 975 dma_resv_assert_held(attach->dmabuf->resv);
4981cdb0 976 if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
7e008b02 977 r = attach->dmabuf->ops->pin(attach);
bb42df46
CK
978 if (r)
979 return ERR_PTR(r);
980 }
981 }
15fd552d 982
84335675 983 sg_table = __map_dma_buf(attach, direction);
fee0c54e
CC
984 if (!sg_table)
985 sg_table = ERR_PTR(-ENOMEM);
d15bd7ee 986
bb42df46 987 if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
4981cdb0 988 !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
7e008b02 989 attach->dmabuf->ops->unpin(attach);
bb42df46 990
f13e143e
CK
991 if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
992 attach->sgt = sg_table;
993 attach->dir = direction;
994 }
995
ac80cd17 996#ifdef CONFIG_DMA_API_DEBUG
00efd65a 997 if (!IS_ERR(sg_table)) {
ac80cd17
JX
998 struct scatterlist *sg;
999 u64 addr;
1000 int len;
1001 int i;
1002
1003 for_each_sgtable_dma_sg(sg_table, sg, i) {
1004 addr = sg_dma_address(sg);
1005 len = sg_dma_len(sg);
1006 if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
1007 pr_debug("%s: addr %llx or len %x is not page aligned!\n",
1008 __func__, addr, len);
1009 }
1010 }
1011 }
1012#endif /* CONFIG_DMA_API_DEBUG */
d15bd7ee
SS
1013 return sg_table;
1014}
1015EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
1016
/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
1027void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
33ea2dcb
SS
1028 struct sg_table *sg_table,
1029 enum dma_data_direction direction)
d15bd7ee 1030{
b6fa0cd6
RC
1031 might_sleep();
1032
d1aa06a1 1033 if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
d15bd7ee
SS
1034 return;
1035
15fd552d
CK
1036 if (dma_buf_attachment_is_dynamic(attach))
1037 dma_resv_assert_held(attach->dmabuf->resv);
1038
f13e143e
CK
1039 if (attach->sgt == sg_table)
1040 return;
1041
15fd552d
CK
1042 if (dma_buf_is_dynamic(attach->dmabuf))
1043 dma_resv_assert_held(attach->dmabuf->resv);
1044
84335675 1045 __unmap_dma_buf(attach, sg_table, direction);
bb42df46
CK
1046
1047 if (dma_buf_is_dynamic(attach->dmabuf) &&
4981cdb0 1048 !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
bb42df46 1049 dma_buf_unpin(attach);
d15bd7ee
SS
1050}
1051EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
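
/*
 * Importer-side sketch tying the attach/map/unmap/detach calls together
 * (illustrative only; dev, dmabuf_fd and the missing error handling are
 * assumptions)::
 *
 *	struct dma_buf *dmabuf = dma_buf_get(dmabuf_fd);
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	attach = dma_buf_attach(dmabuf, dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *
 *	// ... program the device DMA using the addresses in sgt ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */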

/**
 * dma_buf_move_notify - notify attachments that DMA-buf is moving
 *
 * @dmabuf:	[in]	buffer which is moving
 *
 * Informs all attachments that they need to destroy and recreate all their
 * mappings.
 */
1061void dma_buf_move_notify(struct dma_buf *dmabuf)
1062{
1063 struct dma_buf_attachment *attach;
1064
1065 dma_resv_assert_held(dmabuf->resv);
1066
1067 list_for_each_entry(attach, &dmabuf->attachments, node)
4981cdb0 1068 if (attach->importer_ops)
bb42df46
CK
1069 attach->importer_ops->move_notify(attach);
1070}
1071EXPORT_SYMBOL_GPL(dma_buf_move_notify);
1072
/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
 *   access.
 *
 *   Since most kernel internal dma-buf accesses need the entire buffer, a
 *   vmap interface is introduced. Note that on very old 32-bit architectures
 *   vmalloc space might be limited and result in vmap calls failing.
 *
 *   Interfaces::
 *
 *      void \*dma_buf_vmap(struct dma_buf \*dmabuf)
 *      void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
 *
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
 *   count for all vmap access and calls down into the exporter's vmap function
 *   only when no vmapping exists, and only unmaps it once. Protection against
 *   concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
 *
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed in
 *   many processing pipelines (e.g. feeding a software rendered image into a
 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 *   framework already supported this and for DMA buffer file descriptors to
 *   replace ION buffers mmap support was needed.
 *
 *   There are no special interfaces, userspace simply calls mmap on the dma-buf
 *   fd. But like for CPU access there's a need to bracket the actual access,
 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
 *
 *   Some systems might need some sort of cache coherency management e.g. when
 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 *   To circumvent this problem there are begin/end coherency markers, that
 *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 *   sequence would be used like the following:
 *
 *     - mmap dma-buf fd
 *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *       want (with the new data being consumed by say the GPU or the scanout
 *       device)
 *     - munmap once you don't need the buffer any more
 *
 *   For correctness and optimal performance, it is always required to use
 *   SYNC_START and SYNC_END before and after, respectively, when accessing the
 *   mapped address. Userspace cannot rely on coherent access, even when there
 *   are systems where it just works without calling these ioctls. A short
 *   userspace sketch of this SYNC sequence follows this comment.
 *
 * - And as a CPU fallback in userspace processing pipelines.
 *
 *   Similar to the motivation for kernel cpu access it is again important that
 *   the userspace code of a given importing subsystem can use the same
 *   interfaces with an imported dma-buf buffer object as with a native buffer
 *   object. This is especially important for drm where the userspace part of
 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer would be rather invasive.
 *
 *   The assumption in the current dma-buf interfaces is that redirecting the
 *   initial mmap is all that's needed. A survey of some of the existing
 *   subsystems shows that no driver seems to do any nefarious thing like
 *   syncing up with outstanding asynchronous processing on the device or
 *   allocating special resources at fault time. So hopefully this is good
 *   enough, since adding interfaces to intercept pagefaults and allow pte
 *   shootdowns would increase the complexity quite a bit.
 *
 *   Interface::
 *
 *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
 *                       unsigned long);
 *
 *   If the importing subsystem simply provides a special-purpose mmap call to
 *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
 *   equally achieve that for a dma-buf object.
 */
1157
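/*
 * Userspace sketch of the mmap + DMA_BUF_IOCTL_SYNC sequence described above
 * (illustrative only; dmabuf_fd, len and the missing error handling are
 * assumptions)::
 *
 *	struct dma_buf_sync sync = { 0 };
 *	void *ptr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 dmabuf_fd, 0);
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	// begin CPU access
 *
 *	// ... read/write through ptr ...
 *
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	// end CPU access
 *
 *	munmap(ptr, len);
 */
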
ae4e46b1
CW
1158static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1159 enum dma_data_direction direction)
1160{
1161 bool write = (direction == DMA_BIDIRECTIONAL ||
1162 direction == DMA_TO_DEVICE);
52791eee 1163 struct dma_resv *resv = dmabuf->resv;
ae4e46b1
CW
1164 long ret;
1165
1166 /* Wait on any implicit rendering fences */
d3fae3b3 1167 ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
ae4e46b1
CW
1168 if (ret < 0)
1169 return ret;
1170
1171 return 0;
1172}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of the cpu access (see enum dma_data_direction).
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * This function will also wait for any DMA transactions tracked through
 * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
 * synchronization this function will only ensure cache coherency, callers must
 * ensure synchronization with such DMA transactions on their own.
 *
 * Can return negative error values, returns 0 on success.
 */
831e9da7 1193int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
fc13020e
DV
1194 enum dma_data_direction direction)
1195{
1196 int ret = 0;
1197
1198 if (WARN_ON(!dmabuf))
1199 return -EINVAL;
1200
8ccf0a29
DV
1201 might_lock(&dmabuf->resv->lock.base);
1202
fc13020e 1203 if (dmabuf->ops->begin_cpu_access)
831e9da7 1204 ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
fc13020e 1205
ae4e46b1
CW
1206 /* Ensure that all fences are waited upon - but we first allow
1207 * the native handler the chance to do so more efficiently if it
1208 * chooses. A double invocation here will be reasonably cheap no-op.
1209 */
1210 if (ret == 0)
1211 ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1212
fc13020e
DV
1213 return ret;
1214}
1215EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
1216
/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of the cpu access (see enum dma_data_direction).
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
18b862dc
CW
1229int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1230 enum dma_data_direction direction)
fc13020e 1231{
18b862dc
CW
1232 int ret = 0;
1233
fc13020e
DV
1234 WARN_ON(!dmabuf);
1235
8ccf0a29
DV
1236 might_lock(&dmabuf->resv->lock.base);
1237
fc13020e 1238 if (dmabuf->ops->end_cpu_access)
18b862dc
CW
1239 ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
1240
1241 return ret;
fc13020e
DV
1242}
1243EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
1244
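/*
 * Kernel-internal sketch of bracketing CPU access with the two calls above
 * (illustrative only; dmabuf and the actual data access are assumptions)::
 *
 *	int ret;
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *
 *	// ... CPU reads of the backing storage, e.g. through a vmap ...
 *
 *	ret = dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */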

/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
1260int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1261 unsigned long pgoff)
1262{
1263 if (WARN_ON(!dmabuf || !vma))
1264 return -EINVAL;
1265
e3a9d6c5
AD
1266 /* check if buffer supports mmap */
1267 if (!dmabuf->ops->mmap)
1268 return -EINVAL;
1269
4c78513e 1270 /* check for offset overflow */
b02da6f8 1271 if (pgoff + vma_pages(vma) < pgoff)
4c78513e
DV
1272 return -EOVERFLOW;
1273
1274 /* check for overflowing the buffer's size */
b02da6f8 1275 if (pgoff + vma_pages(vma) >
4c78513e
DV
1276 dmabuf->size >> PAGE_SHIFT)
1277 return -EINVAL;
1278
1279 /* readjust the vma */
295992fb 1280 vma_set_file(vma, dmabuf->file);
4c78513e
DV
1281 vma->vm_pgoff = pgoff;
1282
1527f926 1283 return dmabuf->ops->mmap(dmabuf, vma);
4c78513e
DV
1284}
1285EXPORT_SYMBOL_GPL(dma_buf_mmap);
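
/*
 * Sketch of an importer forwarding its own mmap handler to dma_buf_mmap()
 * (illustrative only; my_obj and its dmabuf member are assumptions)::
 *
 *	static int my_obj_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_obj *obj = file->private_data;
 *
 *		// start the mapping at page 0 of the dma-buf
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */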

/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 * @map:	[out]	returns the vmap pointer
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linear in kernel space for high use objects.
 *
 * To ensure coherency users must call dma_buf_begin_cpu_access() and
 * dma_buf_end_cpu_access() around any cpu access performed through this
 * mapping.
 *
 * Returns 0 on success, or a negative errno code otherwise.
 */
6619ccf1 1303int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
98f86c9e 1304{
6619ccf1
TZ
1305 struct dma_buf_map ptr;
1306 int ret = 0;
1307
1308 dma_buf_map_clear(map);
f00b4dad 1309
98f86c9e 1310 if (WARN_ON(!dmabuf))
6619ccf1 1311 return -EINVAL;
98f86c9e 1312
f00b4dad 1313 if (!dmabuf->ops->vmap)
6619ccf1 1314 return -EINVAL;
f00b4dad
DV
1315
1316 mutex_lock(&dmabuf->lock);
1317 if (dmabuf->vmapping_counter) {
1318 dmabuf->vmapping_counter++;
01fd30da 1319 BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
6619ccf1 1320 *map = dmabuf->vmap_ptr;
f00b4dad
DV
1321 goto out_unlock;
1322 }
1323
01fd30da 1324 BUG_ON(dma_buf_map_is_set(&dmabuf->vmap_ptr));
f00b4dad 1325
6619ccf1
TZ
1326 ret = dmabuf->ops->vmap(dmabuf, &ptr);
1327 if (WARN_ON_ONCE(ret))
f00b4dad
DV
1328 goto out_unlock;
1329
6619ccf1 1330 dmabuf->vmap_ptr = ptr;
f00b4dad
DV
1331 dmabuf->vmapping_counter = 1;
1332
6619ccf1
TZ
1333 *map = dmabuf->vmap_ptr;
1334
f00b4dad
DV
1335out_unlock:
1336 mutex_unlock(&dmabuf->lock);
6619ccf1 1337 return ret;
98f86c9e
DA
1338}
1339EXPORT_SYMBOL_GPL(dma_buf_vmap);
1340
/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @map:	[in]	vmap pointer to vunmap
 */
20e76f1a 1346void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
98f86c9e
DA
1347{
1348 if (WARN_ON(!dmabuf))
1349 return;
1350
01fd30da 1351 BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
f00b4dad 1352 BUG_ON(dmabuf->vmapping_counter == 0);
20e76f1a 1353 BUG_ON(!dma_buf_map_is_equal(&dmabuf->vmap_ptr, map));
f00b4dad
DV
1354
1355 mutex_lock(&dmabuf->lock);
1356 if (--dmabuf->vmapping_counter == 0) {
1357 if (dmabuf->ops->vunmap)
20e76f1a 1358 dmabuf->ops->vunmap(dmabuf, map);
01fd30da 1359 dma_buf_map_clear(&dmabuf->vmap_ptr);
f00b4dad
DV
1360 }
1361 mutex_unlock(&dmabuf->lock);
98f86c9e
DA
1362}
1363EXPORT_SYMBOL_GPL(dma_buf_vunmap);
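
/*
 * Sketch of a kernel CPU fallback using the vmap interface (illustrative only;
 * dmabuf and what is done with the mapping are assumptions)::
 *
 *	struct dma_buf_map map;
 *	int ret;
 *
 *	ret = dma_buf_vmap(dmabuf, &map);
 *	if (ret)
 *		return ret;
 *
 *	dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	// map.vaddr (or map.vaddr_iomem) now points at the buffer contents
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *
 *	dma_buf_vunmap(dmabuf, &map);
 */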
b89e3563
SS
1364
1365#ifdef CONFIG_DEBUG_FS
eb0b947e 1366static int dma_buf_debug_show(struct seq_file *s, void *unused)
b89e3563 1367{
b89e3563
SS
1368 struct dma_buf *buf_obj;
1369 struct dma_buf_attachment *attach_obj;
52791eee
CK
1370 struct dma_resv *robj;
1371 struct dma_resv_list *fobj;
5eb2c72c 1372 struct dma_fence *fence;
5eb2c72c 1373 int count = 0, attach_count, shared_count, i;
b89e3563 1374 size_t size = 0;
680753dd 1375 int ret;
b89e3563
SS
1376
1377 ret = mutex_lock_interruptible(&db_list.lock);
1378
1379 if (ret)
1380 return ret;
1381
c0b00a52 1382 seq_puts(s, "\nDma-buf Objects:\n");
ed63bb1d
GH
1383 seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
1384 "size", "flags", "mode", "count", "ino");
b89e3563
SS
1385
1386 list_for_each_entry(buf_obj, &db_list.head, list_node) {
15fd552d 1387
15fd552d
CK
1388 ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
1389 if (ret)
f45f57cc 1390 goto error_unlock;
b89e3563 1391
bb2bb903 1392 seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
c0b00a52 1393 buf_obj->size,
b89e3563 1394 buf_obj->file->f_flags, buf_obj->file->f_mode,
a1f6dbac 1395 file_count(buf_obj->file),
ed63bb1d 1396 buf_obj->exp_name,
bb2bb903
GH
1397 file_inode(buf_obj->file)->i_ino,
1398 buf_obj->name ?: "");
b89e3563 1399
5eb2c72c 1400 robj = buf_obj->resv;
6edbd6ab 1401 fence = dma_resv_excl_fence(robj);
5eb2c72c
RK
1402 if (fence)
1403 seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
1404 fence->ops->get_driver_name(fence),
1405 fence->ops->get_timeline_name(fence),
1406 dma_fence_is_signaled(fence) ? "" : "un");
680753dd
CK
1407
1408 fobj = rcu_dereference_protected(robj->fence,
1409 dma_resv_held(robj));
1410 shared_count = fobj ? fobj->shared_count : 0;
5eb2c72c 1411 for (i = 0; i < shared_count; i++) {
680753dd
CK
1412 fence = rcu_dereference_protected(fobj->shared[i],
1413 dma_resv_held(robj));
5eb2c72c
RK
1414 seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
1415 fence->ops->get_driver_name(fence),
1416 fence->ops->get_timeline_name(fence),
1417 dma_fence_is_signaled(fence) ? "" : "un");
1418 }
5eb2c72c 1419
c0b00a52 1420 seq_puts(s, "\tAttached Devices:\n");
b89e3563
SS
1421 attach_count = 0;
1422
1423 list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
9eddb41d 1424 seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
b89e3563
SS
1425 attach_count++;
1426 }
15fd552d 1427 dma_resv_unlock(buf_obj->resv);
b89e3563 1428
c0b00a52 1429 seq_printf(s, "Total %d devices attached\n\n",
b89e3563
SS
1430 attach_count);
1431
1432 count++;
1433 size += buf_obj->size;
b89e3563
SS
1434 }
1435
1436 seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1437
1438 mutex_unlock(&db_list.lock);
1439 return 0;
15fd552d 1440
f45f57cc 1441error_unlock:
15fd552d
CK
1442 mutex_unlock(&db_list.lock);
1443 return ret;
b89e3563
SS
1444}
1445
2674305a 1446DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
b89e3563
SS
1447
1448static struct dentry *dma_buf_debugfs_dir;
1449
1450static int dma_buf_init_debugfs(void)
1451{
bd3e2208 1452 struct dentry *d;
b89e3563 1453 int err = 0;
5136629d 1454
bd3e2208
MK
1455 d = debugfs_create_dir("dma_buf", NULL);
1456 if (IS_ERR(d))
1457 return PTR_ERR(d);
5136629d 1458
bd3e2208 1459 dma_buf_debugfs_dir = d;
b89e3563 1460
bd3e2208
MK
1461 d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1462 NULL, &dma_buf_debug_fops);
1463 if (IS_ERR(d)) {
b89e3563 1464 pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
b7479990
MK
1465 debugfs_remove_recursive(dma_buf_debugfs_dir);
1466 dma_buf_debugfs_dir = NULL;
bd3e2208 1467 err = PTR_ERR(d);
b7479990 1468 }
b89e3563
SS
1469
1470 return err;
1471}
1472
1473static void dma_buf_uninit_debugfs(void)
1474{
298b6a81 1475 debugfs_remove_recursive(dma_buf_debugfs_dir);
b89e3563 1476}
b89e3563
SS
1477#else
1478static inline int dma_buf_init_debugfs(void)
1479{
1480 return 0;
1481}
1482static inline void dma_buf_uninit_debugfs(void)
1483{
1484}
1485#endif
1486
1487static int __init dma_buf_init(void)
1488{
bdb8d06d
HV
1489 int ret;
1490
1491 ret = dma_buf_init_sysfs_statistics();
1492 if (ret)
1493 return ret;
1494
ed63bb1d
GH
1495 dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1496 if (IS_ERR(dma_buf_mnt))
1497 return PTR_ERR(dma_buf_mnt);
1498
b89e3563
SS
1499 mutex_init(&db_list.lock);
1500 INIT_LIST_HEAD(&db_list.head);
1501 dma_buf_init_debugfs();
1502 return 0;
1503}
1504subsys_initcall(dma_buf_init);
1505
1506static void __exit dma_buf_deinit(void)
1507{
1508 dma_buf_uninit_debugfs();
ed63bb1d 1509 kern_unmount(dma_buf_mnt);
bdb8d06d 1510 dma_buf_uninit_sysfs_statistics();
b89e3563
SS
1511}
1512__exitcall(dma_buf_deinit);