Commit | Line | Data |
---|---|---|
caab277b | 1 | // SPDX-License-Identifier: GPL-2.0-only |
d15bd7ee SS |
2 | /* |
3 | * Framework for buffer objects that can be shared across devices/subsystems. | |
4 | * | |
5 | * Copyright(C) 2011 Linaro Limited. All rights reserved. | |
6 | * Author: Sumit Semwal <sumit.semwal@ti.com> | |
7 | * | |
8 | * Many thanks to linaro-mm-sig list, and especially | |
9 | * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and | |
10 | * Daniel Vetter <daniel@ffwll.ch> for their support in creation and | |
11 | * refining of this idea. | |
d15bd7ee SS |
12 | */ |
13 | ||
14 | #include <linux/fs.h> | |
15 | #include <linux/slab.h> | |
16 | #include <linux/dma-buf.h> | |
f54d1867 | 17 | #include <linux/dma-fence.h> |
c19083c7 | 18 | #include <linux/dma-fence-unwrap.h> |
d15bd7ee SS |
19 | #include <linux/anon_inodes.h> |
20 | #include <linux/export.h> | |
b89e3563 | 21 | #include <linux/debugfs.h> |
9abdffe2 | 22 | #include <linux/module.h> |
b89e3563 | 23 | #include <linux/seq_file.h> |
20e10881 | 24 | #include <linux/sync_file.h> |
9b495a58 | 25 | #include <linux/poll.h> |
52791eee | 26 | #include <linux/dma-resv.h> |
b02da6f8 | 27 | #include <linux/mm.h> |
ed63bb1d | 28 | #include <linux/mount.h> |
933a90bf | 29 | #include <linux/pseudo_fs.h> |
d15bd7ee | 30 | |
c11e391d | 31 | #include <uapi/linux/dma-buf.h> |
ed63bb1d | 32 | #include <uapi/linux/magic.h> |
c11e391d | 33 | |
bdb8d06d HV |
34 | #include "dma-buf-sysfs-stats.h" |
35 | ||
d15bd7ee SS |
36 | static inline int is_dma_buf_file(struct file *); |
37 | ||
bfc7bc53 TU |
38 | #if IS_ENABLED(CONFIG_DEBUG_FS) |
39 | static DEFINE_MUTEX(debugfs_list_mutex); | |
40 | static LIST_HEAD(debugfs_list); | |
b89e3563 | 41 | |
bfc7bc53 TU |
42 | static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf) |
43 | { | |
44 | mutex_lock(&debugfs_list_mutex); | |
45 | list_add(&dmabuf->list_node, &debugfs_list); | |
46 | mutex_unlock(&debugfs_list_mutex); | |
47 | } | |
48 | ||
49 | static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf) | |
50 | { | |
51 | if (!dmabuf) | |
52 | return; | |
53 | ||
54 | mutex_lock(&debugfs_list_mutex); | |
55 | list_del(&dmabuf->list_node); | |
56 | mutex_unlock(&debugfs_list_mutex); | |
57 | } | |
58 | #else | |
59 | static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf) | |
60 | { | |
61 | } | |
62 | ||
63 | static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf) | |
64 | { | |
65 | } | |
66 | #endif | |
b89e3563 | 67 | |
bb2bb903 GH |
68 | static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen) |
69 | { | |
70 | struct dma_buf *dmabuf; | |
71 | char name[DMA_BUF_NAME_LEN]; | |
a3232428 | 72 | ssize_t ret = 0; |
bb2bb903 GH |
73 | |
74 | dmabuf = dentry->d_fsdata; | |
6348dd29 | 75 | spin_lock(&dmabuf->name_lock); |
bb2bb903 | 76 | if (dmabuf->name) |
a3232428 | 77 | ret = strscpy(name, dmabuf->name, sizeof(name)); |
6348dd29 | 78 | spin_unlock(&dmabuf->name_lock); |
bb2bb903 | 79 | |
0f60d288 | 80 | return dynamic_dname(buffer, buflen, "/%s:%s", |
bb2bb903 GH |
81 | dentry->d_name.name, ret > 0 ? name : ""); |
82 | } | |
83 | ||
4ab59c3c | 84 | static void dma_buf_release(struct dentry *dentry) |
d15bd7ee SS |
85 | { |
86 | struct dma_buf *dmabuf; | |
87 | ||
4ab59c3c | 88 | dmabuf = dentry->d_fsdata; |
19a508bd CTR |
89 | if (unlikely(!dmabuf)) |
90 | return; | |
d15bd7ee | 91 | |
f00b4dad DV |
92 | BUG_ON(dmabuf->vmapping_counter); |
93 | ||
9b495a58 | 94 | /* |
ff2d2384 MD |
95 | * If you hit this BUG() it could mean: |
96 | * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else | |
97 | * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback | |
9b495a58 | 98 | */ |
6b51b02a | 99 | BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active); |
9b495a58 | 100 | |
63c57e8d | 101 | dma_buf_stats_teardown(dmabuf); |
d15bd7ee | 102 | dmabuf->ops->release(dmabuf); |
b89e3563 | 103 | |
52791eee CK |
104 | if (dmabuf->resv == (struct dma_resv *)&dmabuf[1]) |
105 | dma_resv_fini(dmabuf->resv); | |
3aac4502 | 106 | |
f492283b | 107 | WARN_ON(!list_empty(&dmabuf->attachments)); |
9abdffe2 | 108 | module_put(dmabuf->owner); |
d1f37226 | 109 | kfree(dmabuf->name); |
d15bd7ee | 110 | kfree(dmabuf); |
4ab59c3c SS |
111 | } |
112 | ||
05cd8469 CTR |
113 | static int dma_buf_file_release(struct inode *inode, struct file *file) |
114 | { | |
05cd8469 CTR |
115 | if (!is_dma_buf_file(file)) |
116 | return -EINVAL; | |
117 | ||
bfc7bc53 | 118 | __dma_buf_debugfs_list_del(file->private_data); |
05cd8469 CTR |
119 | |
120 | return 0; | |
121 | } | |
122 | ||
4ab59c3c SS |
123 | static const struct dentry_operations dma_buf_dentry_ops = { |
124 | .d_dname = dmabuffs_dname, | |
125 | .d_release = dma_buf_release, | |
126 | }; | |
127 | ||
128 | static struct vfsmount *dma_buf_mnt; | |
129 | ||
130 | static int dma_buf_fs_init_context(struct fs_context *fc) | |
131 | { | |
132 | struct pseudo_fs_context *ctx; | |
133 | ||
134 | ctx = init_pseudo(fc, DMA_BUF_MAGIC); | |
135 | if (!ctx) | |
136 | return -ENOMEM; | |
137 | ctx->dops = &dma_buf_dentry_ops; | |
d15bd7ee SS |
138 | return 0; |
139 | } | |
140 | ||
4ab59c3c SS |
141 | static struct file_system_type dma_buf_fs_type = { |
142 | .name = "dmabuf", | |
143 | .init_fs_context = dma_buf_fs_init_context, | |
144 | .kill_sb = kill_anon_super, | |
145 | }; | |
146 | ||
4c78513e DV |
147 | static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) |
148 | { | |
149 | struct dma_buf *dmabuf; | |
150 | ||
151 | if (!is_dma_buf_file(file)) | |
152 | return -EINVAL; | |
153 | ||
154 | dmabuf = file->private_data; | |
155 | ||
e3a9d6c5 AD |
156 | /* check if buffer supports mmap */ |
157 | if (!dmabuf->ops->mmap) | |
158 | return -EINVAL; | |
159 | ||
4c78513e | 160 | /* check for overflowing the buffer's size */ |
b02da6f8 | 161 | if (vma->vm_pgoff + vma_pages(vma) > |
4c78513e DV |
162 | dmabuf->size >> PAGE_SHIFT) |
163 | return -EINVAL; | |
164 | ||
8021fa16 | 165 | return dmabuf->ops->mmap(dmabuf, vma); |
4c78513e DV |
166 | } |
167 | ||
19e8697b CJHR |
168 | static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) |
169 | { | |
170 | struct dma_buf *dmabuf; | |
171 | loff_t base; | |
172 | ||
173 | if (!is_dma_buf_file(file)) | |
174 | return -EBADF; | |
175 | ||
176 | dmabuf = file->private_data; | |
177 | ||
178 | /* only support discovering the end of the buffer, | |
179 | but also allow SEEK_SET to maintain the idiomatic | |
180 | SEEK_END(0), SEEK_CUR(0) pattern */ | |
181 | if (whence == SEEK_END) | |
182 | base = dmabuf->size; | |
183 | else if (whence == SEEK_SET) | |
184 | base = 0; | |
185 | else | |
186 | return -EINVAL; | |
187 | ||
188 | if (offset != 0) | |
189 | return -EINVAL; | |
190 | ||
191 | return base + offset; | |
192 | } | |
193 | ||
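The llseek handler above only exists so userspace can discover the buffer size. A minimal userspace sketch of that pattern follows; the dma-buf fd is assumed to have been obtained elsewhere (e.g. from a driver ioctl), and error handling is abbreviated:

```c
#include <unistd.h>
#include <sys/types.h>

/* Discover the size of a dma-buf: SEEK_END with offset 0 returns the
 * size, SEEK_SET with offset 0 resets the (otherwise unused) position.
 * Any other whence value or a non-zero offset is rejected with EINVAL. */
static off_t dma_buf_size(int dmabuf_fd)
{
	off_t size = lseek(dmabuf_fd, 0, SEEK_END);

	if (size < 0)
		return -1;
	if (lseek(dmabuf_fd, 0, SEEK_SET) < 0)
		return -1;
	return size;
}
```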
e7e21c72 | 194 | /** |
102514ec | 195 | * DOC: implicit fence polling |
e7e21c72 DV |
196 | * |
197 | * To support cross-device and cross-driver synchronization of buffer access | |
102514ec DV |
198 | * implicit fences (represented internally in the kernel with &struct dma_fence) |
199 | * can be attached to a &dma_buf. The glue for that and a few related things are | |
52791eee | 200 | * provided in the &dma_resv structure. |
e7e21c72 DV |
201 | * |
202 | * Userspace can query the state of these implicitly tracked fences using poll() | |
203 | * and related system calls: | |
204 | * | |
a9a08845 | 205 | * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the |
e7e21c72 DV |
206 | * most recent write or exclusive fence. |
207 | * | |
a9a08845 | 208 | * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of |
e7e21c72 DV |
209 | * all attached fences, shared and exclusive ones. |
210 | * | |
211 | * Note that this only signals the completion of the respective fences, i.e. the | |
212 | * DMA transfers are complete. Cache flushing and any other necessary | |
213 | * preparations before CPU access can begin still need to happen. | |
20e10881 JE |
214 | * |
215 | * As an alternative to poll(), the set of fences on a DMA buffer can be |
216 | * exported as a &sync_file using dma_buf_export_sync_file(). |
e7e21c72 DV |
217 | */ |
218 | ||
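A minimal userspace sketch of the poll() usage described above; the fd is assumed to be a dma-buf file descriptor, and remember that a signalled fence only means the DMA is done, not that caches are coherent:

```c
#include <poll.h>
#include <stdbool.h>

/* Block for up to timeout_ms until the most recent write/exclusive fence
 * has signalled (POLLIN); use POLLOUT instead to wait for all attached
 * fences, shared and exclusive. */
static bool dma_buf_wait_readable(int dmabuf_fd, int timeout_ms)
{
	struct pollfd pfd = {
		.fd = dmabuf_fd,
		.events = POLLIN,
	};

	return poll(&pfd, 1, timeout_ms) == 1 && (pfd.revents & POLLIN);
}
```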
f54d1867 | 219 | static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb) |
9b495a58 ML |
220 | { |
221 | struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb; | |
ff2d2384 | 222 | struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll); |
9b495a58 ML |
223 | unsigned long flags; |
224 | ||
225 | spin_lock_irqsave(&dcb->poll->lock, flags); | |
226 | wake_up_locked_poll(dcb->poll, dcb->active); | |
227 | dcb->active = 0; | |
228 | spin_unlock_irqrestore(&dcb->poll->lock, flags); | |
6b51b02a | 229 | dma_fence_put(fence); |
ff2d2384 MD |
230 | /* Paired with get_file in dma_buf_poll */ |
231 | fput(dmabuf->file); | |
6b51b02a CK |
232 | } |
233 | ||
0a42016d | 234 | static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write, |
6b51b02a CK |
235 | struct dma_buf_poll_cb_t *dcb) |
236 | { | |
0a42016d | 237 | struct dma_resv_iter cursor; |
6b51b02a | 238 | struct dma_fence *fence; |
0a42016d | 239 | int r; |
6b51b02a | 240 | |
7bc80a54 CK |
241 | dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write), |
242 | fence) { | |
6b51b02a CK |
243 | dma_fence_get(fence); |
244 | r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb); | |
245 | if (!r) | |
246 | return true; | |
247 | dma_fence_put(fence); | |
248 | } | |
249 | ||
250 | return false; | |
251 | } | |
252 | ||
afc9a42b | 253 | static __poll_t dma_buf_poll(struct file *file, poll_table *poll) |
9b495a58 ML |
254 | { |
255 | struct dma_buf *dmabuf; | |
52791eee | 256 | struct dma_resv *resv; |
01699437 | 257 | __poll_t events; |
9b495a58 ML |
258 | |
259 | dmabuf = file->private_data; | |
260 | if (!dmabuf || !dmabuf->resv) | |
a9a08845 | 261 | return EPOLLERR; |
9b495a58 ML |
262 | |
263 | resv = dmabuf->resv; | |
264 | ||
265 | poll_wait(file, &dmabuf->poll, poll); | |
266 | ||
a9a08845 | 267 | events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT); |
9b495a58 ML |
268 | if (!events) |
269 | return 0; | |
270 | ||
6b51b02a | 271 | dma_resv_lock(resv, NULL); |
9b495a58 | 272 | |
6b51b02a CK |
273 | if (events & EPOLLOUT) { |
274 | struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out; | |
9b495a58 | 275 | |
6b51b02a | 276 | /* Check that callback isn't busy */ |
9b495a58 | 277 | spin_lock_irq(&dmabuf->poll.lock); |
6b51b02a CK |
278 | if (dcb->active) |
279 | events &= ~EPOLLOUT; | |
280 | else | |
281 | dcb->active = EPOLLOUT; | |
9b495a58 ML |
282 | spin_unlock_irq(&dmabuf->poll.lock); |
283 | ||
6b51b02a | 284 | if (events & EPOLLOUT) { |
ff2d2384 MD |
285 | /* Paired with fput in dma_buf_poll_cb */ |
286 | get_file(dmabuf->file); | |
287 | ||
0a42016d | 288 | if (!dma_buf_poll_add_cb(resv, true, dcb)) |
6b51b02a | 289 | /* No callback queued, wake up any other waiters */ |
9b495a58 | 290 | dma_buf_poll_cb(NULL, &dcb->cb); |
6b51b02a CK |
291 | else |
292 | events &= ~EPOLLOUT; | |
9b495a58 ML |
293 | } |
294 | } | |
295 | ||
6b51b02a CK |
296 | if (events & EPOLLIN) { |
297 | struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in; | |
9b495a58 | 298 | |
6b51b02a | 299 | /* Check that callback isn't busy */ |
9b495a58 ML |
300 | spin_lock_irq(&dmabuf->poll.lock); |
301 | if (dcb->active) | |
6b51b02a | 302 | events &= ~EPOLLIN; |
9b495a58 | 303 | else |
6b51b02a | 304 | dcb->active = EPOLLIN; |
9b495a58 ML |
305 | spin_unlock_irq(&dmabuf->poll.lock); |
306 | ||
6b51b02a | 307 | if (events & EPOLLIN) { |
ff2d2384 MD |
308 | /* Paired with fput in dma_buf_poll_cb */ |
309 | get_file(dmabuf->file); | |
310 | ||
0a42016d | 311 | if (!dma_buf_poll_add_cb(resv, false, dcb)) |
6b51b02a | 312 | /* No callback queued, wake up any other waiters */ |
3c3b177a | 313 | dma_buf_poll_cb(NULL, &dcb->cb); |
6b51b02a CK |
314 | else |
315 | events &= ~EPOLLIN; | |
04a5faa8 | 316 | } |
9b495a58 ML |
317 | } |
318 | ||
6b51b02a | 319 | dma_resv_unlock(resv); |
9b495a58 ML |
320 | return events; |
321 | } | |
322 | ||
bb2bb903 GH |
323 | /** |
324 | * dma_buf_set_name - Set a name for a specific dma_buf to track usage. |
e73c317e GC |
325 | * It could support changing the name of the dma-buf if the same |
326 | * piece of memory is used for multiple purposes between different devices. |
bb2bb903 | 327 | * |
6d3ba803 KK |
328 | * @dmabuf: [in] dmabuf buffer that will be renamed. |
329 | * @buf: [in] A piece of userspace memory that contains the name of | |
330 | * the dma-buf. | |
bb2bb903 GH |
331 | * |
332 | * Returns 0 on success. If the userspace name cannot be copied, returns |
333 | * the error from strndup_user(). |
334 | * | |
335 | */ | |
336 | static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf) | |
337 | { | |
338 | char *name = strndup_user(buf, DMA_BUF_NAME_LEN); | |
bb2bb903 GH |
339 | |
340 | if (IS_ERR(name)) | |
341 | return PTR_ERR(name); | |
342 | ||
6348dd29 | 343 | spin_lock(&dmabuf->name_lock); |
bb2bb903 GH |
344 | kfree(dmabuf->name); |
345 | dmabuf->name = name; | |
6348dd29 | 346 | spin_unlock(&dmabuf->name_lock); |
bb2bb903 | 347 | |
e73c317e | 348 | return 0; |
bb2bb903 GH |
349 | } |
350 | ||
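For reference, a userspace sketch of driving this through the DMA_BUF_SET_NAME ioctl from <linux/dma-buf.h>; the name must fit in DMA_BUF_NAME_LEN and is only used for accounting (dentry name, fdinfo, debugfs):

```c
#include <sys/ioctl.h>
#include <linux/dma-buf.h>

/* Tag a dma-buf with a human-readable name for debugging/accounting. */
static int dma_buf_name(int dmabuf_fd, const char *name)
{
	return ioctl(dmabuf_fd, DMA_BUF_SET_NAME, name);
}
```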
20e10881 JE |
351 | #if IS_ENABLED(CONFIG_SYNC_FILE) |
352 | static long dma_buf_export_sync_file(struct dma_buf *dmabuf, | |
353 | void __user *user_data) | |
354 | { | |
355 | struct dma_buf_export_sync_file arg; | |
356 | enum dma_resv_usage usage; | |
357 | struct dma_fence *fence = NULL; | |
358 | struct sync_file *sync_file; | |
359 | int fd, ret; | |
360 | ||
361 | if (copy_from_user(&arg, user_data, sizeof(arg))) | |
362 | return -EFAULT; | |
363 | ||
364 | if (arg.flags & ~DMA_BUF_SYNC_RW) | |
365 | return -EINVAL; | |
366 | ||
367 | if ((arg.flags & DMA_BUF_SYNC_RW) == 0) | |
368 | return -EINVAL; | |
369 | ||
370 | fd = get_unused_fd_flags(O_CLOEXEC); | |
371 | if (fd < 0) | |
372 | return fd; | |
373 | ||
374 | usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE); | |
375 | ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence); | |
376 | if (ret) | |
377 | goto err_put_fd; | |
378 | ||
379 | if (!fence) | |
380 | fence = dma_fence_get_stub(); | |
381 | ||
382 | sync_file = sync_file_create(fence); | |
383 | ||
384 | dma_fence_put(fence); | |
385 | ||
386 | if (!sync_file) { | |
387 | ret = -ENOMEM; | |
388 | goto err_put_fd; | |
389 | } | |
390 | ||
391 | arg.fd = fd; | |
392 | if (copy_to_user(user_data, &arg, sizeof(arg))) { | |
393 | ret = -EFAULT; | |
394 | goto err_put_file; | |
395 | } | |
396 | ||
397 | fd_install(fd, sync_file->file); | |
398 | ||
399 | return 0; | |
400 | ||
401 | err_put_file: | |
402 | fput(sync_file->file); | |
403 | err_put_fd: | |
404 | put_unused_fd(fd); | |
405 | return ret; | |
406 | } | |
59474049 JE |
407 | |
408 | static long dma_buf_import_sync_file(struct dma_buf *dmabuf, | |
409 | const void __user *user_data) | |
410 | { | |
411 | struct dma_buf_import_sync_file arg; | |
c19083c7 | 412 | struct dma_fence *fence, *f; |
59474049 | 413 | enum dma_resv_usage usage; |
c19083c7 JE |
414 | struct dma_fence_unwrap iter; |
415 | unsigned int num_fences; | |
59474049 JE |
416 | int ret = 0; |
417 | ||
418 | if (copy_from_user(&arg, user_data, sizeof(arg))) | |
419 | return -EFAULT; | |
420 | ||
421 | if (arg.flags & ~DMA_BUF_SYNC_RW) | |
422 | return -EINVAL; | |
423 | ||
424 | if ((arg.flags & DMA_BUF_SYNC_RW) == 0) | |
425 | return -EINVAL; | |
426 | ||
427 | fence = sync_file_get_fence(arg.fd); | |
428 | if (!fence) | |
429 | return -EINVAL; | |
430 | ||
431 | usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE : | |
432 | DMA_RESV_USAGE_READ; | |
433 | ||
c19083c7 JE |
434 | num_fences = 0; |
435 | dma_fence_unwrap_for_each(f, &iter, fence) | |
436 | ++num_fences; | |
437 | ||
438 | if (num_fences > 0) { | |
439 | dma_resv_lock(dmabuf->resv, NULL); | |
59474049 | 440 | |
c19083c7 JE |
441 | ret = dma_resv_reserve_fences(dmabuf->resv, num_fences); |
442 | if (!ret) { | |
443 | dma_fence_unwrap_for_each(f, &iter, fence) | |
444 | dma_resv_add_fence(dmabuf->resv, f, usage); | |
445 | } | |
59474049 | 446 | |
c19083c7 JE |
447 | dma_resv_unlock(dmabuf->resv); |
448 | } | |
59474049 JE |
449 | |
450 | dma_fence_put(fence); | |
451 | ||
452 | return ret; | |
453 | } | |
20e10881 JE |
454 | #endif |
455 | ||
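A userspace sketch of the two ioctls above (only available when CONFIG_SYNC_FILE is enabled); the struct layouts come from <linux/dma-buf.h>, and error handling is reduced to the bare minimum:

```c
#include <sys/ioctl.h>
#include <linux/dma-buf.h>

/* Snapshot the fences a reader would have to wait for (i.e. pending
 * writes) as a sync_file; returns the new fd or -1 on error. */
static int export_read_fences(int dmabuf_fd)
{
	struct dma_buf_export_sync_file arg = {
		.flags = DMA_BUF_SYNC_READ,
	};

	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &arg))
		return -1;
	return arg.fd;
}

/* Attach a sync_file as a write fence, so later implicit-sync users
 * wait for it before touching the buffer. */
static int import_write_fence(int dmabuf_fd, int sync_file_fd)
{
	struct dma_buf_import_sync_file arg = {
		.flags = DMA_BUF_SYNC_WRITE,
		.fd = sync_file_fd,
	};

	return ioctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &arg);
}
```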
c11e391d DV |
456 | static long dma_buf_ioctl(struct file *file, |
457 | unsigned int cmd, unsigned long arg) | |
458 | { | |
459 | struct dma_buf *dmabuf; | |
460 | struct dma_buf_sync sync; | |
461 | enum dma_data_direction direction; | |
18b862dc | 462 | int ret; |
c11e391d DV |
463 | |
464 | dmabuf = file->private_data; | |
465 | ||
466 | switch (cmd) { | |
467 | case DMA_BUF_IOCTL_SYNC: | |
468 | if (copy_from_user(&sync, (void __user *) arg, sizeof(sync))) | |
469 | return -EFAULT; | |
470 | ||
471 | if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK) | |
472 | return -EINVAL; | |
473 | ||
474 | switch (sync.flags & DMA_BUF_SYNC_RW) { | |
475 | case DMA_BUF_SYNC_READ: | |
476 | direction = DMA_FROM_DEVICE; | |
477 | break; | |
478 | case DMA_BUF_SYNC_WRITE: | |
479 | direction = DMA_TO_DEVICE; | |
480 | break; | |
481 | case DMA_BUF_SYNC_RW: | |
482 | direction = DMA_BIDIRECTIONAL; | |
483 | break; | |
484 | default: | |
485 | return -EINVAL; | |
486 | } | |
487 | ||
488 | if (sync.flags & DMA_BUF_SYNC_END) | |
18b862dc | 489 | ret = dma_buf_end_cpu_access(dmabuf, direction); |
c11e391d | 490 | else |
18b862dc | 491 | ret = dma_buf_begin_cpu_access(dmabuf, direction); |
c11e391d | 492 | |
18b862dc | 493 | return ret; |
bb2bb903 | 494 | |
a5bff92e DV |
495 | case DMA_BUF_SET_NAME_A: |
496 | case DMA_BUF_SET_NAME_B: | |
bb2bb903 GH |
497 | return dma_buf_set_name(dmabuf, (const char __user *)arg); |
498 | ||
20e10881 JE |
499 | #if IS_ENABLED(CONFIG_SYNC_FILE) |
500 | case DMA_BUF_IOCTL_EXPORT_SYNC_FILE: | |
501 | return dma_buf_export_sync_file(dmabuf, (void __user *)arg); | |
59474049 JE |
502 | case DMA_BUF_IOCTL_IMPORT_SYNC_FILE: |
503 | return dma_buf_import_sync_file(dmabuf, (const void __user *)arg); | |
20e10881 JE |
504 | #endif |
505 | ||
c11e391d DV |
506 | default: |
507 | return -ENOTTY; | |
508 | } | |
509 | } | |
510 | ||
bcc07111 GH |
511 | static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file) |
512 | { | |
513 | struct dma_buf *dmabuf = file->private_data; | |
514 | ||
515 | seq_printf(m, "size:\t%zu\n", dmabuf->size); | |
516 | /* Don't count the temporary reference taken inside procfs seq_show */ | |
517 | seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1); | |
518 | seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name); | |
6348dd29 | 519 | spin_lock(&dmabuf->name_lock); |
bcc07111 GH |
520 | if (dmabuf->name) |
521 | seq_printf(m, "name:\t%s\n", dmabuf->name); | |
6348dd29 | 522 | spin_unlock(&dmabuf->name_lock); |
bcc07111 GH |
523 | } |
524 | ||
d15bd7ee | 525 | static const struct file_operations dma_buf_fops = { |
05cd8469 | 526 | .release = dma_buf_file_release, |
4c78513e | 527 | .mmap = dma_buf_mmap_internal, |
19e8697b | 528 | .llseek = dma_buf_llseek, |
9b495a58 | 529 | .poll = dma_buf_poll, |
c11e391d | 530 | .unlocked_ioctl = dma_buf_ioctl, |
1832f2d8 | 531 | .compat_ioctl = compat_ptr_ioctl, |
bcc07111 | 532 | .show_fdinfo = dma_buf_show_fdinfo, |
d15bd7ee SS |
533 | }; |
534 | ||
535 | /* | |
536 | * is_dma_buf_file - Check if struct file* is associated with dma_buf | |
537 | */ | |
538 | static inline int is_dma_buf_file(struct file *file) | |
539 | { | |
540 | return file->f_op == &dma_buf_fops; | |
541 | } | |
542 | ||
f728a5ea | 543 | static struct file *dma_buf_getfile(size_t size, int flags) |
ed63bb1d | 544 | { |
370704e7 | 545 | static atomic64_t dmabuf_inode = ATOMIC64_INIT(0); |
ed63bb1d | 546 | struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb); |
f728a5ea | 547 | struct file *file; |
ed63bb1d GH |
548 | |
549 | if (IS_ERR(inode)) | |
550 | return ERR_CAST(inode); | |
551 | ||
f728a5ea CK |
552 | inode->i_size = size; |
553 | inode_set_bytes(inode, size); | |
ed63bb1d | 554 | |
370704e7 CTK |
555 | /* |
556 | * The ->i_ino acquired from get_next_ino() is not unique thus | |
557 | * not suitable for using it as dentry name by dmabuf stats. | |
558 | * Override ->i_ino with the unique and dmabuffs specific | |
559 | * value. | |
560 | */ | |
561 | inode->i_ino = atomic64_add_return(1, &dmabuf_inode); | |
47091e4e | 562 | flags &= O_ACCMODE | O_NONBLOCK; |
ed63bb1d GH |
563 | file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf", |
564 | flags, &dma_buf_fops); | |
565 | if (IS_ERR(file)) | |
566 | goto err_alloc_file; | |
ed63bb1d GH |
567 | |
568 | return file; | |
569 | ||
570 | err_alloc_file: | |
571 | iput(inode); | |
572 | return file; | |
573 | } | |
574 | ||
2904a8c1 DV |
575 | /** |
576 | * DOC: dma buf device access | |
577 | * | |
578 | * For device DMA access to a shared DMA buffer the usual sequence of operations | |
579 | * is fairly simple: | |
580 | * | |
581 | * 1. The exporter defines its exporter instance using |
582 | * DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private | |
583 | * buffer object into a &dma_buf. It then exports that &dma_buf to userspace | |
584 | * as a file descriptor by calling dma_buf_fd(). | |
585 | * | |
586 | * 2. Userspace passes these file descriptors to all drivers it wants this buffer |
d791aec9 | 587 | * to share with: First the file descriptor is converted to a &dma_buf using |
c138782d | 588 | * dma_buf_get(). Then the buffer is attached to the device using |
2904a8c1 DV |
589 | * dma_buf_attach(). |
590 | * | |
591 | * Up to this stage the exporter is still free to migrate or reallocate the | |
592 | * backing storage. | |
593 | * | |
c138782d | 594 | * 3. Once the buffer is attached to all devices userspace can initiate DMA |
2904a8c1 DV |
595 | * access to the shared buffer. In the kernel this is done by calling |
596 | * dma_buf_map_attachment() and dma_buf_unmap_attachment(). | |
597 | * | |
598 | * 4. Once a driver is done with a shared buffer it needs to call | |
599 | * dma_buf_detach() (after cleaning up any mappings) and then release the | |
85804b70 | 600 | * reference acquired with dma_buf_get() by calling dma_buf_put(). |
2904a8c1 DV |
601 | * |
602 | * For the detailed semantics exporters are expected to implement see | |
603 | * &dma_buf_ops. | |
604 | */ | |
605 | ||
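A condensed kernel-side sketch of steps 1-4 above; my_buf and my_dmabuf_ops stand in for a driver's private buffer object and its &dma_buf_ops implementation (map_dma_buf, unmap_dma_buf, release, ...) and are not part of the dma-buf core:

```c
#include <linux/dma-buf.h>

struct my_buf {
	size_t size;
	/* driver-private backing storage ... */
};

/* provided by the driver; must implement map_dma_buf/unmap_dma_buf/release */
extern const struct dma_buf_ops my_dmabuf_ops;

/* Step 1: wrap the private buffer in a dma_buf and export it as an fd. */
static int my_export(struct my_buf *buf)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	int fd;

	exp_info.ops = &my_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = O_CLOEXEC;
	exp_info.priv = buf;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);
	return fd;
}

/* Steps 2-3: resolve the fd, attach the importing device and map it.
 * Teardown (step 4) is the mirror image: dma_buf_unmap_attachment_unlocked(),
 * dma_buf_detach() and finally dma_buf_put() on the dma_buf_get() reference. */
static struct sg_table *my_import(int fd, struct device *dev,
				  struct dma_buf_attachment **out_attach)
{
	struct dma_buf *dmabuf = dma_buf_get(fd);
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach)) {
		dma_buf_put(dmabuf);
		return ERR_CAST(attach);
	}

	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		dma_buf_put(dmabuf);
		return sgt;
	}

	*out_attach = attach;
	return sgt;
}
```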
d15bd7ee | 606 | /** |
d8fbe341 | 607 | * dma_buf_export - Creates a new dma_buf, and associates an anon file |
d15bd7ee SS |
608 | * with this buffer, so it can be exported. |
609 | * Also connects the allocator-specific data and ops to the buffer. |
78df9695 | 610 | * Additionally, provide a name string for exporter; useful in debugging. |
d15bd7ee | 611 | * |
d8fbe341 | 612 | * @exp_info: [in] holds all the export related information provided |
f641d3b5 | 613 | * by the exporter. see &struct dma_buf_export_info |
d8fbe341 | 614 | * for further details. |
d15bd7ee | 615 | * |
85804b70 DV |
616 | * On success, returns a newly created struct dma_buf object, which wraps the |
617 | * supplied private data and operations for struct dma_buf_ops. On missing |
618 | * ops or on failure to allocate the struct dma_buf, returns a negative error |
619 | * wrapped in an ERR_PTR(). |
d15bd7ee | 620 | * |
2904a8c1 DV |
621 | * For most cases the easiest way to create @exp_info is through the |
622 | * %DEFINE_DMA_BUF_EXPORT_INFO macro. | |
d15bd7ee | 623 | */ |
d8fbe341 | 624 | struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) |
d15bd7ee SS |
625 | { |
626 | struct dma_buf *dmabuf; | |
52791eee | 627 | struct dma_resv *resv = exp_info->resv; |
d15bd7ee | 628 | struct file *file; |
3aac4502 | 629 | size_t alloc_size = sizeof(struct dma_buf); |
a026df4c | 630 | int ret; |
5136629d | 631 | |
f728a5ea CK |
632 | if (WARN_ON(!exp_info->priv || !exp_info->ops |
633 | || !exp_info->ops->map_dma_buf | |
634 | || !exp_info->ops->unmap_dma_buf | |
635 | || !exp_info->ops->release)) | |
d15bd7ee | 636 | return ERR_PTR(-EINVAL); |
d15bd7ee | 637 | |
15fd552d | 638 | if (WARN_ON(exp_info->ops->cache_sgt_mapping && |
bd2275ee | 639 | (exp_info->ops->pin || exp_info->ops->unpin))) |
15fd552d CK |
640 | return ERR_PTR(-EINVAL); |
641 | ||
bd2275ee | 642 | if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin)) |
15fd552d CK |
643 | return ERR_PTR(-EINVAL); |
644 | ||
9abdffe2 SS |
645 | if (!try_module_get(exp_info->owner)) |
646 | return ERR_PTR(-ENOENT); | |
647 | ||
f728a5ea CK |
648 | file = dma_buf_getfile(exp_info->size, exp_info->flags); |
649 | if (IS_ERR(file)) { | |
650 | ret = PTR_ERR(file); | |
651 | goto err_module; | |
652 | } | |
653 | ||
654 | if (!exp_info->resv) | |
655 | alloc_size += sizeof(struct dma_resv); | |
656 | else | |
657 | /* prevent &dma_buf[1] == dma_buf->resv */ | |
658 | alloc_size += 1; | |
3aac4502 | 659 | dmabuf = kzalloc(alloc_size, GFP_KERNEL); |
9abdffe2 | 660 | if (!dmabuf) { |
a026df4c | 661 | ret = -ENOMEM; |
f728a5ea | 662 | goto err_file; |
9abdffe2 | 663 | } |
d15bd7ee | 664 | |
d8fbe341 SS |
665 | dmabuf->priv = exp_info->priv; |
666 | dmabuf->ops = exp_info->ops; | |
667 | dmabuf->size = exp_info->size; | |
668 | dmabuf->exp_name = exp_info->exp_name; | |
9abdffe2 | 669 | dmabuf->owner = exp_info->owner; |
6348dd29 | 670 | spin_lock_init(&dmabuf->name_lock); |
9b495a58 | 671 | init_waitqueue_head(&dmabuf->poll); |
6b51b02a CK |
672 | dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll; |
673 | dmabuf->cb_in.active = dmabuf->cb_out.active = 0; | |
f728a5ea | 674 | INIT_LIST_HEAD(&dmabuf->attachments); |
9b495a58 | 675 | |
3aac4502 | 676 | if (!resv) { |
f728a5ea CK |
677 | dmabuf->resv = (struct dma_resv *)&dmabuf[1]; |
678 | dma_resv_init(dmabuf->resv); | |
679 | } else { | |
680 | dmabuf->resv = resv; | |
3aac4502 | 681 | } |
d15bd7ee | 682 | |
f728a5ea CK |
683 | ret = dma_buf_stats_setup(dmabuf, file); |
684 | if (ret) | |
a026df4c | 685 | goto err_dmabuf; |
19e8697b | 686 | |
f728a5ea CK |
687 | file->private_data = dmabuf; |
688 | file->f_path.dentry->d_fsdata = dmabuf; | |
d15bd7ee SS |
689 | dmabuf->file = file; |
690 | ||
bfc7bc53 | 691 | __dma_buf_debugfs_list_add(dmabuf); |
b89e3563 | 692 | |
d15bd7ee | 693 | return dmabuf; |
a026df4c CW |
694 | |
695 | err_dmabuf: | |
f728a5ea CK |
696 | if (!resv) |
697 | dma_resv_fini(dmabuf->resv); | |
a026df4c | 698 | kfree(dmabuf); |
f728a5ea CK |
699 | err_file: |
700 | fput(file); | |
a026df4c CW |
701 | err_module: |
702 | module_put(exp_info->owner); | |
703 | return ERR_PTR(ret); | |
d15bd7ee | 704 | } |
16b0314a | 705 | EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF); |
d15bd7ee SS |
706 | |
707 | /** | |
85804b70 | 708 | * dma_buf_fd - returns a file descriptor for the given struct dma_buf |
d15bd7ee | 709 | * @dmabuf: [in] pointer to dma_buf for which fd is required. |
55c1c4ca | 710 | * @flags: [in] flags to give to fd |
d15bd7ee SS |
711 | * |
712 | * On success, returns the newly installed file descriptor; on failure, returns a negative error code. |
713 | */ | |
55c1c4ca | 714 | int dma_buf_fd(struct dma_buf *dmabuf, int flags) |
d15bd7ee | 715 | { |
f5e097f0 | 716 | int fd; |
d15bd7ee SS |
717 | |
718 | if (!dmabuf || !dmabuf->file) | |
719 | return -EINVAL; | |
720 | ||
f5e097f0 BP |
721 | fd = get_unused_fd_flags(flags); |
722 | if (fd < 0) | |
723 | return fd; | |
d15bd7ee SS |
724 | |
725 | fd_install(fd, dmabuf->file); | |
726 | ||
727 | return fd; | |
728 | } | |
16b0314a | 729 | EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF); |
d15bd7ee SS |
730 | |
731 | /** | |
85804b70 DV |
732 | * dma_buf_get - returns the struct dma_buf related to an fd |
733 | * @fd: [in] fd associated with the struct dma_buf to be returned | |
d15bd7ee | 734 | * |
85804b70 | 735 | * On success, returns the struct dma_buf associated with an fd; uses |
d15bd7ee SS |
736 | * file's refcounting done by fget() to increase the refcount. Returns ERR_PTR |
737 | * otherwise. | |
738 | */ | |
739 | struct dma_buf *dma_buf_get(int fd) | |
740 | { | |
741 | struct file *file; | |
742 | ||
743 | file = fget(fd); | |
744 | ||
745 | if (!file) | |
746 | return ERR_PTR(-EBADF); | |
747 | ||
748 | if (!is_dma_buf_file(file)) { | |
749 | fput(file); | |
750 | return ERR_PTR(-EINVAL); | |
751 | } | |
752 | ||
753 | return file->private_data; | |
754 | } | |
16b0314a | 755 | EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF); |
d15bd7ee SS |
756 | |
757 | /** | |
758 | * dma_buf_put - decreases refcount of the buffer | |
759 | * @dmabuf: [in] buffer to reduce refcount of | |
760 | * | |
2904a8c1 DV |
761 | * Uses file's refcounting done implicitly by fput(). |
762 | * | |
763 | * If, as a result of this call, the refcount becomes 0, the 'release' file | |
e9b4d7b5 DV |
764 | * operation related to this fd is called. It calls &dma_buf_ops.release vfunc |
765 | * in turn, and frees the memory allocated for dmabuf when exported. | |
d15bd7ee SS |
766 | */ |
767 | void dma_buf_put(struct dma_buf *dmabuf) | |
768 | { | |
769 | if (WARN_ON(!dmabuf || !dmabuf->file)) | |
770 | return; | |
771 | ||
772 | fput(dmabuf->file); | |
773 | } | |
16b0314a | 774 | EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF); |
d15bd7ee | 775 | |
84335675 DV |
776 | static void mangle_sg_table(struct sg_table *sg_table) |
777 | { | |
778 | #ifdef CONFIG_DMABUF_DEBUG | |
779 | int i; | |
780 | struct scatterlist *sg; | |
781 | ||
782 | /* To catch abuse of the underlying struct page by importers mix | |
783 | * up the bits, but take care to preserve the low SG_ bits to | |
784 | * not corrupt the sgt. The mixing is undone in __unmap_dma_buf | |
785 | * before passing the sgt back to the exporter. */ | |
786 | for_each_sgtable_sg(sg_table, sg, i) | |
787 | sg->page_link ^= ~0xffUL; | |
788 | #endif | |
789 | } | |
790 | ||
791 | static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach, | |
792 | enum dma_data_direction direction) | |
793 | { | |
794 | struct sg_table *sg_table; | |
46b35b33 | 795 | signed long ret; |
84335675 DV |
796 | |
797 | sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction); | |
46b35b33 CK |
798 | if (IS_ERR_OR_NULL(sg_table)) |
799 | return sg_table; | |
800 | ||
801 | if (!dma_buf_attachment_is_dynamic(attach)) { | |
802 | ret = dma_resv_wait_timeout(attach->dmabuf->resv, | |
803 | DMA_RESV_USAGE_KERNEL, true, | |
804 | MAX_SCHEDULE_TIMEOUT); | |
805 | if (ret < 0) { | |
806 | attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, | |
807 | direction); | |
808 | return ERR_PTR(ret); | |
809 | } | |
810 | } | |
84335675 | 811 | |
46b35b33 | 812 | mangle_sg_table(sg_table); |
84335675 DV |
813 | return sg_table; |
814 | } | |
815 | ||
ae2e7f28 DO |
816 | /** |
817 | * DOC: locking convention | |
818 | * | |
819 | * In order to avoid deadlock situations between dma-buf exports and importers, | |
820 | * all dma-buf API users must follow the common dma-buf locking convention. | |
821 | * | |
822 | * Convention for importers | |
823 | * | |
824 | * 1. Importers must hold the dma-buf reservation lock when calling these | |
825 | * functions: | |
826 | * | |
827 | * - dma_buf_pin() | |
828 | * - dma_buf_unpin() | |
829 | * - dma_buf_map_attachment() | |
830 | * - dma_buf_unmap_attachment() | |
831 | * - dma_buf_vmap() | |
832 | * - dma_buf_vunmap() | |
833 | * | |
834 | * 2. Importers must not hold the dma-buf reservation lock when calling these | |
835 | * functions: | |
836 | * | |
837 | * - dma_buf_attach() | |
838 | * - dma_buf_dynamic_attach() | |
839 | * - dma_buf_detach() | |
e3ecbd21 | 840 | * - dma_buf_export() |
ae2e7f28 DO |
841 | * - dma_buf_fd() |
842 | * - dma_buf_get() | |
843 | * - dma_buf_put() | |
844 | * - dma_buf_mmap() | |
845 | * - dma_buf_begin_cpu_access() | |
846 | * - dma_buf_end_cpu_access() | |
847 | * - dma_buf_map_attachment_unlocked() | |
848 | * - dma_buf_unmap_attachment_unlocked() | |
849 | * - dma_buf_vmap_unlocked() | |
850 | * - dma_buf_vunmap_unlocked() | |
851 | * | |
852 | * Convention for exporters | |
853 | * | |
854 | * 1. These &dma_buf_ops callbacks are invoked with unlocked dma-buf | |
855 | * reservation and exporter can take the lock: | |
856 | * | |
857 | * - &dma_buf_ops.attach() | |
858 | * - &dma_buf_ops.detach() | |
859 | * - &dma_buf_ops.release() | |
860 | * - &dma_buf_ops.begin_cpu_access() | |
861 | * - &dma_buf_ops.end_cpu_access() | |
8021fa16 | 862 | * - &dma_buf_ops.mmap() |
ae2e7f28 DO |
863 | * |
864 | * 2. These &dma_buf_ops callbacks are invoked with locked dma-buf | |
865 | * reservation and exporter can't take the lock: | |
866 | * | |
867 | * - &dma_buf_ops.pin() | |
868 | * - &dma_buf_ops.unpin() | |
869 | * - &dma_buf_ops.map_dma_buf() | |
870 | * - &dma_buf_ops.unmap_dma_buf() | |
ae2e7f28 DO |
871 | * - &dma_buf_ops.vmap() |
872 | * - &dma_buf_ops.vunmap() | |
873 | * | |
874 | * 3. Exporters must hold the dma-buf reservation lock when calling these | |
875 | * functions: | |
876 | * | |
877 | * - dma_buf_move_notify() | |
878 | */ | |
879 | ||
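A short sketch of the importer-side rules above: dma_buf_map_attachment()/dma_buf_unmap_attachment() need the reservation lock held by the caller, while the _unlocked wrappers (and attach/detach) must be called without it; @attach is assumed to come from an earlier dma_buf_attach() or dma_buf_dynamic_attach():

```c
static int my_dma_access(struct dma_buf_attachment *attach)
{
	struct sg_table *sgt;
	int ret = 0;

	dma_resv_lock(attach->dmabuf->resv, NULL);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto out_unlock;
	}

	/* ... hand sgt to the device ... */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
out_unlock:
	dma_resv_unlock(attach->dmabuf->resv);
	return ret;
}
```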
d15bd7ee | 880 | /** |
85804b70 | 881 | * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list |
15fd552d CK |
882 | * @dmabuf: [in] buffer to attach device to. |
883 | * @dev: [in] device to be attached. | |
6f49c251 RD |
884 | * @importer_ops: [in] importer operations for the attachment |
885 | * @importer_priv: [in] importer private pointer for the attachment | |
d15bd7ee | 886 | * |
2904a8c1 DV |
887 | * Returns struct dma_buf_attachment pointer for this attachment. Attachments |
888 | * must be cleaned up by calling dma_buf_detach(). | |
889 | * | |
85804b70 DV |
890 | * Optionally this calls &dma_buf_ops.attach to allow device-specific attach |
891 | * functionality. | |
892 | * | |
2904a8c1 DV |
893 | * Returns: |
894 | * | |
895 | * A pointer to newly created &dma_buf_attachment on success, or a negative | |
896 | * error code wrapped into a pointer on failure. | |
897 | * | |
898 | * Note that this can fail if the backing storage of @dmabuf is in a place not | |
899 | * accessible to @dev, and cannot be moved to a more suitable place. This is | |
900 | * indicated with the error code -EBUSY. | |
d15bd7ee | 901 | */ |
15fd552d CK |
902 | struct dma_buf_attachment * |
903 | dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, | |
bb42df46 CK |
904 | const struct dma_buf_attach_ops *importer_ops, |
905 | void *importer_priv) | |
d15bd7ee SS |
906 | { |
907 | struct dma_buf_attachment *attach; | |
908 | int ret; | |
909 | ||
d1aa06a1 | 910 | if (WARN_ON(!dmabuf || !dev)) |
d15bd7ee SS |
911 | return ERR_PTR(-EINVAL); |
912 | ||
4981cdb0 CK |
913 | if (WARN_ON(importer_ops && !importer_ops->move_notify)) |
914 | return ERR_PTR(-EINVAL); | |
915 | ||
db7942b6 | 916 | attach = kzalloc(sizeof(*attach), GFP_KERNEL); |
34d84ec4 | 917 | if (!attach) |
a9fbc3b7 | 918 | return ERR_PTR(-ENOMEM); |
d15bd7ee | 919 | |
d15bd7ee SS |
920 | attach->dev = dev; |
921 | attach->dmabuf = dmabuf; | |
09606b54 CK |
922 | if (importer_ops) |
923 | attach->peer2peer = importer_ops->allow_peer2peer; | |
bb42df46 CK |
924 | attach->importer_ops = importer_ops; |
925 | attach->importer_priv = importer_priv; | |
2ed9201b | 926 | |
d15bd7ee | 927 | if (dmabuf->ops->attach) { |
a19741e5 | 928 | ret = dmabuf->ops->attach(dmabuf, attach); |
d15bd7ee SS |
929 | if (ret) |
930 | goto err_attach; | |
931 | } | |
15fd552d | 932 | dma_resv_lock(dmabuf->resv, NULL); |
d15bd7ee | 933 | list_add(&attach->node, &dmabuf->attachments); |
15fd552d | 934 | dma_resv_unlock(dmabuf->resv); |
d15bd7ee | 935 | |
15fd552d CK |
936 | /* When either the importer or the exporter can't handle dynamic |
937 | * mappings we cache the mapping here to avoid issues with the | |
938 | * reservation object lock. | |
939 | */ | |
940 | if (dma_buf_attachment_is_dynamic(attach) != | |
941 | dma_buf_is_dynamic(dmabuf)) { | |
942 | struct sg_table *sgt; | |
943 | ||
809d9c72 | 944 | dma_resv_lock(attach->dmabuf->resv, NULL); |
bb42df46 | 945 | if (dma_buf_is_dynamic(attach->dmabuf)) { |
7e008b02 | 946 | ret = dmabuf->ops->pin(attach); |
bb42df46 CK |
947 | if (ret) |
948 | goto err_unlock; | |
949 | } | |
15fd552d | 950 | |
84335675 | 951 | sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL); |
15fd552d CK |
952 | if (!sgt) |
953 | sgt = ERR_PTR(-ENOMEM); | |
954 | if (IS_ERR(sgt)) { | |
955 | ret = PTR_ERR(sgt); | |
bb42df46 | 956 | goto err_unpin; |
15fd552d | 957 | } |
809d9c72 | 958 | dma_resv_unlock(attach->dmabuf->resv); |
15fd552d CK |
959 | attach->sgt = sgt; |
960 | attach->dir = DMA_BIDIRECTIONAL; | |
961 | } | |
962 | ||
d15bd7ee SS |
963 | return attach; |
964 | ||
d15bd7ee SS |
965 | err_attach: |
966 | kfree(attach); | |
d15bd7ee | 967 | return ERR_PTR(ret); |
15fd552d | 968 | |
bb42df46 CK |
969 | err_unpin: |
970 | if (dma_buf_is_dynamic(attach->dmabuf)) | |
7e008b02 | 971 | dmabuf->ops->unpin(attach); |
bb42df46 | 972 | |
15fd552d | 973 | err_unlock: |
809d9c72 | 974 | dma_resv_unlock(attach->dmabuf->resv); |
15fd552d CK |
975 | |
976 | dma_buf_detach(dmabuf, attach); | |
977 | return ERR_PTR(ret); | |
978 | } | |
16b0314a | 979 | EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF); |
15fd552d CK |
980 | |
981 | /** | |
982 | * dma_buf_attach - Wrapper for dma_buf_dynamic_attach | |
983 | * @dmabuf: [in] buffer to attach device to. | |
984 | * @dev: [in] device to be attached. | |
985 | * | |
986 | * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static | |
987 | * mapping. | |
988 | */ | |
989 | struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, | |
990 | struct device *dev) | |
991 | { | |
bb42df46 | 992 | return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL); |
d15bd7ee | 993 | } |
16b0314a | 994 | EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF); |
d15bd7ee | 995 | |
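For contrast with the static wrapper above, a sketch of a dynamic importer; my_importer and my_invalidate_mappings() are hypothetical driver pieces, only dma_buf_dynamic_attach() and &dma_buf_attach_ops are real API:

```c
struct my_importer {
	/* driver-private bookkeeping for cached mappings ... */
};

/* hypothetical helper that drops the importer's cached mapping */
void my_invalidate_mappings(struct my_importer *imp);

/* Called with the reservation lock held whenever the exporter wants to
 * move the backing storage; the importer must re-map afterwards. */
static void my_move_notify(struct dma_buf_attachment *attach)
{
	my_invalidate_mappings(attach->importer_priv);
}

static const struct dma_buf_attach_ops my_importer_ops = {
	.allow_peer2peer = true,
	.move_notify = my_move_notify,
};

static struct dma_buf_attachment *my_dynamic_attach(struct dma_buf *dmabuf,
						    struct device *dev,
						    struct my_importer *imp)
{
	return dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops, imp);
}
```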
84335675 DV |
996 | static void __unmap_dma_buf(struct dma_buf_attachment *attach, |
997 | struct sg_table *sg_table, | |
998 | enum dma_data_direction direction) | |
999 | { | |
1000 | /* uses XOR, hence this unmangles */ | |
1001 | mangle_sg_table(sg_table); | |
1002 | ||
1003 | attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction); | |
1004 | } | |
1005 | ||
d15bd7ee | 1006 | /** |
85804b70 | 1007 | * dma_buf_detach - Remove the given attachment from dmabuf's attachments list |
d15bd7ee SS |
1008 | * @dmabuf: [in] buffer to detach from. |
1009 | * @attach: [in] attachment to be detached; is free'd after this call. | |
1010 | * | |
2904a8c1 | 1011 | * Clean up a device attachment obtained by calling dma_buf_attach(). |
85804b70 DV |
1012 | * |
1013 | * Optionally this calls &dma_buf_ops.detach for device-specific detach. | |
d15bd7ee SS |
1014 | */ |
1015 | void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) | |
1016 | { | |
d3292dae | 1017 | if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf)) |
d15bd7ee SS |
1018 | return; |
1019 | ||
d3292dae | 1020 | dma_resv_lock(dmabuf->resv, NULL); |
809d9c72 | 1021 | |
15fd552d | 1022 | if (attach->sgt) { |
15fd552d | 1023 | |
84335675 | 1024 | __unmap_dma_buf(attach, attach->sgt, attach->dir); |
f13e143e | 1025 | |
809d9c72 | 1026 | if (dma_buf_is_dynamic(attach->dmabuf)) |
7e008b02 | 1027 | dmabuf->ops->unpin(attach); |
15fd552d | 1028 | } |
d15bd7ee | 1029 | list_del(&attach->node); |
809d9c72 | 1030 | |
15fd552d | 1031 | dma_resv_unlock(dmabuf->resv); |
809d9c72 | 1032 | |
d15bd7ee SS |
1033 | if (dmabuf->ops->detach) |
1034 | dmabuf->ops->detach(dmabuf, attach); | |
1035 | ||
d15bd7ee SS |
1036 | kfree(attach); |
1037 | } | |
16b0314a | 1038 | EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF); |
d15bd7ee | 1039 | |
bb42df46 CK |
1040 | /** |
1041 | * dma_buf_pin - Lock down the DMA-buf | |
bb42df46 CK |
1042 | * @attach: [in] attachment which should be pinned |
1043 | * | |
c545781e DV |
1044 | * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may |
1045 | * call this, and only for limited use cases like scanout and not for temporary | |
1046 | * pin operations. It is not permitted to allow userspace to pin arbitrary | |
1047 | * amounts of buffers through this interface. | |
1048 | * | |
1049 | * Buffers must be unpinned by calling dma_buf_unpin(). | |
1050 | * | |
bb42df46 CK |
1051 | * Returns: |
1052 | * 0 on success, negative error code on failure. | |
1053 | */ | |
1054 | int dma_buf_pin(struct dma_buf_attachment *attach) | |
1055 | { | |
1056 | struct dma_buf *dmabuf = attach->dmabuf; | |
1057 | int ret = 0; | |
1058 | ||
c545781e DV |
1059 | WARN_ON(!dma_buf_attachment_is_dynamic(attach)); |
1060 | ||
bb42df46 CK |
1061 | dma_resv_assert_held(dmabuf->resv); |
1062 | ||
1063 | if (dmabuf->ops->pin) | |
1064 | ret = dmabuf->ops->pin(attach); | |
1065 | ||
1066 | return ret; | |
1067 | } | |
16b0314a | 1068 | EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF); |
bb42df46 CK |
1069 | |
1070 | /** | |
c545781e | 1071 | * dma_buf_unpin - Unpin a DMA-buf |
bb42df46 | 1072 | * @attach: [in] attachment which should be unpinned |
c545781e DV |
1073 | * |
1074 | * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move | |
1075 | * any mapping of @attach again and inform the importer through | |
1076 | * &dma_buf_attach_ops.move_notify. | |
bb42df46 CK |
1077 | */ |
1078 | void dma_buf_unpin(struct dma_buf_attachment *attach) | |
1079 | { | |
1080 | struct dma_buf *dmabuf = attach->dmabuf; | |
1081 | ||
c545781e DV |
1082 | WARN_ON(!dma_buf_attachment_is_dynamic(attach)); |
1083 | ||
bb42df46 CK |
1084 | dma_resv_assert_held(dmabuf->resv); |
1085 | ||
1086 | if (dmabuf->ops->unpin) | |
1087 | dmabuf->ops->unpin(attach); | |
1088 | } | |
16b0314a | 1089 | EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF); |
bb42df46 | 1090 | |
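A sketch of the intended use of the two calls above: a dynamic importer pinning a buffer for scanout while holding the reservation lock (the my_* name is illustrative, not dma-buf API):

```c
static struct sg_table *my_pin_for_scanout(struct dma_buf_attachment *attach)
{
	struct sg_table *sgt;
	int ret;

	dma_resv_lock(attach->dmabuf->resv, NULL);

	ret = dma_buf_pin(attach);
	if (ret) {
		sgt = ERR_PTR(ret);
		goto out_unlock;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		dma_buf_unpin(attach);

out_unlock:
	dma_resv_unlock(attach->dmabuf->resv);
	return sgt;
}
```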
d15bd7ee SS |
1091 | /** |
1092 | * dma_buf_map_attachment - Returns the scatterlist table of the attachment; | |
1093 | * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the | |
1094 | * dma_buf_ops. | |
1095 | * @attach: [in] attachment whose scatterlist is to be returned | |
1096 | * @direction: [in] direction of DMA transfer | |
1097 | * | |
fee0c54e | 1098 | * Returns an sg_table containing the scatterlist of the buffer, or ERR_PTR |
2904a8c1 DV |
1099 | * on error. May return -EINTR if it is interrupted by a signal. |
1100 | * | |
ac80cd17 JX |
1101 | * On success, the DMA addresses and lengths in the returned scatterlist are |
1102 | * PAGE_SIZE aligned. | |
1103 | * | |
c138782d | 1104 | * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that |
2904a8c1 DV |
1105 | * the underlying backing storage is pinned for as long as a mapping exists, |
1106 | * therefore users/importers should not hold onto a mapping for undue amounts of | |
1107 | * time. | |
89bcadc8 DV |
1108 | * |
1109 | * Important: Dynamic importers must wait for the exclusive fence of the struct | |
1110 | * dma_resv attached to the DMA-BUF first. | |
d15bd7ee SS |
1111 | */ |
1112 | struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, | |
1113 | enum dma_data_direction direction) | |
1114 | { | |
531beb06 | 1115 | struct sg_table *sg_table; |
bb42df46 | 1116 | int r; |
d15bd7ee SS |
1117 | |
1118 | might_sleep(); | |
1119 | ||
d1aa06a1 | 1120 | if (WARN_ON(!attach || !attach->dmabuf)) |
d15bd7ee SS |
1121 | return ERR_PTR(-EINVAL); |
1122 | ||
47e982d5 | 1123 | dma_resv_assert_held(attach->dmabuf->resv); |
15fd552d | 1124 | |
f13e143e CK |
1125 | if (attach->sgt) { |
1126 | /* | |
1127 | * Two mappings with different directions for the same | |
1128 | * attachment are not allowed. | |
1129 | */ | |
1130 | if (attach->dir != direction && | |
1131 | attach->dir != DMA_BIDIRECTIONAL) | |
1132 | return ERR_PTR(-EBUSY); | |
1133 | ||
1134 | return attach->sgt; | |
1135 | } | |
1136 | ||
bb42df46 | 1137 | if (dma_buf_is_dynamic(attach->dmabuf)) { |
4981cdb0 | 1138 | if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) { |
7e008b02 | 1139 | r = attach->dmabuf->ops->pin(attach); |
bb42df46 CK |
1140 | if (r) |
1141 | return ERR_PTR(r); | |
1142 | } | |
1143 | } | |
15fd552d | 1144 | |
84335675 | 1145 | sg_table = __map_dma_buf(attach, direction); |
fee0c54e CC |
1146 | if (!sg_table) |
1147 | sg_table = ERR_PTR(-ENOMEM); | |
d15bd7ee | 1148 | |
bb42df46 | 1149 | if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) && |
4981cdb0 | 1150 | !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) |
7e008b02 | 1151 | attach->dmabuf->ops->unpin(attach); |
bb42df46 | 1152 | |
f13e143e CK |
1153 | if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) { |
1154 | attach->sgt = sg_table; | |
1155 | attach->dir = direction; | |
1156 | } | |
1157 | ||
ac80cd17 | 1158 | #ifdef CONFIG_DMA_API_DEBUG |
00efd65a | 1159 | if (!IS_ERR(sg_table)) { |
ac80cd17 JX |
1160 | struct scatterlist *sg; |
1161 | u64 addr; | |
1162 | int len; | |
1163 | int i; | |
1164 | ||
1165 | for_each_sgtable_dma_sg(sg_table, sg, i) { | |
1166 | addr = sg_dma_address(sg); | |
1167 | len = sg_dma_len(sg); | |
1168 | if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) { | |
1169 | pr_debug("%s: addr %llx or len %x is not page aligned!\n", | |
1170 | __func__, addr, len); | |
1171 | } | |
1172 | } | |
1173 | } | |
1174 | #endif /* CONFIG_DMA_API_DEBUG */ | |
d15bd7ee SS |
1175 | return sg_table; |
1176 | } | |
16b0314a | 1177 | EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF); |
d15bd7ee | 1178 | |
19d6634d DO |
1179 | /** |
1180 | * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment; | |
1181 | * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the | |
1182 | * dma_buf_ops. | |
1183 | * @attach: [in] attachment whose scatterlist is to be returned | |
1184 | * @direction: [in] direction of DMA transfer | |
1185 | * | |
1186 | * Unlocked variant of dma_buf_map_attachment(). | |
1187 | */ | |
1188 | struct sg_table * | |
1189 | dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach, | |
1190 | enum dma_data_direction direction) | |
1191 | { | |
1192 | struct sg_table *sg_table; | |
1193 | ||
1194 | might_sleep(); | |
1195 | ||
1196 | if (WARN_ON(!attach || !attach->dmabuf)) | |
1197 | return ERR_PTR(-EINVAL); | |
1198 | ||
1199 | dma_resv_lock(attach->dmabuf->resv, NULL); | |
1200 | sg_table = dma_buf_map_attachment(attach, direction); | |
1201 | dma_resv_unlock(attach->dmabuf->resv); | |
1202 | ||
1203 | return sg_table; | |
1204 | } | |
1205 | EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF); | |
1206 | ||
d15bd7ee SS |
1207 | /** |
1208 | * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might |
1209 | * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of |
1210 | * dma_buf_ops. | |
1211 | * @attach: [in] attachment to unmap buffer from | |
1212 | * @sg_table: [in] scatterlist info of the buffer to unmap | |
33ea2dcb | 1213 | * @direction: [in] direction of DMA transfer |
d15bd7ee | 1214 | * |
2904a8c1 | 1215 | * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment(). |
d15bd7ee SS |
1216 | */ |
1217 | void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, | |
33ea2dcb SS |
1218 | struct sg_table *sg_table, |
1219 | enum dma_data_direction direction) | |
d15bd7ee | 1220 | { |
b6fa0cd6 RC |
1221 | might_sleep(); |
1222 | ||
d1aa06a1 | 1223 | if (WARN_ON(!attach || !attach->dmabuf || !sg_table)) |
d15bd7ee SS |
1224 | return; |
1225 | ||
47e982d5 | 1226 | dma_resv_assert_held(attach->dmabuf->resv); |
15fd552d | 1227 | |
f13e143e CK |
1228 | if (attach->sgt == sg_table) |
1229 | return; | |
1230 | ||
84335675 | 1231 | __unmap_dma_buf(attach, sg_table, direction); |
bb42df46 CK |
1232 | |
1233 | if (dma_buf_is_dynamic(attach->dmabuf) && | |
4981cdb0 | 1234 | !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) |
bb42df46 | 1235 | dma_buf_unpin(attach); |
d15bd7ee | 1236 | } |
16b0314a | 1237 | EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF); |
fc13020e | 1238 | |
19d6634d DO |
1239 | /** |
1240 | * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer; might |
1241 | * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of |
1242 | * dma_buf_ops. | |
1243 | * @attach: [in] attachment to unmap buffer from | |
1244 | * @sg_table: [in] scatterlist info of the buffer to unmap | |
1245 | * @direction: [in] direction of DMA transfer | |
1246 | * | |
1247 | * Unlocked variant of dma_buf_unmap_attachment(). | |
1248 | */ | |
1249 | void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach, | |
1250 | struct sg_table *sg_table, | |
1251 | enum dma_data_direction direction) | |
1252 | { | |
1253 | might_sleep(); | |
1254 | ||
1255 | if (WARN_ON(!attach || !attach->dmabuf || !sg_table)) | |
1256 | return; | |
1257 | ||
1258 | dma_resv_lock(attach->dmabuf->resv, NULL); | |
1259 | dma_buf_unmap_attachment(attach, sg_table, direction); | |
1260 | dma_resv_unlock(attach->dmabuf->resv); | |
1261 | } | |
1262 | EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF); | |
1263 | ||
bb42df46 CK |
1264 | /** |
1265 | * dma_buf_move_notify - notify attachments that DMA-buf is moving | |
1266 | * | |
1267 | * @dmabuf: [in] buffer which is moving | |
1268 | * | |
b56ffa58 | 1269 | * Informs all attachments that they need to destroy and recreate all their |
bb42df46 CK |
1270 | * mappings. |
1271 | */ | |
1272 | void dma_buf_move_notify(struct dma_buf *dmabuf) | |
1273 | { | |
1274 | struct dma_buf_attachment *attach; | |
1275 | ||
1276 | dma_resv_assert_held(dmabuf->resv); | |
1277 | ||
1278 | list_for_each_entry(attach, &dmabuf->attachments, node) | |
4981cdb0 | 1279 | if (attach->importer_ops) |
bb42df46 CK |
1280 | attach->importer_ops->move_notify(attach); |
1281 | } | |
16b0314a | 1282 | EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF); |
bb42df46 | 1283 | |
0959a168 DV |
1284 | /** |
1285 | * DOC: cpu access | |
1286 | * | |
b56ffa58 | 1287 | * There are multiple reasons for supporting CPU access to a dma buffer object: |
0959a168 DV |
1288 | * |
1289 | * - Fallback operations in the kernel, for example when a device is connected | |
1290 | * over USB and the kernel needs to shuffle the data around first before | |
b56ffa58 | 1291 | * sending it away. Cache coherency is handled by bracketing any transactions |
0959a168 DV |
1292 | * with calls to dma_buf_begin_cpu_access() and |
1293 | * dma_buf_end_cpu_access(). |
1294 | * | |
7f0de8d8 DV |
1295 | * Since most kernel-internal dma-buf accesses need the entire buffer, a |
1296 | * vmap interface is introduced. Note that on very old 32-bit architectures | |
1297 | * vmalloc space might be limited and result in vmap calls failing. | |
0959a168 DV |
1298 | * |
1299 | * Interfaces:: | |
de9114ec | 1300 | * |
7938f421 LDM |
1301 | * void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map) |
1302 | * void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map) | |
0959a168 DV |
1303 | * |
1304 | * The vmap call can fail if there is no vmap support in the exporter, or if | |
de9114ec DV |
1305 | * it runs out of vmalloc space. Note that the dma-buf layer keeps a reference |
1306 | * count for all vmap access and calls down into the exporter's vmap function | |
1307 | * only when no vmapping exists, and only unmaps it once. Protection against | |
1308 | * concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex. | |
0959a168 DV |
1309 | * |
1310 | * - For full compatibility on the importer side with existing userspace | |
1311 | * interfaces, which might already support mmap'ing buffers. This is needed in | |
1312 | * many processing pipelines (e.g. feeding a software rendered image into a | |
1313 | * hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION | |
1314 | * framework already supported this and for DMA buffer file descriptors to | |
1315 | * replace ION buffers mmap support was needed. | |
1316 | * | |
1317 | * There are no special interfaces; userspace simply calls mmap on the dma-buf |
b56ffa58 | 1318 | * fd. But like for CPU access there's a need to bracket the actual access, |
0959a168 DV |
1319 | * which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that |
1320 | * DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must | |
1321 | * be restarted. | |
1322 | * | |
1323 | * Some systems might need some sort of cache coherency management e.g. when | |
1324 | * CPU and GPU domains are being accessed through dma-buf at the same time. | |
1325 | * To circumvent this problem there are begin/end coherency markers, that | |
1326 | * forward directly to existing dma-buf device drivers vfunc hooks. Userspace | |
1327 | * can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The | |
1328 | * sequence would be used like following: | |
1329 | * | |
1330 | * - mmap dma-buf fd | |
1331 | * - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write | |
1332 | * to mmap area 3. SYNC_END ioctl. This can be repeated as often as you | |
1333 | * want (with the new data being consumed by say the GPU or the scanout | |
1334 | * device) | |
1335 | * - munmap once you don't need the buffer any more | |
1336 | * | |
1337 | * For correctness and optimal performance, it is always required to use | |
1338 | * SYNC_START and SYNC_END before and after, respectively, when accessing the | |
1339 | * mapped address. Userspace cannot rely on coherent access, even when there | |
1340 | * are systems where it just works without calling these ioctls. | |
1341 | * | |
1342 | * - And as a CPU fallback in userspace processing pipelines. | |
1343 | * | |
1344 | * Similar to the motivation for kernel cpu access it is again important that | |
1345 | * the userspace code of a given importing subsystem can use the same | |
1346 | * interfaces with an imported dma-buf buffer object as with a native buffer |
1347 | * object. This is especially important for drm where the userspace part of | |
1348 | * contemporary OpenGL, X, and other drivers is huge, and reworking them to | |
1349 | * use a different way to mmap a buffer would be rather invasive. |
1350 | * | |
1351 | * The assumption in the current dma-buf interfaces is that redirecting the | |
1352 | * initial mmap is all that's needed. A survey of some of the existing | |
1353 | * subsystems shows that no driver seems to do any nefarious thing like | |
1354 | * syncing up with outstanding asynchronous processing on the device or | |
1355 | * allocating special resources at fault time. So hopefully this is good | |
1356 | * enough, since adding interfaces to intercept pagefaults and allow pte | |
1357 | * shootdowns would increase the complexity quite a bit. | |
1358 | * | |
1359 | * Interface:: | |
85804b70 | 1360 | * |
0959a168 DV |
1361 | * int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*, |
1362 | * unsigned long); | |
1363 | * | |
1364 | * If the importing subsystem simply provides a special-purpose mmap call to | |
85804b70 | 1365 | * set up a mapping in userspace, calling do_mmap with &dma_buf.file will |
0959a168 DV |
1366 | * equally achieve that for a dma-buf object. |
1367 | */ | |
1368 | ||
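A userspace sketch of the mmap-plus-bracketing sequence described above; the buffer size is assumed to have been discovered beforehand (e.g. via lseek(fd, 0, SEEK_END)), and the ioctl is restarted on EINTR/EAGAIN as the documentation requires:

```c
#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-buf.h>

/* Issue DMA_BUF_IOCTL_SYNC, restarting it when it is interrupted. */
static int sync_ioctl(int fd, __u64 flags)
{
	struct dma_buf_sync sync = { .flags = flags };
	int ret;

	do {
		ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
	} while (ret && (errno == EINTR || errno == EAGAIN));

	return ret;
}

/* Fill a dma-buf from the CPU with the required SYNC_START/SYNC_END
 * bracketing around the actual access. */
static int fill_dma_buf(int dmabuf_fd, size_t size, unsigned char value)
{
	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			 dmabuf_fd, 0);

	if (map == MAP_FAILED)
		return -1;

	sync_ioctl(dmabuf_fd, DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE);
	memset(map, value, size);
	sync_ioctl(dmabuf_fd, DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE);

	munmap(map, size);
	return 0;
}
```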
ae4e46b1 CW |
1369 | static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf, |
1370 | enum dma_data_direction direction) | |
1371 | { | |
1372 | bool write = (direction == DMA_BIDIRECTIONAL || | |
1373 | direction == DMA_TO_DEVICE); | |
52791eee | 1374 | struct dma_resv *resv = dmabuf->resv; |
ae4e46b1 CW |
1375 | long ret; |
1376 | ||
1377 | /* Wait on any implicit rendering fences */ | |
7bc80a54 CK |
1378 | ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write), |
1379 | true, MAX_SCHEDULE_TIMEOUT); | |
ae4e46b1 CW |
1380 | if (ret < 0) |
1381 | return ret; | |
1382 | ||
1383 | return 0; | |
1384 | } | |
fc13020e DV |
1385 | |
1386 | /** | |
1387 | * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the | |
1388 | * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific | |
1389 | * preparations. Coherency is only guaranteed in the specified range for the | |
1390 | * specified access direction. | |
efb4df82 | 1391 | * @dmabuf: [in] buffer to prepare cpu access for. |
b56ffa58 | 1392 | * @direction: [in] direction of access. |
fc13020e | 1393 | * |
0959a168 | 1394 | * After the cpu access is complete the caller should call |
b56ffa58 | 1395 | * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is |
0959a168 DV |
1396 | * it guaranteed to be coherent with other DMA access. |
1397 | * | |
de9114ec DV |
1398 | * This function will also wait for any DMA transactions tracked through |
1399 | * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit | |
1400 | * synchronization this function will only ensure cache coherency, callers must | |
1401 | * ensure synchronization with such DMA transactions on their own. | |
1402 | * | |
fc13020e DV |
1403 | * Can return negative error values, returns 0 on success. |
1404 | */ | |
831e9da7 | 1405 | int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, |
fc13020e DV |
1406 | enum dma_data_direction direction) |
1407 | { | |
1408 | int ret = 0; | |
1409 | ||
1410 | if (WARN_ON(!dmabuf)) | |
1411 | return -EINVAL; | |
1412 | ||
8ccf0a29 DV |
1413 | might_lock(&dmabuf->resv->lock.base); |
1414 | ||
fc13020e | 1415 | if (dmabuf->ops->begin_cpu_access) |
831e9da7 | 1416 | ret = dmabuf->ops->begin_cpu_access(dmabuf, direction); |
fc13020e | 1417 | |
ae4e46b1 CW |
1418 | /* Ensure that all fences are waited upon - but we first allow |
1419 | * the native handler the chance to do so more efficiently if it | |
1420 | * chooses. A double invocation here will be a reasonably cheap no-op. | |
1421 | */ | |
1422 | if (ret == 0) | |
1423 | ret = __dma_buf_begin_cpu_access(dmabuf, direction); | |
1424 | ||
fc13020e DV |
1425 | return ret; |
1426 | } | |
16b0314a | 1427 | EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF); |
fc13020e DV |
1428 | |
1429 | /** | |
1430 | * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the | |
1431 | * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific | |
1432 | * actions. Coherency is only guaranteed for the specified access | |
1433 | * direction. | |
efb4df82 | 1434 | * @dmabuf: [in] buffer to complete cpu access for. |
b56ffa58 | 1435 | * @direction: [in] direction of access. |
fc13020e | 1436 | * |
0959a168 DV |
1437 | * This terminates CPU access started with dma_buf_begin_cpu_access(). |
1438 | * | |
87e332d5 | 1439 | * Can return negative error values, returns 0 on success. |
fc13020e | 1440 | */ |
18b862dc CW |
1441 | int dma_buf_end_cpu_access(struct dma_buf *dmabuf, |
1442 | enum dma_data_direction direction) | |
fc13020e | 1443 | { |
18b862dc CW |
1444 | int ret = 0; |
1445 | ||
fc13020e DV |
1446 | WARN_ON(!dmabuf); |
1447 | ||
8ccf0a29 DV |
1448 | might_lock(&dmabuf->resv->lock.base); |
1449 | ||
fc13020e | 1450 | if (dmabuf->ops->end_cpu_access) |
18b862dc CW |
1451 | ret = dmabuf->ops->end_cpu_access(dmabuf, direction); |
1452 | ||
1453 | return ret; | |
fc13020e | 1454 | } |
16b0314a | 1455 | EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF); |
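/*
 * Editor's note: a minimal kernel-side sketch, not part of the original file,
 * showing the bracketing described above. The vaddr would typically come from
 * dma_buf_vmap(); parse_header() is a hypothetical consumer of the data.
 *
 *	static int cpu_parse_buffer(struct dma_buf *dmabuf, void *vaddr, size_t len)
 *	{
 *		int ret;
 *
 *		ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *		if (ret)
 *			return ret;
 *
 *		ret = parse_header(vaddr, len);
 *
 *		// End access even if parsing failed, so the bracket stays balanced.
 *		dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *		return ret;
 *	}
 */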
fc13020e | 1456 | |
4c78513e DV |
1457 | |
1458 | /** | |
1459 | * dma_buf_mmap - Set up a userspace mmap with the given vma | |
12c4727e | 1460 | * @dmabuf: [in] buffer that should back the vma |
4c78513e DV |
1461 | * @vma: [in] vma for the mmap |
1462 | * @pgoff: [in] offset in pages where this mmap should start within the | |
5136629d | 1463 | * dma-buf buffer. |
4c78513e DV |
1464 | * |
1465 | * This function adjusts the passed-in vma so that it points at the file backing | |
ecf1dbac | 1466 | * the dma_buf. It also adjusts the starting pgoff and does bounds |
4c78513e DV |
1467 | * checking on the size of the vma. Then it calls the exporter's mmap function to | |
1468 | * set up the mapping. | |
1469 | * | |
1470 | * Can return negative error values, returns 0 on success. | |
1471 | */ | |
1472 | int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, | |
1473 | unsigned long pgoff) | |
1474 | { | |
1475 | if (WARN_ON(!dmabuf || !vma)) | |
1476 | return -EINVAL; | |
1477 | ||
e3a9d6c5 AD |
1478 | /* check if buffer supports mmap */ |
1479 | if (!dmabuf->ops->mmap) | |
1480 | return -EINVAL; | |
1481 | ||
4c78513e | 1482 | /* check for offset overflow */ |
b02da6f8 | 1483 | if (pgoff + vma_pages(vma) < pgoff) |
4c78513e DV |
1484 | return -EOVERFLOW; |
1485 | ||
1486 | /* check for overflowing the buffer's size */ | |
b02da6f8 | 1487 | if (pgoff + vma_pages(vma) > |
4c78513e DV |
1488 | dmabuf->size >> PAGE_SHIFT) |
1489 | return -EINVAL; | |
1490 | ||
1491 | /* readjust the vma */ | |
295992fb | 1492 | vma_set_file(vma, dmabuf->file); |
4c78513e DV |
1493 | vma->vm_pgoff = pgoff; |
1494 | ||
8021fa16 | 1495 | return dmabuf->ops->mmap(dmabuf, vma); |
4c78513e | 1496 | } |
16b0314a | 1497 | EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF); |
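/*
 * Editor's note: a minimal sketch, not part of the original file, of an
 * importer forwarding its own mmap file operation to the exporter via
 * dma_buf_mmap(). struct my_importer, its ->dmabuf member and
 * my_importer_mmap() are hypothetical names.
 *
 *	static int my_importer_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_importer *imp = file->private_data;
 *
 *		// Expose the whole buffer, starting at its first page.
 *		return dma_buf_mmap(imp->dmabuf, vma, 0);
 *	}
 *
 * A non-zero pgoff exposes only the tail of the buffer; combinations of pgoff
 * and vma size that reach past dmabuf->size are rejected.
 */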
98f86c9e DA |
1498 | |
1499 | /** | |
12c4727e SS |
1500 | * dma_buf_vmap - Create a virtual mapping of the buffer object in kernel | |
1501 | * address space. Same restrictions as for vmap and friends apply. | |
1502 | * @dmabuf: [in] buffer to vmap | |
6619ccf1 | 1503 | * @map: [out] returns the vmap pointer |
98f86c9e DA |
1504 | * |
1505 | * This call may fail due to lack of virtual mapping address space. | |
1506 | * These calls are optional in drivers. The intended use is to map | |
1507 | * high-use objects linearly into kernel address space. | |
de9114ec DV |
1508 | * |
1509 | * To ensure coherency users must call dma_buf_begin_cpu_access() and | |
1510 | * dma_buf_end_cpu_access() around any cpu access performed through this | |
1511 | * mapping. | |
fee0c54e | 1512 | * |
6619ccf1 | 1513 | * Returns 0 on success, or a negative errno code otherwise. |
98f86c9e | 1514 | */ |
7938f421 | 1515 | int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map) |
98f86c9e | 1516 | { |
7938f421 | 1517 | struct iosys_map ptr; |
28743e25 | 1518 | int ret; |
6619ccf1 | 1519 | |
7938f421 | 1520 | iosys_map_clear(map); |
f00b4dad | 1521 | |
98f86c9e | 1522 | if (WARN_ON(!dmabuf)) |
6619ccf1 | 1523 | return -EINVAL; |
98f86c9e | 1524 | |
34c7797f DO |
1525 | dma_resv_assert_held(dmabuf->resv); |
1526 | ||
f00b4dad | 1527 | if (!dmabuf->ops->vmap) |
6619ccf1 | 1528 | return -EINVAL; |
f00b4dad | 1529 | |
f00b4dad DV |
1530 | if (dmabuf->vmapping_counter) { |
1531 | dmabuf->vmapping_counter++; | |
7938f421 | 1532 | BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr)); |
6619ccf1 | 1533 | *map = dmabuf->vmap_ptr; |
28743e25 | 1534 | return 0; |
f00b4dad DV |
1535 | } |
1536 | ||
7938f421 | 1537 | BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr)); |
f00b4dad | 1538 | |
6619ccf1 TZ |
1539 | ret = dmabuf->ops->vmap(dmabuf, &ptr); |
1540 | if (WARN_ON_ONCE(ret)) | |
28743e25 | 1541 | return ret; |
f00b4dad | 1542 | |
6619ccf1 | 1543 | dmabuf->vmap_ptr = ptr; |
f00b4dad DV |
1544 | dmabuf->vmapping_counter = 1; |
1545 | ||
6619ccf1 TZ |
1546 | *map = dmabuf->vmap_ptr; |
1547 | ||
28743e25 | 1548 | return 0; |
98f86c9e | 1549 | } |
16b0314a | 1550 | EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF); |
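/*
 * Editor's note: a minimal sketch, not part of the original file, of using
 * dma_buf_vmap() under the reservation lock it asserts. fill_buffer() is a
 * hypothetical name; len is assumed to fit within the buffer.
 *
 *	static int fill_buffer(struct dma_buf *dmabuf, const void *src, size_t len)
 *	{
 *		struct iosys_map map;
 *		int ret;
 *
 *		dma_resv_lock(dmabuf->resv, NULL);
 *
 *		ret = dma_buf_vmap(dmabuf, &map);
 *		if (!ret) {
 *			iosys_map_memcpy_to(&map, 0, src, len);
 *			dma_buf_vunmap(dmabuf, &map);
 *		}
 *
 *		dma_resv_unlock(dmabuf->resv);
 *		return ret;
 *	}
 *
 * For coherency the copy additionally needs dma_buf_begin_cpu_access() /
 * dma_buf_end_cpu_access(), called without the reservation lock held since
 * the exporter callbacks may take it themselves.
 */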
98f86c9e | 1551 | |
56e5abba DO |
1552 | /** |
1553 | * dma_buf_vmap_unlocked - Create a virtual mapping of the buffer object in | |
1554 | * kernel address space. Same restrictions as for vmap and friends apply. | |
1555 | * @dmabuf: [in] buffer to vmap | |
1556 | * @map: [out] returns the vmap pointer | |
1557 | * | |
1558 | * Unlocked version of dma_buf_vmap() | |
1559 | * | |
1560 | * Returns 0 on success, or a negative errno code otherwise. | |
1561 | */ | |
1562 | int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map) | |
1563 | { | |
1564 | int ret; | |
1565 | ||
1566 | iosys_map_clear(map); | |
1567 | ||
1568 | if (WARN_ON(!dmabuf)) | |
1569 | return -EINVAL; | |
1570 | ||
1571 | dma_resv_lock(dmabuf->resv, NULL); | |
1572 | ret = dma_buf_vmap(dmabuf, map); | |
1573 | dma_resv_unlock(dmabuf->resv); | |
1574 | ||
1575 | return ret; | |
1576 | } | |
1577 | EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, DMA_BUF); | |
1578 | ||
98f86c9e DA |
1579 | /** |
1580 | * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap. | |
12c4727e | 1581 | * @dmabuf: [in] buffer to vunmap |
20e76f1a | 1582 | * @map: [in] vmap pointer to vunmap |
98f86c9e | 1583 | */ |
7938f421 | 1584 | void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) |
98f86c9e DA |
1585 | { |
1586 | if (WARN_ON(!dmabuf)) | |
1587 | return; | |
1588 | ||
34c7797f DO |
1589 | dma_resv_assert_held(dmabuf->resv); |
1590 | ||
7938f421 | 1591 | BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr)); |
f00b4dad | 1592 | BUG_ON(dmabuf->vmapping_counter == 0); |
7938f421 | 1593 | BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map)); |
f00b4dad | 1594 | |
f00b4dad DV |
1595 | if (--dmabuf->vmapping_counter == 0) { |
1596 | if (dmabuf->ops->vunmap) | |
20e76f1a | 1597 | dmabuf->ops->vunmap(dmabuf, map); |
7938f421 | 1598 | iosys_map_clear(&dmabuf->vmap_ptr); |
f00b4dad | 1599 | } |
98f86c9e | 1600 | } |
16b0314a | 1601 | EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF); |
b89e3563 | 1602 | |
56e5abba DO |
1603 | /** |
1604 | * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap_unlocked(). | |
1605 | * @dmabuf: [in] buffer to vunmap | |
1606 | * @map: [in] vmap pointer to vunmap | |
1607 | */ | |
1608 | void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map) | |
1609 | { | |
1610 | if (WARN_ON(!dmabuf)) | |
1611 | return; | |
1612 | ||
1613 | dma_resv_lock(dmabuf->resv, NULL); | |
1614 | dma_buf_vunmap(dmabuf, map); | |
1615 | dma_resv_unlock(dmabuf->resv); | |
1616 | } | |
1617 | EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF); | |
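/*
 * Editor's note: a minimal sketch, not part of the original file. When the
 * caller does not already hold the reservation lock, the _unlocked wrappers
 * reduce the previous example to a plain pair of calls; dump_to_log() is a
 * hypothetical consumer.
 *
 *	static int dump_buffer(struct dma_buf *dmabuf)
 *	{
 *		struct iosys_map map;
 *		int ret;
 *
 *		ret = dma_buf_vmap_unlocked(dmabuf, &map);
 *		if (ret)
 *			return ret;
 *
 *		// Only dereference directly when the mapping is in system memory.
 *		if (!map.is_iomem)
 *			dump_to_log(map.vaddr, dmabuf->size);
 *
 *		dma_buf_vunmap_unlocked(dmabuf, &map);
 *		return 0;
 *	}
 */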
1618 | ||
b89e3563 | 1619 | #ifdef CONFIG_DEBUG_FS |
eb0b947e | 1620 | static int dma_buf_debug_show(struct seq_file *s, void *unused) |
b89e3563 | 1621 | { |
b89e3563 SS |
1622 | struct dma_buf *buf_obj; |
1623 | struct dma_buf_attachment *attach_obj; | |
63639d01 | 1624 | int count = 0, attach_count; |
b89e3563 | 1625 | size_t size = 0; |
680753dd | 1626 | int ret; |
b89e3563 | 1627 | |
bfc7bc53 | 1628 | ret = mutex_lock_interruptible(&debugfs_list_mutex); |
b89e3563 SS |
1629 | |
1630 | if (ret) | |
1631 | return ret; | |
1632 | ||
c0b00a52 | 1633 | seq_puts(s, "\nDma-buf Objects:\n"); |
6c01aa13 | 1634 | seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n", |
ed63bb1d | 1635 | "size", "flags", "mode", "count", "ino"); |
b89e3563 | 1636 | |
bfc7bc53 | 1637 | list_for_each_entry(buf_obj, &debugfs_list, list_node) { |
15fd552d | 1638 | |
15fd552d CK |
1639 | ret = dma_resv_lock_interruptible(buf_obj->resv, NULL); |
1640 | if (ret) | |
f45f57cc | 1641 | goto error_unlock; |
b89e3563 | 1642 | |
8c0fd126 GC |
1643 | |
1644 | spin_lock(&buf_obj->name_lock); | |
bb2bb903 | 1645 | seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n", |
c0b00a52 | 1646 | buf_obj->size, |
b89e3563 | 1647 | buf_obj->file->f_flags, buf_obj->file->f_mode, |
a1f6dbac | 1648 | file_count(buf_obj->file), |
ed63bb1d | 1649 | buf_obj->exp_name, |
bb2bb903 | 1650 | file_inode(buf_obj->file)->i_ino, |
6c01aa13 | 1651 | buf_obj->name ?: "<none>"); |
8c0fd126 | 1652 | spin_unlock(&buf_obj->name_lock); |
b89e3563 | 1653 | |
a25efb38 | 1654 | dma_resv_describe(buf_obj->resv, s); |
5eb2c72c | 1655 | |
c0b00a52 | 1656 | seq_puts(s, "\tAttached Devices:\n"); |
b89e3563 SS |
1657 | attach_count = 0; |
1658 | ||
1659 | list_for_each_entry(attach_obj, &buf_obj->attachments, node) { | |
9eddb41d | 1660 | seq_printf(s, "\t%s\n", dev_name(attach_obj->dev)); |
b89e3563 SS |
1661 | attach_count++; |
1662 | } | |
15fd552d | 1663 | dma_resv_unlock(buf_obj->resv); |
b89e3563 | 1664 | |
c0b00a52 | 1665 | seq_printf(s, "Total %d devices attached\n\n", |
b89e3563 SS |
1666 | attach_count); |
1667 | ||
1668 | count++; | |
1669 | size += buf_obj->size; | |
b89e3563 SS |
1670 | } |
1671 | ||
1672 | seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size); | |
1673 | ||
bfc7bc53 | 1674 | mutex_unlock(&debugfs_list_mutex); |
b89e3563 | 1675 | return 0; |
15fd552d | 1676 | |
f45f57cc | 1677 | error_unlock: |
bfc7bc53 | 1678 | mutex_unlock(&debugfs_list_mutex); |
15fd552d | 1679 | return ret; |
b89e3563 SS |
1680 | } |
1681 | ||
2674305a | 1682 | DEFINE_SHOW_ATTRIBUTE(dma_buf_debug); |
b89e3563 SS |
1683 | |
1684 | static struct dentry *dma_buf_debugfs_dir; | |
1685 | ||
1686 | static int dma_buf_init_debugfs(void) | |
1687 | { | |
bd3e2208 | 1688 | struct dentry *d; |
b89e3563 | 1689 | int err = 0; |
5136629d | 1690 | |
bd3e2208 MK |
1691 | d = debugfs_create_dir("dma_buf", NULL); |
1692 | if (IS_ERR(d)) | |
1693 | return PTR_ERR(d); | |
5136629d | 1694 | |
bd3e2208 | 1695 | dma_buf_debugfs_dir = d; |
b89e3563 | 1696 | |
bd3e2208 MK |
1697 | d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir, |
1698 | NULL, &dma_buf_debug_fops); | |
1699 | if (IS_ERR(d)) { | |
b89e3563 | 1700 | pr_debug("dma_buf: debugfs: failed to create node bufinfo\n"); |
b7479990 MK |
1701 | debugfs_remove_recursive(dma_buf_debugfs_dir); |
1702 | dma_buf_debugfs_dir = NULL; | |
bd3e2208 | 1703 | err = PTR_ERR(d); |
b7479990 | 1704 | } |
b89e3563 SS |
1705 | |
1706 | return err; | |
1707 | } | |
1708 | ||
1709 | static void dma_buf_uninit_debugfs(void) | |
1710 | { | |
298b6a81 | 1711 | debugfs_remove_recursive(dma_buf_debugfs_dir); |
b89e3563 | 1712 | } |
b89e3563 SS |
1713 | #else |
1714 | static inline int dma_buf_init_debugfs(void) | |
1715 | { | |
1716 | return 0; | |
1717 | } | |
1718 | static inline void dma_buf_uninit_debugfs(void) | |
1719 | { | |
1720 | } | |
1721 | #endif | |
1722 | ||
1723 | static int __init dma_buf_init(void) | |
1724 | { | |
bdb8d06d HV |
1725 | int ret; |
1726 | ||
1727 | ret = dma_buf_init_sysfs_statistics(); | |
1728 | if (ret) | |
1729 | return ret; | |
1730 | ||
ed63bb1d GH |
1731 | dma_buf_mnt = kern_mount(&dma_buf_fs_type); |
1732 | if (IS_ERR(dma_buf_mnt)) | |
1733 | return PTR_ERR(dma_buf_mnt); | |
1734 | ||
b89e3563 SS |
1735 | dma_buf_init_debugfs(); |
1736 | return 0; | |
1737 | } | |
1738 | subsys_initcall(dma_buf_init); | |
1739 | ||
1740 | static void __exit dma_buf_deinit(void) | |
1741 | { | |
1742 | dma_buf_uninit_debugfs(); | |
ed63bb1d | 1743 | kern_unmount(dma_buf_mnt); |
bdb8d06d | 1744 | dma_buf_uninit_sysfs_statistics(); |
b89e3563 SS |
1745 | } |
1746 | __exitcall(dma_buf_deinit); |