// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/sync_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

#include "dma-buf-sysfs-stats.h"

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;

static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	size_t ret = 0;

	dmabuf = dentry->d_fsdata;
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
	spin_unlock(&dmabuf->name_lock);

	return dynamic_dname(buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}

static void dma_buf_release(struct dentry *dentry)
{
	struct dma_buf *dmabuf;

	dmabuf = dentry->d_fsdata;
	if (unlikely(!dmabuf))
		return;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * If you hit this BUG() it could mean:
	 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
	 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
	 */
	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);

	dma_buf_stats_teardown(dmabuf);
	dmabuf->ops->release(dmabuf);

	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
		dma_resv_fini(dmabuf->resv);

	WARN_ON(!list_empty(&dmabuf->attachments));
	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
}

static int dma_buf_file_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;
	if (dmabuf) {
		mutex_lock(&db_list.lock);
		list_del(&dmabuf->list_node);
		mutex_unlock(&db_list.lock);
	}

	return 0;
}

static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
	.d_release = dma_buf_release,
};

static struct vfsmount *dma_buf_mnt;

static int dma_buf_fs_init_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx;

	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->dops = &dma_buf_dentry_ops;
	return 0;
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.init_fs_context = dma_buf_fs_init_context,
	.kill_sb = kill_anon_super,
};

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/*
	 * Only support discovering the end of the buffer, but also allow
	 * SEEK_SET to maintain the idiomatic SEEK_END(0), SEEK_CUR(0)
	 * pattern.
	 */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

/**
 * DOC: implicit fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of
 *   the most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
 *
 * As an alternative to poll(), the set of fences on a DMA buffer can be
 * exported as a &sync_file using &dma_buf_sync_file_export.
 *
 * A minimal userspace polling sketch follows this comment.
 */

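/*
 * Example: a minimal userspace sketch of the polling described above. This
 * is not part of this file; dmabuf_fd is assumed to be a dma-buf file
 * descriptor obtained from an exporting driver.
 *
 *	struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLIN };
 *
 *	// Blocks until the most recent write/exclusive fence has signaled.
 *	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN)) {
 *		// DMA writes are done; cache maintenance may still be
 *		// needed before CPU access (see DMA_BUF_IOCTL_SYNC).
 *	}
 */
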
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
	dma_fence_put(fence);
	/* Paired with get_file in dma_buf_poll */
	fput(dmabuf->file);
}

static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
				struct dma_buf_poll_cb_t *dcb)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int r;

	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
				fence) {
		dma_fence_get(fence);
		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
		if (!r)
			return true;
		dma_fence_put(fence);
	}

	return false;
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv;
	__poll_t events;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

	dma_resv_lock(resv, NULL);

	if (events & EPOLLOUT) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLOUT) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, true, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				events &= ~EPOLLOUT;
		}
	}

	if (events & EPOLLIN) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLIN;
		else
			dcb->active = EPOLLIN;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLIN) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, false, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				events &= ~EPOLLIN;
		}
	}

	dma_resv_unlock(resv);
	return events;
}

/**
 * dma_buf_set_name - Set a name on a specific dma_buf to track its usage.
 * It supports changing the name of the dma-buf if the same piece of memory
 * is used for multiple purposes between different devices.
 *
 * @dmabuf: [in]     dmabuf buffer that will be renamed.
 * @buf:    [in]     A piece of userspace memory that contains the name of
 *                   the dma-buf.
 *
 * Returns 0 on success.
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);

	if (IS_ERR(name))
		return PTR_ERR(name);

	spin_lock(&dmabuf->name_lock);
	kfree(dmabuf->name);
	dmabuf->name = name;
	spin_unlock(&dmabuf->name_lock);

	return 0;
}

#if IS_ENABLED(CONFIG_SYNC_FILE)
static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
				     void __user *user_data)
{
	struct dma_buf_export_sync_file arg;
	enum dma_resv_usage usage;
	struct dma_fence *fence = NULL;
	struct sync_file *sync_file;
	int fd, ret;

	if (copy_from_user(&arg, user_data, sizeof(arg)))
		return -EFAULT;

	if (arg.flags & ~DMA_BUF_SYNC_RW)
		return -EINVAL;

	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
		return -EINVAL;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
	ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
	if (ret)
		goto err_put_fd;

	if (!fence)
		fence = dma_fence_get_stub();

	sync_file = sync_file_create(fence);

	dma_fence_put(fence);

	if (!sync_file) {
		ret = -ENOMEM;
		goto err_put_fd;
	}

	arg.fd = fd;
	if (copy_to_user(user_data, &arg, sizeof(arg))) {
		ret = -EFAULT;
		goto err_put_file;
	}

	fd_install(fd, sync_file->file);

	return 0;

err_put_file:
	fput(sync_file->file);
err_put_fd:
	put_unused_fd(fd);
	return ret;
}

static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
				     const void __user *user_data)
{
	struct dma_buf_import_sync_file arg;
	struct dma_fence *fence, *f;
	enum dma_resv_usage usage;
	struct dma_fence_unwrap iter;
	unsigned int num_fences;
	int ret = 0;

	if (copy_from_user(&arg, user_data, sizeof(arg)))
		return -EFAULT;

	if (arg.flags & ~DMA_BUF_SYNC_RW)
		return -EINVAL;

	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
		return -EINVAL;

	fence = sync_file_get_fence(arg.fd);
	if (!fence)
		return -EINVAL;

	usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
						   DMA_RESV_USAGE_READ;

	num_fences = 0;
	dma_fence_unwrap_for_each(f, &iter, fence)
		++num_fences;

	if (num_fences > 0) {
		dma_resv_lock(dmabuf->resv, NULL);

		ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
		if (!ret) {
			dma_fence_unwrap_for_each(f, &iter, fence)
				dma_resv_add_fence(dmabuf->resv, f, usage);
		}

		dma_resv_unlock(dmabuf->resv);
	}

	dma_fence_put(fence);

	return ret;
}
#endif

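/*
 * Example: a minimal userspace sketch of the two ioctls above. This is not
 * part of this file; dmabuf_fd and other_dmabuf_fd are assumed to be valid
 * dma-buf file descriptors.
 *
 *	struct dma_buf_export_sync_file exp = { .flags = DMA_BUF_SYNC_READ };
 *
 *	// Snapshot the fences a reader would have to wait for ...
 *	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &exp) == 0) {
 *		// ... and attach them as read fences on another buffer.
 *		struct dma_buf_import_sync_file imp = {
 *			.flags = DMA_BUF_SYNC_READ,
 *			.fd = exp.fd,
 *		};
 *		ioctl(other_dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &imp);
 *		close(exp.fd);
 *	}
 */
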
static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME_A:
	case DMA_BUF_SET_NAME_B:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

#if IS_ENABLED(CONFIG_SYNC_FILE)
	case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
		return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
	case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
		return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
#endif

	default:
		return -ENOTTY;
	}
}

static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	spin_unlock(&dmabuf->name_lock);
}

static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_file_release,
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
	.unlocked_ioctl	= dma_buf_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.show_fdinfo	= dma_buf_show_fdinfo,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(size_t size, int flags)
{
	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
	struct file *file;

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = size;
	inode_set_bytes(inode, size);

	/*
	 * The ->i_ino acquired from get_next_ino() is not unique and thus
	 * not suitable for use as the dentry name by dmabuf stats.
	 * Override ->i_ino with a unique, dmabuffs-specific value.
	 */
	inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
	flags &= O_ACCMODE | O_NONBLOCK;
	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;

	return file;

err_alloc_file:
	iput(inode);
	return file;
}

/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple:
 *
 * 1. The exporter defines its exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to share with: First the file descriptor is converted to a &dma_buf using
 *    dma_buf_get(). Then the buffer is attached to the device using
 *    dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * A minimal exporter-side sketch of step 1 follows this comment.
 *
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops.
 */

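/*
 * Example: a minimal exporter-side sketch of step 1 above. my_buffer and
 * my_dmabuf_ops are hypothetical names used for illustration only; error
 * handling is trimmed.
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops = &my_dmabuf_ops;	// must provide map/unmap/release
 *	exp_info.size = my_buffer->size;
 *	exp_info.flags = O_CLOEXEC;
 *	exp_info.priv = my_buffer;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *	if (fd < 0) {
 *		dma_buf_put(dmabuf);
 *		return fd;
 *	}
 */
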
/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * Additionally, provide a name string for exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created struct dma_buf object, which wraps the
 * supplied private data and operations for struct dma_buf_ops. On failure,
 * either because of missing ops or an error allocating struct dma_buf, returns
 * a negative error wrapped into a pointer.
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (WARN_ON(!exp_info->priv || !exp_info->ops
		    || !exp_info->ops->map_dma_buf
		    || !exp_info->ops->unmap_dma_buf
		    || !exp_info->ops->release))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
		    (exp_info->ops->pin || exp_info->ops->unpin)))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
		return ERR_PTR(-EINVAL);

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	file = dma_buf_getfile(exp_info->size, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_module;
	}

	if (!exp_info->resv)
		alloc_size += sizeof(struct dma_resv);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;
	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_file;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	spin_lock_init(&dmabuf->name_lock);
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
	INIT_LIST_HEAD(&dmabuf->attachments);

	if (!resv) {
		dmabuf->resv = (struct dma_resv *)&dmabuf[1];
		dma_resv_init(dmabuf->resv);
	} else {
		dmabuf->resv = resv;
	}

	ret = dma_buf_stats_setup(dmabuf, file);
	if (ret)
		goto err_dmabuf;

	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;
	dmabuf->file = file;

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;

err_dmabuf:
	if (!resv)
		dma_resv_fini(dmabuf->resv);
	kfree(dmabuf);
err_file:
	fput(file);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);

/**
 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);

/**
 * dma_buf_get - returns the struct dma_buf related to an fd
 * @fd:	[in]	fd associated with the struct dma_buf to be returned
 *
 * On success, returns the struct dma_buf associated with an fd; uses
 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);

static void mangle_sg_table(struct sg_table *sg_table)
{
#ifdef CONFIG_DMABUF_DEBUG
	int i;
	struct scatterlist *sg;

	/*
	 * To catch abuse of the underlying struct page by importers mix
	 * up the bits, but take care to preserve the low SG_ bits to
	 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
	 * before passing the sgt back to the exporter.
	 */
	for_each_sgtable_sg(sg_table, sg, i)
		sg->page_link ^= ~0xffUL;
#endif
}

static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach,
				      enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	signed long ret;

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (IS_ERR_OR_NULL(sg_table))
		return sg_table;

	if (!dma_buf_attachment_is_dynamic(attach)) {
		ret = dma_resv_wait_timeout(attach->dmabuf->resv,
					    DMA_RESV_USAGE_KERNEL, true,
					    MAX_SCHEDULE_TIMEOUT);
		if (ret < 0) {
			attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
							   direction);
			return ERR_PTR(ret);
		}
	}

	mangle_sg_table(sg_table);
	return sg_table;
}

/**
 * DOC: locking convention
 *
 * In order to avoid deadlock situations between dma-buf exports and importers,
 * all dma-buf API users must follow the common dma-buf locking convention.
 *
 * Convention for importers
 *
 * 1. Importers must hold the dma-buf reservation lock when calling these
 *    functions:
 *
 *    - dma_buf_pin()
 *    - dma_buf_unpin()
 *    - dma_buf_map_attachment()
 *    - dma_buf_unmap_attachment()
 *    - dma_buf_vmap()
 *    - dma_buf_vunmap()
 *
 * 2. Importers must not hold the dma-buf reservation lock when calling these
 *    functions:
 *
 *    - dma_buf_attach()
 *    - dma_buf_dynamic_attach()
 *    - dma_buf_detach()
 *    - dma_buf_export()
 *    - dma_buf_fd()
 *    - dma_buf_get()
 *    - dma_buf_put()
 *    - dma_buf_mmap()
 *    - dma_buf_begin_cpu_access()
 *    - dma_buf_end_cpu_access()
 *    - dma_buf_map_attachment_unlocked()
 *    - dma_buf_unmap_attachment_unlocked()
 *    - dma_buf_vmap_unlocked()
 *    - dma_buf_vunmap_unlocked()
 *
 * Convention for exporters
 *
 * 1. These &dma_buf_ops callbacks are invoked with the dma-buf reservation
 *    lock not held, and the exporter may take it:
 *
 *    - &dma_buf_ops.attach()
 *    - &dma_buf_ops.detach()
 *    - &dma_buf_ops.release()
 *    - &dma_buf_ops.begin_cpu_access()
 *    - &dma_buf_ops.end_cpu_access()
 *    - &dma_buf_ops.mmap()
 *
 * 2. These &dma_buf_ops callbacks are invoked with the dma-buf reservation
 *    lock held, and the exporter must not take it:
 *
 *    - &dma_buf_ops.pin()
 *    - &dma_buf_ops.unpin()
 *    - &dma_buf_ops.map_dma_buf()
 *    - &dma_buf_ops.unmap_dma_buf()
 *    - &dma_buf_ops.vmap()
 *    - &dma_buf_ops.vunmap()
 *
 * 3. Exporters must hold the dma-buf reservation lock when calling these
 *    functions:
 *
 *    - dma_buf_move_notify()
 *
 * A minimal importer-side sketch of this convention follows this comment.
 */

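/*
 * Example: a minimal importer-side sketch of the convention above, using a
 * hypothetical device and an already imported dma-buf; error handling is
 * trimmed.
 *
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	attach = dma_buf_attach(dmabuf, dev);		// unlocked
 *
 *	dma_resv_lock(dmabuf->resv, NULL);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	// ... program the device with sgt ...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_resv_unlock(dmabuf->resv);
 *
 *	dma_buf_detach(dmabuf, attach);			// unlocked
 */
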
/**
 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
 * @dmabuf:		[in]	buffer to attach device to.
 * @dev:		[in]	device to be attached.
 * @importer_ops:	[in]	importer operations for the attachment
 * @importer_priv:	[in]	importer private pointer for the attachment
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
 * functionality.
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
 */
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(importer_ops && !importer_ops->move_notify))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;
	if (importer_ops)
		attach->peer2peer = importer_ops->allow_peer2peer;
	attach->importer_ops = importer_ops;
	attach->importer_priv = importer_priv;

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	dma_resv_lock(dmabuf->resv, NULL);
	list_add(&attach->node, &dmabuf->attachments);
	dma_resv_unlock(dmabuf->resv);

	/*
	 * When either the importer or the exporter can't handle dynamic
	 * mappings we cache the mapping here to avoid issues with the
	 * reservation object lock.
	 */
	if (dma_buf_attachment_is_dynamic(attach) !=
	    dma_buf_is_dynamic(dmabuf)) {
		struct sg_table *sgt;

		dma_resv_lock(attach->dmabuf->resv, NULL);
		if (dma_buf_is_dynamic(attach->dmabuf)) {
			ret = dmabuf->ops->pin(attach);
			if (ret)
				goto err_unlock;
		}

		sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
		if (!sgt)
			sgt = ERR_PTR(-ENOMEM);
		if (IS_ERR(sgt)) {
			ret = PTR_ERR(sgt);
			goto err_unpin;
		}
		dma_resv_unlock(attach->dmabuf->resv);
		attach->sgt = sgt;
		attach->dir = DMA_BIDIRECTIONAL;
	}

	return attach;

err_attach:
	kfree(attach);
	return ERR_PTR(ret);

err_unpin:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dmabuf->ops->unpin(attach);

err_unlock:
	dma_resv_unlock(attach->dmabuf->resv);

	dma_buf_detach(dmabuf, attach);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);

/**
 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
 * mapping.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);

static void __unmap_dma_buf(struct dma_buf_attachment *attach,
			    struct sg_table *sg_table,
			    enum dma_data_direction direction)
{
	/* uses XOR, hence this unmangles */
	mangle_sg_table(sg_table);

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is free'd after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 *
 * Optionally this calls &dma_buf_ops.detach for device-specific detach.
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf))
		return;

	dma_resv_lock(dmabuf->resv, NULL);

	if (attach->sgt) {
		__unmap_dma_buf(attach, attach->sgt, attach->dir);

		if (dma_buf_is_dynamic(attach->dmabuf))
			dmabuf->ops->unpin(attach);
	}
	list_del(&attach->node);

	dma_resv_unlock(dmabuf->resv);

	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	kfree(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);

/**
 * dma_buf_pin - Lock down the DMA-buf
 * @attach:	[in]	attachment which should be pinned
 *
 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
 * call this, and only for limited use cases like scanout and not for temporary
 * pin operations. It is not permitted to allow userspace to pin arbitrary
 * amounts of buffers through this interface.
 *
 * Buffers must be unpinned by calling dma_buf_unpin().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;
	int ret = 0;

	WARN_ON(!dma_buf_attachment_is_dynamic(attach));

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->pin)
		ret = dmabuf->ops->pin(attach);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);

/**
 * dma_buf_unpin - Unpin a DMA-buf
 * @attach:	[in]	attachment which should be unpinned
 *
 * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
 * any mapping of @attach again and inform the importer through
 * &dma_buf_attach_ops.move_notify.
 */
void dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;

	WARN_ON(!dma_buf_attachment_is_dynamic(attach));

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->unpin)
		dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns an sg_table containing the scatterlist of the mapping; returns
 * ERR_PTR on error. May return -EINTR if it is interrupted by a signal.
 *
 * On success, the DMA addresses and lengths in the returned scatterlist are
 * PAGE_SIZE aligned.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts of
 * time.
 *
 * Important: Dynamic importers must wait for the exclusive fence of the struct
 * dma_resv attached to the DMA-BUF first.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	int r;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt) {
		/*
		 * Two mappings with different directions for the same
		 * attachment are not allowed.
		 */
		if (attach->dir != direction &&
		    attach->dir != DMA_BIDIRECTIONAL)
			return ERR_PTR(-EBUSY);

		return attach->sgt;
	}

	if (dma_buf_is_dynamic(attach->dmabuf)) {
		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
			r = attach->dmabuf->ops->pin(attach);
			if (r)
				return ERR_PTR(r);
		}
	}

	sg_table = __map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		attach->dmabuf->ops->unpin(attach);

	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
		attach->sgt = sg_table;
		attach->dir = direction;
	}

#ifdef CONFIG_DMA_API_DEBUG
	if (!IS_ERR(sg_table)) {
		struct scatterlist *sg;
		u64 addr;
		int len;
		int i;

		for_each_sgtable_dma_sg(sg_table, sg, i) {
			addr = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
					 __func__, addr, len);
			}
		}
	}
#endif /* CONFIG_DMA_API_DEBUG */
	return sg_table;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);

/**
 * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Unlocked variant of dma_buf_map_attachment().
 */
struct sg_table *
dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
				enum dma_data_direction direction)
{
	struct sg_table *sg_table;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	dma_resv_lock(attach->dmabuf->resv, NULL);
	sg_table = dma_buf_map_attachment(attach, direction);
	dma_resv_unlock(attach->dmabuf->resv);

	return sg_table;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt == sg_table)
		return;

	__unmap_dma_buf(attach, sg_table, direction);

	if (dma_buf_is_dynamic(attach->dmabuf) &&
	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		dma_buf_unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);

/**
 * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer; might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * Unlocked variant of dma_buf_unmap_attachment().
 */
void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
				       struct sg_table *sg_table,
				       enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	dma_resv_lock(attach->dmabuf->resv, NULL);
	dma_buf_unmap_attachment(attach, sg_table, direction);
	dma_resv_unlock(attach->dmabuf->resv);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);

/**
 * dma_buf_move_notify - notify attachments that DMA-buf is moving
 *
 * @dmabuf:	[in]	buffer which is moving
 *
 * Informs all attachments that they need to destroy and recreate all their
 * mappings.
 */
void dma_buf_move_notify(struct dma_buf *dmabuf)
{
	struct dma_buf_attachment *attach;

	dma_resv_assert_held(dmabuf->resv);

	list_for_each_entry(attach, &dmabuf->attachments, node)
		if (attach->importer_ops)
			attach->importer_ops->move_notify(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);

/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   Since most kernel-internal dma-buf accesses need the entire buffer, a
 *   vmap interface is provided. Note that on very old 32-bit architectures
 *   vmalloc space might be limited and result in vmap calls failing.
 *
 *   Interfaces::
 *
 *	void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
 *	void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
 *
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
 *   count for all vmap access and calls down into the exporter's vmap function
 *   only when no vmapping exists, and only unmaps it once. Protection against
 *   concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
 *
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed in
 *   many processing pipelines (e.g. feeding a software rendered image into a
 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 *   framework already supported this and for DMA buffer file descriptors to
 *   replace ION buffers mmap support was needed.
 *
 *   There are no special interfaces, userspace simply calls mmap on the dma-buf
 *   fd. But like for CPU access there's a need to bracket the actual access,
 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
 *
 *   Some systems might need some sort of cache coherency management e.g. when
 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 *   To circumvent this problem there are begin/end coherency markers, that
 *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 *   sequence would be used like following:
 *
 *   - mmap dma-buf fd
 *   - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *     to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *     want (with the new data being consumed by say the GPU or the scanout
 *     device)
 *   - munmap once you don't need the buffer any more
 *
 *   For correctness and optimal performance, it is always required to use
 *   SYNC_START and SYNC_END before and after, respectively, when accessing the
 *   mapped address. Userspace cannot rely on coherent access, even when there
 *   are systems where it just works without calling these ioctls. A minimal
 *   userspace sketch of this sequence is shown after this comment.
 *
 * - And as a CPU fallback in userspace processing pipelines.
 *
 *   Similar to the motivation for kernel cpu access it is again important that
 *   the userspace code of a given importing subsystem can use the same
 *   interfaces with an imported dma-buf buffer object as with a native buffer
 *   object. This is especially important for drm where the userspace part of
 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer would be rather invasive.
 *
 *   The assumption in the current dma-buf interfaces is that redirecting the
 *   initial mmap is all that's needed. A survey of some of the existing
 *   subsystems shows that no driver seems to do any nefarious thing like
 *   syncing up with outstanding asynchronous processing on the device or
 *   allocating special resources at fault time. So hopefully this is good
 *   enough, since adding interfaces to intercept pagefaults and allow pte
 *   shootdowns would increase the complexity quite a bit.
 *
 *   Interface::
 *
 *	int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
 *			 unsigned long);
 *
 *   If the importing subsystem simply provides a special-purpose mmap call to
 *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
 *   equally achieve that for a dma-buf object.
 */

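/*
 * Example: a minimal userspace sketch of the SYNC_START/SYNC_END bracketing
 * described above. This is not part of this file; dmabuf_fd and size are
 * assumed.
 *
 *	struct dma_buf_sync sync = { 0 };
 *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       dmabuf_fd, 0);
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	// restart on -EINTR/-EAGAIN
 *
 *	// ... read/write through p ...
 *
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *	munmap(p, size);
 */
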
static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct dma_resv *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit rendering fences */
	ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
				    true, MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

1372 | /** | |
1373 | * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the | |
1374 | * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific | |
1375 | * preparations. Coherency is only guaranteed in the specified range for the | |
1376 | * specified access direction. | |
efb4df82 | 1377 | * @dmabuf: [in] buffer to prepare cpu access for. |
b56ffa58 | 1378 | * @direction: [in] direction of access. |
fc13020e | 1379 | * |
0959a168 | 1380 | * After the cpu access is complete the caller should call |
b56ffa58 | 1381 | * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is |
0959a168 DV |
1382 | * it guaranteed to be coherent with other DMA access. |
1383 | * | |
de9114ec DV |
1384 | * This function will also wait for any DMA transactions tracked through |
1385 | * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit | |
1386 | * synchronization this function will only ensure cache coherency, callers must | |
1387 | * ensure synchronization with such DMA transactions on their own. | |
1388 | * | |
fc13020e DV |
1389 | * Can return negative error values, returns 0 on success. |
1390 | */ | |
831e9da7 | 1391 | int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, |
fc13020e DV |
1392 | enum dma_data_direction direction) |
1393 | { | |
1394 | int ret = 0; | |
1395 | ||
1396 | if (WARN_ON(!dmabuf)) | |
1397 | return -EINVAL; | |
1398 | ||
8ccf0a29 DV |
1399 | might_lock(&dmabuf->resv->lock.base); |
1400 | ||
fc13020e | 1401 | if (dmabuf->ops->begin_cpu_access) |
831e9da7 | 1402 | ret = dmabuf->ops->begin_cpu_access(dmabuf, direction); |
fc13020e | 1403 | |
ae4e46b1 CW |
1404 | /* Ensure that all fences are waited upon - but we first allow |
1405 | * the native handler the chance to do so more efficiently if it | |
1406 | * chooses. A double invocation here will be a reasonably cheap no-op. | |
1407 | */ | |
1408 | if (ret == 0) | |
1409 | ret = __dma_buf_begin_cpu_access(dmabuf, direction); | |
1410 | ||
fc13020e DV |
1411 | return ret; |
1412 | } | |
16b0314a | 1413 | EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF); |
fc13020e DV |
1414 | |
1415 | /** | |
1416 | * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the | |
1417 | * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific | |
1418 | * actions. Coherency is only guaranteed for the specified access | |
1419 | * direction. | |
efb4df82 | 1420 | * @dmabuf: [in] buffer to complete cpu access for. |
b56ffa58 | 1421 | * @direction: [in] direction of access. |
fc13020e | 1422 | * |
0959a168 DV |
1423 | * This terminates CPU access started with dma_buf_begin_cpu_access(). |
1424 | * | |
87e332d5 | 1425 | * Can return negative error values, returns 0 on success. |
fc13020e | 1426 | */ |
18b862dc CW |
1427 | int dma_buf_end_cpu_access(struct dma_buf *dmabuf, |
1428 | enum dma_data_direction direction) | |
fc13020e | 1429 | { |
18b862dc CW |
1430 | int ret = 0; |
1431 | ||
fc13020e DV |
1432 | WARN_ON(!dmabuf); |
1433 | ||
8ccf0a29 DV |
1434 | might_lock(&dmabuf->resv->lock.base); |
1435 | ||
fc13020e | 1436 | if (dmabuf->ops->end_cpu_access) |
18b862dc CW |
1437 | ret = dmabuf->ops->end_cpu_access(dmabuf, direction); |
1438 | ||
1439 | return ret; | |
fc13020e | 1440 | } |
16b0314a | 1441 | EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF); |
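/*
 * Example (sketch, illustrative names only): bracketing kernel-CPU access
 * with dma_buf_begin_cpu_access()/dma_buf_end_cpu_access() so the reads are
 * coherent with any implicitly synchronized DMA. my_cpu_read() is a
 * hypothetical caller:
 *
 *	static int my_cpu_read(struct dma_buf *dmabuf)
 *	{
 *		struct iosys_map map;
 *		int ret;
 *
 *		ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *		if (ret)
 *			return ret;
 *
 *		ret = dma_buf_vmap_unlocked(dmabuf, &map);
 *		if (!ret) {
 *			// ... read through the mapping, e.g. with the
 *			// iosys_map helpers ...
 *			dma_buf_vunmap_unlocked(dmabuf, &map);
 *		}
 *
 *		dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *		return ret;
 *	}
 */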
fc13020e | 1442 | |
4c78513e DV |
1443 | |
1444 | /** | |
1445 | * dma_buf_mmap - Set up a userspace mmap with the given vma | |
12c4727e | 1446 | * @dmabuf: [in] buffer that should back the vma |
4c78513e DV |
1447 | * @vma: [in] vma for the mmap |
1448 | * @pgoff: [in] offset in pages where this mmap should start within the | |
5136629d | 1449 | * dma-buf buffer. |
4c78513e DV |
1450 | * |
1451 | * This function adjusts the passed-in vma so that it points at the file of the | |
ecf1dbac | 1452 | * dma_buf. It also adjusts the starting pgoff and does bounds | |
4c78513e DV |
1453 | * checking on the size of the vma. Then it calls the exporter's mmap function to | |
1454 | * set up the mapping. | |
1455 | * | |
1456 | * Can return negative error values, returns 0 on success. | |
1457 | */ | |
1458 | int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, | |
1459 | unsigned long pgoff) | |
1460 | { | |
1461 | if (WARN_ON(!dmabuf || !vma)) | |
1462 | return -EINVAL; | |
1463 | ||
e3a9d6c5 AD |
1464 | /* check if buffer supports mmap */ |
1465 | if (!dmabuf->ops->mmap) | |
1466 | return -EINVAL; | |
1467 | ||
4c78513e | 1468 | /* check for offset overflow */ |
b02da6f8 | 1469 | if (pgoff + vma_pages(vma) < pgoff) |
4c78513e DV |
1470 | return -EOVERFLOW; |
1471 | ||
1472 | /* check for overflowing the buffer's size */ | |
b02da6f8 | 1473 | if (pgoff + vma_pages(vma) > |
4c78513e DV |
1474 | dmabuf->size >> PAGE_SHIFT) |
1475 | return -EINVAL; | |
1476 | ||
1477 | /* readjust the vma */ | |
295992fb | 1478 | vma_set_file(vma, dmabuf->file); |
4c78513e DV |
1479 | vma->vm_pgoff = pgoff; |
1480 | ||
8021fa16 | 1481 | return dmabuf->ops->mmap(dmabuf, vma); |
4c78513e | 1482 | } |
16b0314a | 1483 | EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF); |
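/*
 * Example (sketch): an importer's own mmap file operation that simply
 * forwards to the exporter via dma_buf_mmap(). "struct my_obj" and the
 * private_data lookup are hypothetical driver details:
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_obj *obj = file->private_data;
 *
 *		// map the whole buffer, starting at page 0 of the dma-buf
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */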
98f86c9e DA |
1484 | |
1485 | /** | |
12c4727e SS |
1486 | * dma_buf_vmap - Create virtual mapping for the buffer object into kernel |
1487 | * address space. Same restrictions as for vmap and friends apply. | |
1488 | * @dmabuf: [in] buffer to vmap | |
6619ccf1 | 1489 | * @map: [out] returns the vmap pointer |
98f86c9e DA |
1490 | * |
1491 | * This call may fail due to lack of virtual mapping address space. | |
1492 | * These calls are optional for drivers. Their intended use is to map | |
1493 | * objects linearly into kernel address space for frequently accessed objects. | |
de9114ec DV |
1494 | * |
1495 | * To ensure coherency users must call dma_buf_begin_cpu_access() and | |
1496 | * dma_buf_end_cpu_access() around any cpu access performed through this | |
1497 | * mapping. | |
fee0c54e | 1498 | * |
6619ccf1 | 1499 | * Returns 0 on success, or a negative errno code otherwise. |
98f86c9e | 1500 | */ |
7938f421 | 1501 | int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map) |
98f86c9e | 1502 | { |
7938f421 | 1503 | struct iosys_map ptr; |
28743e25 | 1504 | int ret; |
6619ccf1 | 1505 | |
7938f421 | 1506 | iosys_map_clear(map); |
f00b4dad | 1507 | |
98f86c9e | 1508 | if (WARN_ON(!dmabuf)) |
6619ccf1 | 1509 | return -EINVAL; |
98f86c9e | 1510 | |
34c7797f DO |
1511 | dma_resv_assert_held(dmabuf->resv); |
1512 | ||
f00b4dad | 1513 | if (!dmabuf->ops->vmap) |
6619ccf1 | 1514 | return -EINVAL; |
f00b4dad | 1515 | |
f00b4dad DV |
1516 | if (dmabuf->vmapping_counter) { |
1517 | dmabuf->vmapping_counter++; | |
7938f421 | 1518 | BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr)); |
6619ccf1 | 1519 | *map = dmabuf->vmap_ptr; |
28743e25 | 1520 | return 0; |
f00b4dad DV |
1521 | } |
1522 | ||
7938f421 | 1523 | BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr)); |
f00b4dad | 1524 | |
6619ccf1 TZ |
1525 | ret = dmabuf->ops->vmap(dmabuf, &ptr); |
1526 | if (WARN_ON_ONCE(ret)) | |
28743e25 | 1527 | return ret; |
f00b4dad | 1528 | |
6619ccf1 | 1529 | dmabuf->vmap_ptr = ptr; |
f00b4dad DV |
1530 | dmabuf->vmapping_counter = 1; |
1531 | ||
6619ccf1 TZ |
1532 | *map = dmabuf->vmap_ptr; |
1533 | ||
28743e25 | 1534 | return 0; |
98f86c9e | 1535 | } |
16b0314a | 1536 | EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF); |
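/*
 * Example (sketch, names are illustrative): filling a buffer through a
 * kernel vmap while holding the reservation lock, as dma_buf_vmap() and
 * dma_buf_vunmap() require:
 *
 *	static int my_fill(struct dma_buf *dmabuf, int pattern)
 *	{
 *		struct iosys_map map;
 *		int ret;
 *
 *		dma_resv_lock(dmabuf->resv, NULL);
 *		ret = dma_buf_vmap(dmabuf, &map);
 *		if (!ret) {
 *			iosys_map_memset(&map, 0, pattern, dmabuf->size);
 *			dma_buf_vunmap(dmabuf, &map);
 *		}
 *		dma_resv_unlock(dmabuf->resv);
 *		return ret;
 *	}
 */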
98f86c9e | 1537 | |
56e5abba DO |
1538 | /** |
1539 | * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel | |
1540 | * address space. Same restrictions as for vmap and friends apply. | |
1541 | * @dmabuf: [in] buffer to vmap | |
1542 | * @map: [out] returns the vmap pointer | |
1543 | * | |
1544 | * Unlocked version of dma_buf_vmap() | |
1545 | * | |
1546 | * Returns 0 on success, or a negative errno code otherwise. | |
1547 | */ | |
1548 | int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map) | |
1549 | { | |
1550 | int ret; | |
1551 | ||
1552 | iosys_map_clear(map); | |
1553 | ||
1554 | if (WARN_ON(!dmabuf)) | |
1555 | return -EINVAL; | |
1556 | ||
1557 | dma_resv_lock(dmabuf->resv, NULL); | |
1558 | ret = dma_buf_vmap(dmabuf, map); | |
1559 | dma_resv_unlock(dmabuf->resv); | |
1560 | ||
1561 | return ret; | |
1562 | } | |
1563 | EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, DMA_BUF); | |
1564 | ||
98f86c9e DA |
1565 | /** |
1566 | * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap. | |
12c4727e | 1567 | * @dmabuf: [in] buffer to vunmap |
20e76f1a | 1568 | * @map: [in] vmap pointer to vunmap |
98f86c9e | 1569 | */ |
7938f421 | 1570 | void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) |
98f86c9e DA |
1571 | { |
1572 | if (WARN_ON(!dmabuf)) | |
1573 | return; | |
1574 | ||
34c7797f DO |
1575 | dma_resv_assert_held(dmabuf->resv); |
1576 | ||
7938f421 | 1577 | BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr)); |
f00b4dad | 1578 | BUG_ON(dmabuf->vmapping_counter == 0); |
7938f421 | 1579 | BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map)); |
f00b4dad | 1580 | |
f00b4dad DV |
1581 | if (--dmabuf->vmapping_counter == 0) { |
1582 | if (dmabuf->ops->vunmap) | |
20e76f1a | 1583 | dmabuf->ops->vunmap(dmabuf, map); |
7938f421 | 1584 | iosys_map_clear(&dmabuf->vmap_ptr); |
f00b4dad | 1585 | } |
98f86c9e | 1586 | } |
16b0314a | 1587 | EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF); |
b89e3563 | 1588 | |
56e5abba DO |
1589 | /** |
1590 | * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap_unlocked(). | |
1591 | * @dmabuf: [in] buffer to vunmap | |
1592 | * @map: [in] vmap pointer to vunmap | |
1593 | */ | |
1594 | void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map) | |
1595 | { | |
1596 | if (WARN_ON(!dmabuf)) | |
1597 | return; | |
1598 | ||
1599 | dma_resv_lock(dmabuf->resv, NULL); | |
1600 | dma_buf_vunmap(dmabuf, map); | |
1601 | dma_resv_unlock(dmabuf->resv); | |
1602 | } | |
1603 | EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF); | |
1604 | ||
b89e3563 | 1605 | #ifdef CONFIG_DEBUG_FS |
eb0b947e | 1606 | static int dma_buf_debug_show(struct seq_file *s, void *unused) |
b89e3563 | 1607 | { |
b89e3563 SS |
1608 | struct dma_buf *buf_obj; |
1609 | struct dma_buf_attachment *attach_obj; | |
63639d01 | 1610 | int count = 0, attach_count; |
b89e3563 | 1611 | size_t size = 0; |
680753dd | 1612 | int ret; |
b89e3563 SS |
1613 | |
1614 | ret = mutex_lock_interruptible(&db_list.lock); | |
1615 | ||
1616 | if (ret) | |
1617 | return ret; | |
1618 | ||
c0b00a52 | 1619 | seq_puts(s, "\nDma-buf Objects:\n"); |
6c01aa13 | 1620 | seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n", |
ed63bb1d | 1621 | "size", "flags", "mode", "count", "ino"); |
b89e3563 SS |
1622 | |
1623 | list_for_each_entry(buf_obj, &db_list.head, list_node) { | |
15fd552d | 1624 | |
15fd552d CK |
1625 | ret = dma_resv_lock_interruptible(buf_obj->resv, NULL); |
1626 | if (ret) | |
f45f57cc | 1627 | goto error_unlock; |
b89e3563 | 1628 | |
8c0fd126 GC |
1629 | |
1630 | spin_lock(&buf_obj->name_lock); | |
bb2bb903 | 1631 | seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n", |
c0b00a52 | 1632 | buf_obj->size, |
b89e3563 | 1633 | buf_obj->file->f_flags, buf_obj->file->f_mode, |
a1f6dbac | 1634 | file_count(buf_obj->file), |
ed63bb1d | 1635 | buf_obj->exp_name, |
bb2bb903 | 1636 | file_inode(buf_obj->file)->i_ino, |
6c01aa13 | 1637 | buf_obj->name ?: "<none>"); |
8c0fd126 | 1638 | spin_unlock(&buf_obj->name_lock); |
b89e3563 | 1639 | |
a25efb38 | 1640 | dma_resv_describe(buf_obj->resv, s); |
5eb2c72c | 1641 | |
c0b00a52 | 1642 | seq_puts(s, "\tAttached Devices:\n"); |
b89e3563 SS |
1643 | attach_count = 0; |
1644 | ||
1645 | list_for_each_entry(attach_obj, &buf_obj->attachments, node) { | |
9eddb41d | 1646 | seq_printf(s, "\t%s\n", dev_name(attach_obj->dev)); |
b89e3563 SS |
1647 | attach_count++; |
1648 | } | |
15fd552d | 1649 | dma_resv_unlock(buf_obj->resv); |
b89e3563 | 1650 | |
c0b00a52 | 1651 | seq_printf(s, "Total %d devices attached\n\n", |
b89e3563 SS |
1652 | attach_count); |
1653 | ||
1654 | count++; | |
1655 | size += buf_obj->size; | |
b89e3563 SS |
1656 | } |
1657 | ||
1658 | seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size); | |
1659 | ||
1660 | mutex_unlock(&db_list.lock); | |
1661 | return 0; | |
15fd552d | 1662 | |
f45f57cc | 1663 | error_unlock: |
15fd552d CK |
1664 | mutex_unlock(&db_list.lock); |
1665 | return ret; | |
b89e3563 SS |
1666 | } |
1667 | ||
2674305a | 1668 | DEFINE_SHOW_ATTRIBUTE(dma_buf_debug); |
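/*
 * Illustrative output (all values made up) as read from
 * <debugfs>/dma_buf/bufinfo, matching the seq_printf() format above:
 *
 *	Dma-buf Objects:
 *	size    	flags   	mode    	count   	exp_name	ino     	name
 *	00409600	00000002	00080007	00000002	drm     	00016131	<none>
 *		Attached Devices:
 *		0000:00:02.0
 *	Total 1 devices attached
 *
 *	Total 1 objects, 409600 bytes
 */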
b89e3563 SS |
1669 | |
1670 | static struct dentry *dma_buf_debugfs_dir; | |
1671 | ||
1672 | static int dma_buf_init_debugfs(void) | |
1673 | { | |
bd3e2208 | 1674 | struct dentry *d; |
b89e3563 | 1675 | int err = 0; |
5136629d | 1676 | |
bd3e2208 MK |
1677 | d = debugfs_create_dir("dma_buf", NULL); |
1678 | if (IS_ERR(d)) | |
1679 | return PTR_ERR(d); | |
5136629d | 1680 | |
bd3e2208 | 1681 | dma_buf_debugfs_dir = d; |
b89e3563 | 1682 | |
bd3e2208 MK |
1683 | d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir, |
1684 | NULL, &dma_buf_debug_fops); | |
1685 | if (IS_ERR(d)) { | |
b89e3563 | 1686 | pr_debug("dma_buf: debugfs: failed to create node bufinfo\n"); |
b7479990 MK |
1687 | debugfs_remove_recursive(dma_buf_debugfs_dir); |
1688 | dma_buf_debugfs_dir = NULL; | |
bd3e2208 | 1689 | err = PTR_ERR(d); |
b7479990 | 1690 | } |
b89e3563 SS |
1691 | |
1692 | return err; | |
1693 | } | |
1694 | ||
1695 | static void dma_buf_uninit_debugfs(void) | |
1696 | { | |
298b6a81 | 1697 | debugfs_remove_recursive(dma_buf_debugfs_dir); |
b89e3563 | 1698 | } |
b89e3563 SS |
1699 | #else |
1700 | static inline int dma_buf_init_debugfs(void) | |
1701 | { | |
1702 | return 0; | |
1703 | } | |
1704 | static inline void dma_buf_uninit_debugfs(void) | |
1705 | { | |
1706 | } | |
1707 | #endif | |
1708 | ||
1709 | static int __init dma_buf_init(void) | |
1710 | { | |
bdb8d06d HV |
1711 | int ret; |
1712 | ||
1713 | ret = dma_buf_init_sysfs_statistics(); | |
1714 | if (ret) | |
1715 | return ret; | |
1716 | ||
ed63bb1d GH |
1717 | dma_buf_mnt = kern_mount(&dma_buf_fs_type); |
1718 | if (IS_ERR(dma_buf_mnt)) | |
1719 | return PTR_ERR(dma_buf_mnt); | |
1720 | ||
b89e3563 SS |
1721 | mutex_init(&db_list.lock); |
1722 | INIT_LIST_HEAD(&db_list.head); | |
1723 | dma_buf_init_debugfs(); | |
1724 | return 0; | |
1725 | } | |
1726 | subsys_initcall(dma_buf_init); | |
1727 | ||
1728 | static void __exit dma_buf_deinit(void) | |
1729 | { | |
1730 | dma_buf_uninit_debugfs(); | |
ed63bb1d | 1731 | kern_unmount(dma_buf_mnt); |
bdb8d06d | 1732 | dma_buf_uninit_sysfs_statistics(); |
b89e3563 SS |
1733 | } |
1734 | __exitcall(dma_buf_deinit); |