/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"
/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @clients:		an rb tree of all the clients created from userspace
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
	struct dentry *heaps_debug_root;
	struct dentry *clients_debug_root;
};
/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @display_name:	used for debugging (unique version of @name)
 * @display_serial:	used for debugging (to make display_name unique)
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * as well as the handles themselves, and should be held while modifying
 * either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	char *display_name;
	int display_serial;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};
/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}
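
/*
 * For buffers that use fault-based user mappings the per-page dirty state is
 * tracked in bit 0 of each entry of buffer->pages: struct page pointers are
 * always at least word aligned, so the low bit is free to use as a tag.  The
 * helpers below read, set and clear that tag.
 */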
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL,
		      "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		ret = -EINVAL;
		goto err1;
	}

	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}
void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
	kfree(buffer);
}
static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}
static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}
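
/*
 * Buffer lifetime: every ion_handle and every exported dma_buf holds one
 * reference on the underlying ion_buffer.  When the last reference drops,
 * the buffer is unlinked from dev->buffers and either freed immediately or
 * queued on the heap's freelist if the heap uses deferred free.
 */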
static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}
static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}
static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}
static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}
struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put_nolock(struct ion_handle *handle)
{
	int ret;

	ret = kref_put(&handle->ref, ion_handle_destroy);

	return ret;
}

int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);

	return ret;
}
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}
static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
						       int id)
{
	struct ion_handle *handle;

	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);

	return handle ? handle : ERR_PTR(-EINVAL);
}

struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = ion_handle_get_by_id_nolock(client, id);
	mutex_unlock(&client->lock);

	return handle;
}
static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}
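
/*
 * Each client keeps two indexes over its handles: the idr maps the small
 * integer ids handed to userspace back to handles, and the rbtree (keyed by
 * buffer address) lets an import of an already-known buffer find the
 * existing handle instead of creating a duplicate.
 */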
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);
	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
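
/*
 * Typical in-kernel use of the allocation API (illustrative sketch only;
 * error handling elided, and the heap mask shown assumes a platform where
 * the system heap was registered with id ION_HEAP_TYPE_SYSTEM):
 *
 *	client = ion_client_create(idev, "my-driver");
 *	handle = ion_alloc(client, SZ_1M, PAGE_SIZE,
 *			   1 << ION_HEAP_TYPE_SYSTEM, ION_FLAG_CACHED);
 *	vaddr = ion_map_kernel(client, handle);
 *	fd = ion_share_dma_buf_fd(client, handle);
 *	...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */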
static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put_nolock(handle);
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	ion_free_nolock(client, handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
		       __func__, buffer->heap->name, buffer->heap->type);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);
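
/*
 * Kernel mappings are reference counted at two levels: handle->kmap_cnt
 * tracks how often one client mapped the buffer, buffer->kmap_cnt tracks the
 * total across all handles.  heap->ops->map_kernel() is only called for the
 * first mapping and unmap_kernel() only when the last mapping goes away.
 */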
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}
static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
static struct mutex debugfs_mutex;
static struct rb_root *ion_root_client;

static int is_client_alive(struct ion_client *client)
{
	struct rb_node *node;
	struct ion_client *tmp;
	struct ion_device *dev;

	node = ion_root_client->rb_node;
	dev = container_of(ion_root_client, struct ion_device, clients);

	down_read(&dev->lock);
	while (node) {
		tmp = rb_entry(node, struct ion_client, node);
		if (client < tmp) {
			node = node->rb_left;
		} else if (client > tmp) {
			node = node->rb_right;
		} else {
			up_read(&dev->lock);
			return 1;
		}
	}

	up_read(&dev->lock);
	return 0;
}
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&debugfs_mutex);
	if (!is_client_alive(client)) {
		seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
			   client);
		mutex_unlock(&debugfs_mutex);
		return 0;
	}

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	mutex_unlock(&debugfs_mutex);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}
static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}
struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);
void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	mutex_lock(&debugfs_mutex);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
	mutex_unlock(&debugfs_mutex);
}
EXPORT_SYMBOL(ion_client_destroy);
struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}
void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is
	 * valid for the targeted device, but this works on the currently
	 * targeted hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}
struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}
static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}
static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}
static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}
static const struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};
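
/*
 * For cached buffers without explicit sync (see
 * ion_buffer_fault_user_mappings()) userspace mappings are populated lazily
 * by ion_vm_fault() and each faulted page is marked dirty.  On the next DMA
 * map, ion_buffer_sync_for_device() cleans only the dirty pages and zaps the
 * user mappings so later accesses fault and get tracked again.
 */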
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
							VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}
static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}
static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};
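
/*
 * Every shared ion buffer is exported as a dma-buf with the ops above, so a
 * device driver that has never heard of ion can still attach to the fd,
 * call dma_buf_map_attachment() and receive the buffer's sg_table.
 */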
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);
int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
struct ion_handle *ion_import_dma_buf(struct ion_client *client,
				      struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	handle = ion_import_dma_buf(client, dmabuf);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf_fd);
static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}
/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
	switch (cmd) {
	case ION_IOC_SYNC:
	case ION_IOC_FREE:
	case ION_IOC_CUSTOM:
		return _IOC_WRITE;
	default:
		return _IOC_DIR(cmd);
	}
}
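
/*
 * ion_ioctl() marshals every command through one union sized for the largest
 * argument struct: the argument is copied in once when the direction includes
 * _IOC_WRITE, the command handler fills it in, and it is copied back out when
 * the direction includes _IOC_READ.  cleanup_handle records a freshly
 * allocated handle so it can be released if that final copy_to_user() fails.
 */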
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;
	struct ion_device *dev = client->dev;
	struct ion_handle *cleanup_handle = NULL;
	int ret = 0;
	unsigned int dir;

	union {
		struct ion_fd_data fd;
		struct ion_allocation_data allocation;
		struct ion_handle_data handle;
		struct ion_custom_data custom;
	} data;

	dir = ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	if (dir & _IOC_WRITE)
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_handle *handle;

		handle = ion_alloc(client, data.allocation.len,
				   data.allocation.align,
				   data.allocation.heap_id_mask,
				   data.allocation.flags);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		data.allocation.handle = handle->id;
		cleanup_handle = handle;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle *handle;

		mutex_lock(&client->lock);
		handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
		if (IS_ERR(handle)) {
			mutex_unlock(&client->lock);
			return PTR_ERR(handle);
		}
		ion_free_nolock(client, handle);
		ion_handle_put_nolock(handle);
		mutex_unlock(&client->lock);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		data.fd.fd = ion_share_dma_buf_fd(client, handle);
		ion_handle_put(handle);
		if (data.fd.fd < 0)
			ret = data.fd.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_handle *handle;

		handle = ion_import_dma_buf_fd(client, data.fd.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle.handle = handle->id;
		break;
	}
	case ION_IOC_SYNC:
	{
		ret = ion_sync_for_device(client, data.fd.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		if (!dev->custom_ioctl)
			return -ENOTTY;
		ret = dev->custom_ioctl(client, data.custom.cmd,
					data.custom.arg);
		break;
	}
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
			if (cleanup_handle)
				ion_free(client, cleanup_handle);
			return -EFAULT;
		}
	}
	return ret;
}
static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, debug_name);
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}
static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ion_ioctl,
};
static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}
static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	mutex_lock(&debugfs_mutex);
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	mutex_unlock(&debugfs_mutex);

	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}
static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
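
/*
 * The <heap>_shrink debugfs file wraps the heap's shrinker: reading it
 * reports how many objects the shrinker could currently reclaim, and writing
 * a count (or 0 for "everything reclaimable right now") triggers a scan,
 * which is handy for exercising heap shrinking without real memory pressure.
 */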
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct dentry *debug_file;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					 dev->heaps_debug_root, heap,
					 &debug_heap_fops);

	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
		       path, heap->name);
	}

	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
			       path, debug_name);
		}
	}

	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);
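
/*
 * Registration sketch (illustrative only): a platform driver typically
 * creates one ion device at probe time and then adds each heap it has
 * built, e.g. with the ion_heap_create() helper from ion_priv.h:
 *
 *	idev = ion_device_create(NULL);
 *	for (i = 0; i < pdata->nr; i++) {
 *		heap = ion_heap_create(&pdata->heaps[i]);
 *		if (!IS_ERR_OR_NULL(heap))
 *			ion_device_add_heap(idev, heap);
 *	}
 */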
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						idev->debug_root);
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	ion_root_client = &idev->clients;
	mutex_init(&debugfs_mutex);
	return idev;
}
EXPORT_SYMBOL(ion_device_create);
void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);
void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
				       __func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
						   data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}