/*
 * drivers/staging/android/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"
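
/*
 * Map the buffer's scatter-gather pages into one contiguous kernel virtual
 * mapping with vmap().  Cached buffers are mapped with PAGE_KERNEL,
 * uncached buffers write-combined.
 */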
void *ion_heap_map_kernel(struct ion_heap *heap,
                          struct ion_buffer *buffer)
{
        struct scatterlist *sg;
        int i, j;
        void *vaddr;
        pgprot_t pgprot;
        struct sg_table *table = buffer->sg_table;
        int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
        struct page **pages = vmalloc(sizeof(struct page *) * npages);
        struct page **tmp = pages;

        if (!pages)
                return NULL;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        for_each_sg(table->sgl, sg, table->nents, i) {
                int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
                struct page *page = sg_page(sg);

                for (j = 0; j < npages_this_entry; j++)
                        *(tmp++) = page++;
        }
        vaddr = vmap(pages, npages, VM_MAP, pgprot);
        vfree(pages);

        if (!vaddr)
                return ERR_PTR(-ENOMEM);

        return vaddr;
}

void ion_heap_unmap_kernel(struct ion_heap *heap,
                           struct ion_buffer *buffer)
{
        vunmap(buffer->vaddr);
}
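
/*
 * Map the buffer into a userspace VMA with remap_pfn_range(), walking the
 * scatterlist and honouring the mmap offset (vma->vm_pgoff).
 */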
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                      struct vm_area_struct *vma)
{
        struct sg_table *table = buffer->sg_table;
        unsigned long addr = vma->vm_start;
        unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
        struct scatterlist *sg;
        int i;
        int ret;

        for_each_sg(table->sgl, sg, table->nents, i) {
                struct page *page = sg_page(sg);
                unsigned long remainder = vma->vm_end - addr;
                unsigned long len = sg->length;

                if (offset >= sg->length) {
                        offset -= sg->length;
                        continue;
                } else if (offset) {
                        page += offset / PAGE_SIZE;
                        len = sg->length - offset;
                        offset = 0;
                }
                len = min(len, remainder);
                ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
                                      vma->vm_page_prot);
                if (ret)
                        return ret;
                addr += len;
                if (addr >= vma->vm_end)
                        return 0;
        }
        return 0;
}
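
/*
 * Zeroing helpers: pages are temporarily mapped in batches of up to 32 with
 * vm_map_ram(), cleared with memset(), then unmapped again.
 */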
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
        void *addr = vm_map_ram(pages, num, -1, pgprot);

        if (!addr)
                return -ENOMEM;
        memset(addr, 0, PAGE_SIZE * num);
        vm_unmap_ram(addr, num);
        return 0;
}

static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
                                pgprot_t pgprot)
{
        int p = 0;
        int ret = 0;
        struct sg_page_iter piter;
        struct page *pages[32];

        for_each_sg_page(sgl, &piter, nents, 0) {
                pages[p++] = sg_page_iter_page(&piter);
                if (p == ARRAY_SIZE(pages)) {
                        ret = ion_heap_clear_pages(pages, p, pgprot);
                        if (ret)
                                return ret;
                        p = 0;
                }
        }
        if (p)
                ret = ion_heap_clear_pages(pages, p, pgprot);
        return ret;
}
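
/*
 * Zero an entire buffer, using a cached mapping for cached buffers and a
 * write-combined mapping otherwise.
 */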
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
        struct sg_table *table = buffer->sg_table;
        pgprot_t pgprot;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
}

int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
{
        struct scatterlist sg;

        sg_init_table(&sg, 1);
        sg_set_page(&sg, page, size, 0);
        return ion_heap_sglist_zero(&sg, 1, pgprot);
}
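
/*
 * Deferred-free support: freed buffers are queued on heap->free_list under
 * heap->free_lock and later destroyed by a low-priority kernel thread.
 */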
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
        spin_lock(&heap->free_lock);
        list_add(&buffer->list, &heap->free_list);
        heap->free_list_size += buffer->size;
        spin_unlock(&heap->free_lock);
        wake_up(&heap->waitqueue);
}

size_t ion_heap_freelist_size(struct ion_heap *heap)
{
        size_t size;

        spin_lock(&heap->free_lock);
        size = heap->free_list_size;
        spin_unlock(&heap->free_lock);
        return size;
}
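
/*
 * Drain up to @size bytes from the free list, destroying buffers outside the
 * lock.  A @size of 0 means "drain everything currently on the list".
 */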
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
        struct ion_buffer *buffer;
        size_t total_drained = 0;

        if (ion_heap_freelist_size(heap) == 0)
                return 0;

        spin_lock(&heap->free_lock);
        if (!size)
                size = heap->free_list_size;

        while (!list_empty(&heap->free_list)) {
                if (total_drained >= size)
                        break;
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                total_drained += buffer->size;
                spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
                spin_lock(&heap->free_lock);
        }
        spin_unlock(&heap->free_lock);

        return total_drained;
}
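
/*
 * Body of the deferred-free kernel thread: sleep until the free list is
 * non-empty, then pop and destroy buffers one at a time, dropping the lock
 * around ion_buffer_destroy().
 */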
static int ion_heap_deferred_free(void *data)
{
        struct ion_heap *heap = data;

        while (true) {
                struct ion_buffer *buffer;

                wait_event_freezable(heap->waitqueue,
                                     ion_heap_freelist_size(heap) > 0);

                spin_lock(&heap->free_lock);
                if (list_empty(&heap->free_list)) {
                        spin_unlock(&heap->free_lock);
                        continue;
                }
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
        }

        return 0;
}
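
/*
 * Set up the free list, its lock and waitqueue, and start the deferred-free
 * thread at SCHED_IDLE priority.
 */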
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
        struct sched_param param = { .sched_priority = 0 };

        INIT_LIST_HEAD(&heap->free_list);
        heap->free_list_size = 0;
        spin_lock_init(&heap->free_lock);
        init_waitqueue_head(&heap->waitqueue);
        heap->task = kthread_run(ion_heap_deferred_free, heap,
                                 "%s", heap->name);
        if (IS_ERR(heap->task)) {
                pr_err("%s: creating thread for deferred free failed\n",
                       __func__);
                return PTR_RET(heap->task);
        }
        sched_setscheduler(heap->task, SCHED_IDLE, &param);
        return 0;
}
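
/*
 * Instantiate a heap of the type requested in the platform data and copy the
 * name and id over from the platform description.
 */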
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
        struct ion_heap *heap = NULL;

        switch (heap_data->type) {
        case ION_HEAP_TYPE_SYSTEM_CONTIG:
                heap = ion_system_contig_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_SYSTEM:
                heap = ion_system_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_CARVEOUT:
                heap = ion_carveout_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_CHUNK:
                heap = ion_chunk_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_DMA:
                heap = ion_cma_heap_create(heap_data);
                break;
        default:
                pr_err("%s: Invalid heap type %d\n", __func__,
                       heap_data->type);
                return ERR_PTR(-EINVAL);
        }

        if (IS_ERR_OR_NULL(heap)) {
                pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
                       __func__, heap_data->name, heap_data->type,
                       heap_data->base, heap_data->size);
                return ERR_PTR(-EINVAL);
        }

        heap->name = heap_data->name;
        heap->id = heap_data->id;
        return heap;
}
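
/*
 * Tear down a heap previously created with ion_heap_create().
 */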
void ion_heap_destroy(struct ion_heap *heap)
{
        if (!heap)
                return;

        switch (heap->type) {
        case ION_HEAP_TYPE_SYSTEM_CONTIG:
                ion_system_contig_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_SYSTEM:
                ion_system_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_CARVEOUT:
                ion_carveout_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_CHUNK:
                ion_chunk_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_DMA:
                ion_cma_heap_destroy(heap);
                break;
        default:
                pr_err("%s: Invalid heap type %d\n", __func__,
                       heap->type);
        }
}