Commit | Line | Data |
---|---|---|
c30707be RSZ |
1 | /* |
2 | * drivers/staging/android/ion/ion_heap.c | |
3 | * | |
4 | * Copyright (C) 2011 Google, Inc. | |
5 | * | |
6 | * This software is licensed under the terms of the GNU General Public | |
7 | * License version 2, as published by the Free Software Foundation, and | |
8 | * may be copied, distributed, and modified under those terms. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, | |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
13 | * GNU General Public License for more details. | |
14 | * | |
15 | */ | |
16 | ||
17 | #include <linux/err.h> | |
ea313b5f RSZ |
18 | #include <linux/freezer.h> |
19 | #include <linux/kthread.h> | |
8898227e | 20 | #include <linux/mm.h> |
ea313b5f RSZ |
21 | #include <linux/rtmutex.h> |
22 | #include <linux/sched.h> | |
8898227e RSZ |
23 | #include <linux/scatterlist.h> |
24 | #include <linux/vmalloc.h> | |
c30707be RSZ |
25 | #include "ion.h" |
26 | #include "ion_priv.h" | |
27 | ||
8898227e RSZ |
/*
 * Map a buffer's backing pages into a contiguous kernel virtual range.
 *
 * Flattens the buffer's sg_table into a flat page array and vmap()s it,
 * write-combined unless the buffer was allocated with ION_FLAG_CACHED.
 *
 * Returns the kernel virtual address on success, NULL if the temporary
 * page array cannot be allocated, or ERR_PTR(-ENOMEM) if vmap() fails.
 * (NOTE(review): callers must cope with both NULL and ERR_PTR returns.)
 */
void *ion_heap_map_kernel(struct ion_heap *heap,
			  struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return NULL;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		/* Sanity: the sg-entry index can never reach the page count. */
		BUG_ON(i >= npages);
		/*
		 * Pages within one sg entry are physically contiguous,
		 * so advancing the struct page pointer walks them all.
		 */
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	/* vmap() copied what it needs; the temporary array can go. */
	vfree(pages);

	if (vaddr == NULL)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}
63 | ||
/*
 * Undo ion_heap_map_kernel(): tear down the buffer's kernel mapping.
 */
void ion_heap_unmap_kernel(struct ion_heap *heap,
			   struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}
69 | ||
/*
 * Map a buffer into a userspace VMA.
 *
 * Walks the buffer's scatterlist, skipping the first vma->vm_pgoff pages
 * worth of bytes, then remap_pfn_range()s each (possibly partial) entry
 * until the VMA is filled or the list ends.
 *
 * Returns 0 on success or the remap_pfn_range() error code.
 */
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
		      struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	/* bytes of the buffer still to be skipped before mapping starts */
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;
	int ret;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg->length;

		if (offset >= sg->length) {
			/* Entry lies entirely before the offset: skip it. */
			offset -= sg->length;
			continue;
		} else if (offset) {
			/* Offset lands inside this entry: map its tail only. */
			page += offset / PAGE_SIZE;
			len = sg->length - offset;
			offset = 0;
		}
		/* Never map past the end of the VMA. */
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}
104 | ||
8b312bb9 CC |
105 | static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot) |
106 | { | |
107 | void *addr = vm_map_ram(pages, num, -1, pgprot); | |
108 | if (!addr) | |
109 | return -ENOMEM; | |
110 | memset(addr, 0, PAGE_SIZE * num); | |
111 | vm_unmap_ram(addr, num); | |
112 | ||
113 | return 0; | |
114 | } | |
115 | ||
df6cf5c8 CC |
116 | static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents, |
117 | pgprot_t pgprot) | |
118 | { | |
119 | int p = 0; | |
120 | int ret = 0; | |
121 | struct sg_page_iter piter; | |
122 | struct page *pages[32]; | |
123 | ||
124 | for_each_sg_page(sgl, &piter, nents, 0) { | |
125 | pages[p++] = sg_page_iter_page(&piter); | |
126 | if (p == ARRAY_SIZE(pages)) { | |
127 | ret = ion_heap_clear_pages(pages, p, pgprot); | |
128 | if (ret) | |
129 | return ret; | |
130 | p = 0; | |
131 | } | |
132 | } | |
133 | if (p) | |
134 | ret = ion_heap_clear_pages(pages, p, pgprot); | |
135 | ||
136 | return ret; | |
137 | } | |
138 | ||
0b6b2cde RSZ |
139 | int ion_heap_buffer_zero(struct ion_buffer *buffer) |
140 | { | |
141 | struct sg_table *table = buffer->sg_table; | |
142 | pgprot_t pgprot; | |
0b6b2cde RSZ |
143 | |
144 | if (buffer->flags & ION_FLAG_CACHED) | |
145 | pgprot = PAGE_KERNEL; | |
146 | else | |
147 | pgprot = pgprot_writecombine(PAGE_KERNEL); | |
148 | ||
df6cf5c8 CC |
149 | return ion_heap_sglist_zero(table->sgl, table->nents, pgprot); |
150 | } | |
0b6b2cde | 151 | |
df6cf5c8 CC |
/*
 * Zero @size bytes starting at @page by wrapping it in a one-entry
 * scatterlist and reusing the sglist zeroing path.
 */
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	return ion_heap_sglist_zero(&sg, 1, pgprot);
}
160 | ||
e1d855b0 | 161 | void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer) |
ea313b5f | 162 | { |
6a72a700 | 163 | spin_lock(&heap->free_lock); |
ea313b5f RSZ |
164 | list_add(&buffer->list, &heap->free_list); |
165 | heap->free_list_size += buffer->size; | |
6a72a700 | 166 | spin_unlock(&heap->free_lock); |
ea313b5f RSZ |
167 | wake_up(&heap->waitqueue); |
168 | } | |
169 | ||
170 | size_t ion_heap_freelist_size(struct ion_heap *heap) | |
171 | { | |
172 | size_t size; | |
173 | ||
6a72a700 | 174 | spin_lock(&heap->free_lock); |
ea313b5f | 175 | size = heap->free_list_size; |
6a72a700 | 176 | spin_unlock(&heap->free_lock); |
ea313b5f RSZ |
177 | |
178 | return size; | |
179 | } | |
180 | ||
/*
 * Synchronously free up to @size bytes of deferred buffers.
 *
 * @size == 0 means "drain everything that is queued when the lock is
 * taken".  free_lock is dropped around each ion_buffer_destroy() call
 * (destruction must not run under the spinlock) and the list is
 * re-checked after reacquiring, so buffers added concurrently may also
 * be drained.  Returns the number of bytes actually freed.
 */
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
	struct ion_buffer *buffer;
	size_t total_drained = 0;

	/* Cheap early-out: nothing queued. */
	if (ion_heap_freelist_size(heap) == 0)
		return 0;

	spin_lock(&heap->free_lock);
	if (size == 0)
		size = heap->free_list_size;

	while (!list_empty(&heap->free_list)) {
		if (total_drained >= size)
			break;
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		total_drained += buffer->size;
		/* Drop the spinlock across the potentially heavy destroy. */
		spin_unlock(&heap->free_lock);
		ion_buffer_destroy(buffer);
		spin_lock(&heap->free_lock);
	}
	spin_unlock(&heap->free_lock);

	return total_drained;
}
209 | ||
f63958d8 | 210 | static int ion_heap_deferred_free(void *data) |
ea313b5f RSZ |
211 | { |
212 | struct ion_heap *heap = data; | |
213 | ||
214 | while (true) { | |
215 | struct ion_buffer *buffer; | |
216 | ||
217 | wait_event_freezable(heap->waitqueue, | |
218 | ion_heap_freelist_size(heap) > 0); | |
219 | ||
6a72a700 | 220 | spin_lock(&heap->free_lock); |
ea313b5f | 221 | if (list_empty(&heap->free_list)) { |
6a72a700 | 222 | spin_unlock(&heap->free_lock); |
ea313b5f RSZ |
223 | continue; |
224 | } | |
225 | buffer = list_first_entry(&heap->free_list, struct ion_buffer, | |
226 | list); | |
227 | list_del(&buffer->list); | |
228 | heap->free_list_size -= buffer->size; | |
6a72a700 | 229 | spin_unlock(&heap->free_lock); |
ea313b5f RSZ |
230 | ion_buffer_destroy(buffer); |
231 | } | |
232 | ||
233 | return 0; | |
234 | } | |
235 | ||
236 | int ion_heap_init_deferred_free(struct ion_heap *heap) | |
237 | { | |
238 | struct sched_param param = { .sched_priority = 0 }; | |
239 | ||
240 | INIT_LIST_HEAD(&heap->free_list); | |
241 | heap->free_list_size = 0; | |
6a72a700 | 242 | spin_lock_init(&heap->free_lock); |
ea313b5f RSZ |
243 | init_waitqueue_head(&heap->waitqueue); |
244 | heap->task = kthread_run(ion_heap_deferred_free, heap, | |
245 | "%s", heap->name); | |
ea313b5f RSZ |
246 | if (IS_ERR(heap->task)) { |
247 | pr_err("%s: creating thread for deferred free failed\n", | |
248 | __func__); | |
249 | return PTR_RET(heap->task); | |
250 | } | |
54de9af9 | 251 | sched_setscheduler(heap->task, SCHED_IDLE, ¶m); |
ea313b5f RSZ |
252 | return 0; |
253 | } | |
254 | ||
c30707be RSZ |
255 | struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) |
256 | { | |
257 | struct ion_heap *heap = NULL; | |
258 | ||
259 | switch (heap_data->type) { | |
260 | case ION_HEAP_TYPE_SYSTEM_CONTIG: | |
261 | heap = ion_system_contig_heap_create(heap_data); | |
262 | break; | |
263 | case ION_HEAP_TYPE_SYSTEM: | |
264 | heap = ion_system_heap_create(heap_data); | |
265 | break; | |
266 | case ION_HEAP_TYPE_CARVEOUT: | |
267 | heap = ion_carveout_heap_create(heap_data); | |
268 | break; | |
e3c2eb7c RSZ |
269 | case ION_HEAP_TYPE_CHUNK: |
270 | heap = ion_chunk_heap_create(heap_data); | |
271 | break; | |
349c9e13 BG |
272 | case ION_HEAP_TYPE_DMA: |
273 | heap = ion_cma_heap_create(heap_data); | |
274 | break; | |
c30707be RSZ |
275 | default: |
276 | pr_err("%s: Invalid heap type %d\n", __func__, | |
277 | heap_data->type); | |
278 | return ERR_PTR(-EINVAL); | |
279 | } | |
280 | ||
281 | if (IS_ERR_OR_NULL(heap)) { | |
e61fc915 | 282 | pr_err("%s: error creating heap %s type %d base %lu size %zu\n", |
c30707be RSZ |
283 | __func__, heap_data->name, heap_data->type, |
284 | heap_data->base, heap_data->size); | |
285 | return ERR_PTR(-EINVAL); | |
286 | } | |
287 | ||
288 | heap->name = heap_data->name; | |
289 | heap->id = heap_data->id; | |
290 | return heap; | |
291 | } | |
292 | ||
293 | void ion_heap_destroy(struct ion_heap *heap) | |
294 | { | |
295 | if (!heap) | |
296 | return; | |
297 | ||
298 | switch (heap->type) { | |
299 | case ION_HEAP_TYPE_SYSTEM_CONTIG: | |
300 | ion_system_contig_heap_destroy(heap); | |
301 | break; | |
302 | case ION_HEAP_TYPE_SYSTEM: | |
303 | ion_system_heap_destroy(heap); | |
304 | break; | |
305 | case ION_HEAP_TYPE_CARVEOUT: | |
306 | ion_carveout_heap_destroy(heap); | |
307 | break; | |
e3c2eb7c RSZ |
308 | case ION_HEAP_TYPE_CHUNK: |
309 | ion_chunk_heap_destroy(heap); | |
310 | break; | |
349c9e13 BG |
311 | case ION_HEAP_TYPE_DMA: |
312 | ion_cma_heap_destroy(heap); | |
313 | break; | |
c30707be RSZ |
314 | default: |
315 | pr_err("%s: Invalid heap type %d\n", __func__, | |
316 | heap->type); | |
317 | } | |
318 | } |