/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_OPEN_CLOSE         = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC       = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info(x); \
	} while (0)

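/*
 * Buffer sizes are implicit: each binder_buffer header lives in the
 * mmap'ed region immediately before its data, so a buffer's size is the
 * distance from its data member to the next buffer header (or, for the
 * last buffer, to the end of the region).
 */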
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->buffer +
		       alloc->buffer_size - (void *)buffer->data;
	return (size_t)list_entry(buffer->entry.next,
			  struct binder_buffer, entry) - (size_t)buffer->data;
}

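/*
 * Free buffers are kept in an rb-tree keyed by size so that allocation
 * can do a best-fit search; ties go right, so equal-sized buffers can
 * coexist in the tree.
 */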
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

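/*
 * The kernel and user mappings of the buffer region differ by the fixed
 * user_buffer_offset, and user pointers reference a buffer's data member
 * rather than its header; undo both offsets to recover the kernel
 * binder_buffer address before searching the allocated rb-tree.
 */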
static struct binder_buffer *binder_alloc_buffer_lookup_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
		- offsetof(struct binder_buffer, data));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else
			return buffer;
	}
	return NULL;
}

/**
 * binder_alloc_buffer_lookup() - get buffer given user ptr
 * @alloc: binder_alloc for this proc
 * @user_ptr: User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for buffer that matches user data
 * pointer.
 *
 * Return: Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_buffer_lookup(struct binder_alloc *alloc,
						 uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_buffer_lookup_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

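/*
 * binder_update_page_range() both allocates (allocate == 1) and releases
 * (allocate == 0) the physical pages backing [start, end).  Each page is
 * mapped twice: into the kernel via map_kernel_range_noflush() and into
 * the target process via vm_insert_page().  The free path and the
 * allocation error-unwind path share code: the err_* labels fall through
 * the same per-page teardown steps that free_range uses.
 */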
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct page **page;
	struct mm_struct *mm;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(alloc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = alloc->vma;
		if (vma && mm != alloc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n",
			       alloc->pid);
			vma = NULL;
		}
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
		       alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;

		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (*page == NULL) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		ret = map_kernel_range_noflush((unsigned long)page_addr,
					       PAGE_SIZE, PAGE_KERNEL, page);
		flush_cache_vmap((unsigned long)page_addr,
				 (unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       alloc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				alloc->user_buffer_offset, PAGE_SIZE);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return -ENOMEM;
}

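/*
 * Best-fit allocation: walk the size-sorted free tree for the smallest
 * buffer that fits, back only the pages the request actually needs, and
 * split off any significant remainder as a new free buffer.
 */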
struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
						  size_t data_size,
						  size_t offsets_size,
						  size_t extra_buffers_size,
						  int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size, data_offsets_size;

	if (alloc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       alloc->pid);
		return NULL;
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd\n",
				   alloc->pid, data_size, offsets_size);
		return NULL;
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid extra_buffers_size %zd\n",
				   alloc->pid, extra_buffers_size);
		return NULL;
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, no async space left\n",
				   alloc->pid, size);
		return NULL;
	}

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
		       alloc->pid, size);
		return NULL;
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
			   alloc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	if (binder_update_page_range(alloc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
		return NULL;

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	if (buffer_size != size) {
		struct binder_buffer *new_buffer = (void *)buffer->data + size;

		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got %pK\n",
			   alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_alloc_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}
	return buffer;
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc: binder_alloc for this proc
 * @data_size: size of user data buffer
 * @offsets_size: size of user-specified buffer of offsets
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async: buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return: The allocated buffer or %NULL if error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

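/*
 * buffer_start_page()/buffer_end_page() give the pages holding the first
 * and last bytes of a binder_buffer header; they are used below to tell
 * whether a header shares a page with a neighbour.
 */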
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer & PAGE_MASK);
}

static void *buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}

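/*
 * A free buffer being deleted may share its first and/or last page with
 * the previous or next buffer's header; only the pages it does not share
 * can be returned via binder_update_page_range().
 */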
static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	int free_page_end = 1;
	int free_page_start = 1;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
	BUG_ON(!prev->free);
	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
		free_page_start = 0;
		if (buffer_end_page(prev) == buffer_end_page(buffer))
			free_page_end = 0;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer, prev);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = list_entry(buffer->entry.next,
				  struct binder_buffer, entry);
		if (buffer_start_page(next) == buffer_end_page(buffer)) {
			free_page_end = 0;
			if (buffer_start_page(next) ==
			    buffer_start_page(buffer))
				free_page_start = 0;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid, buffer, prev);
		}
	}
	list_del(&buffer->entry);
	if (free_page_start || free_page_end) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
				   alloc->pid, buffer, free_page_start ? "" : " end",
				   free_page_end ? "" : " start", prev, next);
		binder_update_page_range(alloc, 0, free_page_start ?
			buffer_start_page(buffer) : buffer_end_page(buffer),
			(free_page_end ? buffer_end_page(buffer) :
			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
	}
}

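/*
 * Release the buffer's pages, then coalesce it with any free neighbours
 * so the free tree never contains two adjacent free buffers.
 */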
static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
			   alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < alloc->buffer);
	BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_free_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc: binder_alloc for this proc
 * @buffer: kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc: alloc structure for this proc
 * @vma: vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	alloc->buffer = area->addr;
	alloc->user_buffer_offset =
		vma->vm_start - (uintptr_t)alloc->buffer;
	mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR(
				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
				__func__, alloc->pid, vma->vm_start,
				vma->vm_end, alloc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
				   ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	if (binder_update_page_range(alloc, 1, alloc->buffer,
				     alloc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
	buffer = alloc->buffer;
	INIT_LIST_HEAD(&alloc->buffers);
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
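	/*
	 * Publish alloc->vma last: allocation paths test alloc->vma under
	 * alloc->mutex rather than binder_alloc_mmap_lock, so the
	 * bookkeeping stores above must not be reordered past it.
	 */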
	barrier();
	alloc->vma = vma;
	alloc->vma_vm_mm = vma->vm_mm;

	return 0;

err_alloc_small_buf_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_alloc_mmap_lock);
	vfree(alloc->buffer);
	alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}

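/*
 * Tear down a process's buffer space on the deferred-release path: the
 * vma must already be gone, any outstanding buffers are reclaimed, and
 * every remaining page plus the page array is freed.
 */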
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;

	BUG_ON(alloc->vma);

	buffers = 0;
	mutex_lock(&alloc->mutex);
	while ((n = rb_first(&alloc->allocated_buffers))) {
		struct binder_buffer *buffer;

		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;

			if (!alloc->pages[i])
				continue;

			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%s: %d: page %d at %pK not freed\n",
					   __func__, alloc->pid, i, page_addr);
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(alloc->pages[i]);
			page_count++;
		}
		kfree(alloc->pages);
		vfree(alloc->buffer);
	}
	mutex_unlock(&alloc->mutex);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m: seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, " buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}


/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	WRITE_ONCE(alloc->vma, NULL);
	WRITE_ONCE(alloc->vma_vm_mm, NULL);
}

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->tsk = current->group_leader;
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
}