/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include "binder_alloc.h"
#include "binder_trace.h"

struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

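/*
 * Buffers are laid out back to back in the mmap'ed region, so a buffer's
 * usable size is the distance from its data pointer to the start of the
 * next buffer (or to the end of the region for the last buffer).
 */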
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return (u8 *)alloc->buffer +
			alloc->buffer_size - (u8 *)buffer->data;
	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}

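/*
 * Insert a free buffer into the size-sorted free_buffers rbtree so that
 * allocation can do a best-fit lookup; buffers of equal size go to the
 * right subtree.
 */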
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->data < buffer->data)
			p = &parent->rb_left;
		else if (new_buffer->data > buffer->data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

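/*
 * The allocated_buffers rbtree is keyed by kernel data address, so the
 * incoming user pointer is first translated back to its kernel counterpart
 * using the fixed user/kernel offset recorded at mmap time.
 */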
static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	void *kern_ptr;

	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer->data)
			n = n->rb_left;
		else if (kern_ptr > buffer->data)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer twice
			 */
			if (buffer->free_in_progress) {
				binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
						   "%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
						   alloc->pid, current->pid,
						   (u64)user_ptr);
				return NULL;
			}
			buffer->free_in_progress = 1;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for a buffer that matches the user
 * data pointer.
 *
 * Return: Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

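/*
 * Allocate (allocate == 1) or free (allocate == 0) the physical pages
 * backing the kernel address range [start, end). Freed pages are not
 * returned to the system immediately; they are parked on the global
 * binder_alloc_lru so the shrinker can reclaim them later under memory
 * pressure.
 */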
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
		mm = alloc->vma_vm_mm;

	if (mm) {
		down_read(&mm->mmap_sem);
		vma = alloc->vma;
	}

	if (!vma && need_mm) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
				   alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		ret = map_kernel_range_noflush((unsigned long)page_addr,
					       PAGE_SIZE, PAGE_KERNEL,
					       &page->page_ptr);
		flush_cache_vmap((unsigned long)page_addr,
				 (unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       alloc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;

		trace_binder_alloc_page_end(alloc, index);
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
		continue;

err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		;
	}
err_no_vma:
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}

static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
		struct vm_area_struct *vma)
{
	if (vma)
		alloc->vma_vm_mm = vma->vm_mm;
	/*
	 * If we see alloc->vma is not NULL, the buffer data structures
	 * have been set up completely. This pairs with the smp_rmb() in
	 * binder_alloc_get_vma().
	 * We also want to guarantee the new alloc->vma_vm_mm is always
	 * visible if alloc->vma is set.
	 */
	smp_wmb();
	alloc->vma = vma;
}

static inline struct vm_area_struct *binder_alloc_get_vma(
		struct binder_alloc *alloc)
{
	struct vm_area_struct *vma = NULL;

	if (alloc->vma) {
		/* See the comment in binder_alloc_set_vma() */
		smp_rmb();
		vma = alloc->vma;
	}
	return vma;
}

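/*
 * Best-fit allocator: walk the size-sorted free tree for the smallest
 * free buffer that fits, then split off the tail as a new free buffer
 * when the fit is not exact. For example, a 64-byte request satisfied
 * from a 1024-byte free buffer leaves a 960-byte free buffer behind it.
 */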
static struct binder_buffer *binder_alloc_new_buf_locked(
				struct binder_alloc *alloc,
				size_t data_size,
				size_t offsets_size,
				size_t extra_buffers_size,
				int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (!binder_alloc_get_vma(alloc)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf, no vma\n",
				   alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd\n",
				   alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid extra_buffers_size %zd\n",
				   alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, no async space left\n",
				   alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	/* Pad 0-size buffers so they get assigned unique addresses */
	size = max(size, sizeof(void *));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf size %zd failed, no address space\n",
				   alloc->pid, size);
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
				   total_alloc_size, allocated_buffers,
				   largest_alloc_size, total_free_size,
				   free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
			   alloc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	WARN_ON(n && buffer_size != size);
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
	if (ret)
		return ERR_PTR(ret);

	if (buffer_size != size) {
		struct binder_buffer *new_buffer;

		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!new_buffer) {
			pr_err("%s: %d failed to alloc new buffer struct\n",
			       __func__, alloc->pid);
			goto err_alloc_buf_struct_failed;
		}
		new_buffer->data = (u8 *)buffer->data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->free_in_progress = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got %pK\n",
			   alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_alloc_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}
	return buffer;

err_alloc_buf_struct_failed:
	binder_update_page_range(alloc, 0,
				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
				 end_page_addr);
	return ERR_PTR(-ENOMEM);
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to a
 * pointer-sized boundary).
 *
 * Return: The allocated buffer or %ERR_PTR(-errno) on failure
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

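/*
 * Page helpers used by binder_delete_free_buffer() to decide whether a
 * stale buffer descriptor still shares a physical page with one of its
 * neighbors, in which case that page must not be released yet.
 */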
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}

static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}

static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer->data, prev->data);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid,
					   buffer->data,
					   next->data);
		}
	}

	if (PAGE_ALIGNED(buffer->data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %pK is page aligned\n",
				   alloc->pid, buffer->data);
		to_free = false;
	}

	if (to_free) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
				   alloc->pid, buffer->data,
				   prev->data, next ? next->data : NULL);
		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
					 buffer_start_page(buffer) + PAGE_SIZE);
	}
	list_del(&buffer->entry);
	kfree(buffer);
}

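/*
 * Return a buffer to the free tree, releasing the whole pages it no
 * longer needs and coalescing it with adjacent free buffers so the
 * region does not fragment into unusably small free chunks.
 */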
static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
			   alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->data < alloc->buffer);
	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_free_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	alloc->buffer = area->addr;
	alloc->user_buffer_offset =
		vma->vm_start - (uintptr_t)alloc->buffer;
	mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR(
				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
				__func__, alloc->pid, vma->vm_start,
				vma->vm_end, alloc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
			       sizeof(alloc->pages[0]),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	binder_alloc_set_vma(alloc, vma);
	mmgrab(alloc->vma_vm_mm);

	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_alloc_mmap_lock);
	vfree(alloc->buffer);
	alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "%s: %d %lx-%lx %s failed %d\n", __func__,
			   alloc->pid, vma->vm_start, vma->vm_end,
			   failure_string, ret);
	return ret;
}

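/*
 * Tear down all allocator state for a dying proc: free any buffers whose
 * transactions have already completed, drop every buffer descriptor, and
 * release all pages, including ones still parked on the shrinker LRU.
 */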
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	buffers = 0;
	mutex_lock(&alloc->mutex);
	BUG_ON(alloc->vma);

	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;
			bool on_lru;

			if (!alloc->pages[i].page_ptr)
				continue;

			on_lru = list_lru_del(&binder_alloc_lru,
					      &alloc->pages[i].lru);
			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%s: %d: page %d at %pK %s\n",
					   __func__, alloc->pid, i, page_addr,
					   on_lru ? "on lru" : "active");
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(alloc->pages[i].page_ptr);
			page_count++;
		}
		kfree(alloc->pages);
		vfree(alloc->buffer);
	}
	mutex_unlock(&alloc->mutex);
	if (alloc->vma_vm_mm)
		mmdrop(alloc->vma_vm_mm);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct binder_lru_page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
		page = &alloc->pages[i];
		if (!page->page_ptr)
			free++;
		else if (list_empty(&page->lru))
			active++;
		else
			lru++;
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	binder_alloc_set_vma(alloc, NULL);
}

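/*
 * Reclaim uses trylocks throughout: the shrinker can be invoked from
 * almost any allocation context, so blocking on alloc->mutex or
 * mmap_sem here could deadlock against a thread that already holds
 * one of them while allocating. Pages we cannot lock are skipped.
 */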
/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru instance of the item
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock,
				       void *cb_arg)
{
	struct mm_struct *mm = NULL;
	struct binder_lru_page *page = container_of(item,
						    struct binder_lru_page,
						    lru);
	struct binder_alloc *alloc;
	uintptr_t page_addr;
	size_t index;
	struct vm_area_struct *vma;

	alloc = page->alloc;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;

	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
	vma = binder_alloc_get_vma(alloc);
	if (vma) {
		if (!mmget_not_zero(alloc->vma_vm_mm))
			goto err_mmget;
		mm = alloc->vma_vm_mm;
		if (!down_write_trylock(&mm->mmap_sem))
			goto err_down_write_mmap_sem_failed;
	}

	list_lru_isolate(lru, item);
	spin_unlock(lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range(vma,
			       page_addr + alloc->user_buffer_offset,
			       PAGE_SIZE);

		trace_binder_unmap_user_end(alloc, index);

		up_write(&mm->mmap_sem);
		mmput(mm);
	}

	trace_binder_unmap_kernel_start(alloc, index);

	unmap_kernel_range(page_addr, PAGE_SIZE);
	__free_page(page->page_ptr);
	page->page_ptr = NULL;

	trace_binder_unmap_kernel_end(alloc, index);

	spin_lock(lock);
	mutex_unlock(&alloc->mutex);
	return LRU_REMOVED_RETRY;

err_down_write_mmap_sem_failed:
	mmput_async(mm);
err_mmget:
err_page_already_freed:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	return LRU_SKIP;
}

static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret = list_lru_count(&binder_alloc_lru);

	return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret;

	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			    NULL, sc->nr_to_scan);
	return ret;
}

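/*
 * Register the cached-page pool with the MM shrinker framework so that
 * pages freed by binder but kept on binder_alloc_lru are returned to
 * the system when memory gets tight.
 */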
static struct shrinker binder_shrinker = {
	.count_objects = binder_shrink_count,
	.scan_objects = binder_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
}

int binder_alloc_shrinker_init(void)
{
	int ret = list_lru_init(&binder_alloc_lru);

	if (ret == 0) {
		ret = register_shrinker(&binder_shrinker);
		if (ret)
			list_lru_destroy(&binder_alloc_lru);
	}
	return ret;
}