// SPDX-License-Identifier: GPL-2.0-only
/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
#include "binder_alloc.h"
#include "binder_trace.h"

struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->buffer + alloc->buffer_size - buffer->user_data;
	return binder_buffer_next(buffer)->user_data - buffer->user_data;
}

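/*
 * Illustrative sketch (not part of the driver): buffer sizes are implicit
 * in the gaps between neighboring buffers. With a hypothetical layout
 *
 *	alloc->buffer   = 0x1000, alloc->buffer_size = 0x4000
 *	bufA->user_data = 0x1000
 *	bufB->user_data = 0x1400	(bufA's list neighbor)
 *
 * binder_alloc_buffer_size(alloc, bufA) = 0x1400 - 0x1000 = 0x400, and if
 * bufB is the last buffer, binder_alloc_buffer_size(alloc, bufB) =
 * 0x1000 + 0x4000 - 0x1400 = 0x3c00.
 */
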
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->user_data < buffer->user_data)
			p = &parent->rb_left;
		else if (new_buffer->user_data > buffer->user_data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		unsigned long user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (user_ptr < buffer->user_data) {
			n = n->rb_left;
		} else if (user_ptr > buffer->user_data) {
			n = n->rb_right;
		} else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer when in use by the kernel or
			 * after it's already been freed.
			 */
			if (!buffer->allow_user_free)
				return ERR_PTR(-EPERM);
			buffer->allow_user_free = 0;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return the buffer
 * corresponding to that user pointer. Search the rb tree for a buffer
 * that matches the user data pointer.
 *
 * Return: Pointer to the matching buffer, NULL if no buffer matches, or
 * ERR_PTR(-EPERM) if the buffer is not currently allowed to be freed by
 * userspace.
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   unsigned long user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

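/*
 * Illustrative caller pattern (a sketch, not the driver's exact code):
 * the BC_FREE_BUFFER path in binder.c looks a buffer up by its userspace
 * address before releasing it, roughly:
 *
 *	buffer = binder_alloc_prepare_to_free(alloc, user_ptr);
 *	if (IS_ERR_OR_NULL(buffer))
 *		return;		// bad pointer, in-use buffer or double free
 *	binder_alloc_free_buf(alloc, buffer);
 */
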
static inline void
binder_set_installed_page(struct binder_lru_page *lru_page,
			  struct page *page)
{
	/* Pairs with acquire in binder_get_installed_page() */
	smp_store_release(&lru_page->page_ptr, page);
}

static inline struct page *
binder_get_installed_page(struct binder_lru_page *lru_page)
{
	/* Pairs with release in binder_set_installed_page() */
	return smp_load_acquire(&lru_page->page_ptr);
}

static void binder_free_page_range(struct binder_alloc *alloc,
				   unsigned long start, unsigned long end)
{
	struct binder_lru_page *page;
	unsigned long page_addr;

	trace_binder_update_page_range(alloc, false, start, end);

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		size_t index;
		int ret;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (!binder_get_installed_page(page))
			continue;

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
	}
}

static int binder_install_single_page(struct binder_alloc *alloc,
				      struct binder_lru_page *lru_page,
				      unsigned long addr)
{
	struct page *page;
	int ret = 0;

	if (!mmget_not_zero(alloc->mm))
		return -ESRCH;

	/*
	 * Protected with mmap_lock in write mode as multiple tasks
	 * might race to install the same page.
	 */
	mmap_write_lock(alloc->mm);
	if (binder_get_installed_page(lru_page))
		goto out;

	if (!alloc->vma) {
		pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
		ret = -ESRCH;
		goto out;
	}

	page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
	if (!page) {
		pr_err("%d: failed to allocate page\n", alloc->pid);
		ret = -ENOMEM;
		goto out;
	}

	ret = vm_insert_page(alloc->vma, addr, page);
	if (ret) {
		pr_err("%d: %s failed to insert page at %lx with %d\n",
		       alloc->pid, __func__, addr, ret);
		__free_page(page);
		ret = -ENOMEM;
		goto out;
	}

	/* Mark page installation complete and safe to use */
	binder_set_installed_page(lru_page, page);
out:
	mmap_write_unlock(alloc->mm);
	mmput_async(alloc->mm);
	return ret;
}

static int binder_install_buffer_pages(struct binder_alloc *alloc,
				       struct binder_buffer *buffer,
				       size_t size)
{
	struct binder_lru_page *page;
	unsigned long start, final;
	unsigned long page_addr;

	start = buffer->user_data & PAGE_MASK;
	final = PAGE_ALIGN(buffer->user_data + size);

	for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) {
		unsigned long index;
		int ret;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (binder_get_installed_page(page))
			continue;

		trace_binder_alloc_page_start(alloc, index);

		ret = binder_install_single_page(alloc, page, page_addr);
		if (ret)
			return ret;

		trace_binder_alloc_page_end(alloc, index);
	}

	return 0;
}

/* The range of pages should exclude those shared with other buffers */
static void binder_allocate_page_range(struct binder_alloc *alloc,
				       unsigned long start, unsigned long end)
{
	struct binder_lru_page *page;
	unsigned long page_addr;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: allocate pages %lx-%lx\n",
			   alloc->pid, start, end);

	trace_binder_update_page_range(alloc, true, start, end);

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		unsigned long index;
		bool on_lru;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;
	}
}

static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
					struct vm_area_struct *vma)
{
	/* pairs with smp_load_acquire in binder_alloc_get_vma() */
	smp_store_release(&alloc->vma, vma);
}

static inline struct vm_area_struct *binder_alloc_get_vma(
		struct binder_alloc *alloc)
{
	/* pairs with smp_store_release in binder_alloc_set_vma() */
	return smp_load_acquire(&alloc->vma);
}

static void debug_no_space_locked(struct binder_alloc *alloc)
{
	size_t largest_alloc_size = 0;
	struct binder_buffer *buffer;
	size_t allocated_buffers = 0;
	size_t largest_free_size = 0;
	size_t total_alloc_size = 0;
	size_t total_free_size = 0;
	size_t free_buffers = 0;
	size_t buffer_size;
	struct rb_node *n;

	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
		allocated_buffers++;
		total_alloc_size += buffer_size;
		if (buffer_size > largest_alloc_size)
			largest_alloc_size = buffer_size;
	}

	for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
		free_buffers++;
		total_free_size += buffer_size;
		if (buffer_size > largest_free_size)
			largest_free_size = buffer_size;
	}

	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
			   total_alloc_size, allocated_buffers,
			   largest_alloc_size, total_free_size,
			   free_buffers, largest_free_size);
}

static bool debug_low_async_space_locked(struct binder_alloc *alloc)
{
	/*
	 * Find the number and size of buffers allocated by the current
	 * caller; the idea is that once we cross the threshold, whoever is
	 * responsible for the low async space is likely to try to send
	 * another async transaction, and at some point we'll catch them in
	 * the act. This is more efficient than keeping a map per pid.
	 */
	struct binder_buffer *buffer;
	size_t total_alloc_size = 0;
	int pid = current->tgid;
	size_t num_buffers = 0;
	struct rb_node *n;

	/*
	 * Only start detecting spammers once we have less than 20% of async
	 * space left (which is less than 10% of total buffer size).
	 */
	if (alloc->free_async_space >= alloc->buffer_size / 10) {
		alloc->oneway_spam_detected = false;
		return false;
	}

	for (n = rb_first(&alloc->allocated_buffers); n != NULL;
	     n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		if (buffer->pid != pid)
			continue;
		if (!buffer->async_transaction)
			continue;
		total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
		num_buffers++;
	}

	/*
	 * Warn if this pid has more than 50 transactions, or more than 50% of
	 * async space (which is 25% of total buffer size). Oneway spam is only
	 * detected when the threshold is exceeded.
	 */
	if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
				   alloc->pid, pid, num_buffers, total_alloc_size);
		if (!alloc->oneway_spam_detected) {
			alloc->oneway_spam_detected = true;
			return true;
		}
	}
	return false;
}

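/*
 * Worked example with hypothetical numbers: for a 4 MiB mapping,
 * free_async_space starts at 2 MiB. Detection only arms once
 * free_async_space drops below buffer_size / 10 (about 410 KiB here),
 * and a pid is flagged once it holds more than 50 async buffers or more
 * than buffer_size / 4 = 1 MiB (half of the async space) by itself.
 */
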
/* Callers preallocate @new_buffer; it is freed by this function if unused */
static struct binder_buffer *binder_alloc_new_buf_locked(
				struct binder_alloc *alloc,
				struct binder_buffer *new_buffer,
				size_t size,
				int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct rb_node *best_fit = NULL;
	struct binder_buffer *buffer;
	unsigned long has_page_addr;
	unsigned long end_page_addr;
	size_t buffer_size;

	if (is_async && alloc->free_async_space < size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, no async space left\n",
				   alloc->pid, size);
		buffer = ERR_PTR(-ENOSPC);
		goto out;
	}

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size) {
			n = n->rb_right;
		} else {
			best_fit = n;
			break;
		}
	}

	if (unlikely(!best_fit)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf size %zd failed, no address space\n",
				   alloc->pid, size);
		debug_no_space_locked(alloc);
		buffer = ERR_PTR(-ENOSPC);
		goto out;
	}

	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
			   alloc->pid, size, buffer, buffer_size);

	WARN_ON(n && buffer_size != size);

	has_page_addr = (buffer->user_data + buffer_size) & PAGE_MASK;
	end_page_addr = PAGE_ALIGN(buffer->user_data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	binder_allocate_page_range(alloc, PAGE_ALIGN(buffer->user_data),
				   end_page_addr);
	if (buffer_size != size) {
		new_buffer->user_data = buffer->user_data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
		new_buffer = NULL;
	}

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->allow_user_free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	buffer->async_transaction = is_async;
	buffer->oneway_spam_suspect = false;
	if (is_async) {
		alloc->free_async_space -= size;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_alloc_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
		if (debug_low_async_space_locked(alloc))
			buffer->oneway_spam_suspect = true;
	}

out:
	/* Discard possibly unused new_buffer */
	kfree(new_buffer);
	return buffer;
}

/* Calculate the sanitized total size, returns 0 for invalid request */
static inline size_t sanitized_size(size_t data_size,
				    size_t offsets_size,
				    size_t extra_buffers_size)
{
	size_t total, tmp;

	/* Align to pointer size and check for overflows */
	tmp = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));
	if (tmp < data_size || tmp < offsets_size)
		return 0;
	total = tmp + ALIGN(extra_buffers_size, sizeof(void *));
	if (total < tmp || total < extra_buffers_size)
		return 0;

	/* Pad 0-sized buffers so they get a unique address */
	total = max(total, sizeof(void *));

	return total;
}

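/*
 * Worked example (hypothetical values, 64-bit kernel): for data_size = 13,
 * offsets_size = 8 and extra_buffers_size = 0, sanitized_size() returns
 * ALIGN(13, 8) + ALIGN(8, 8) + ALIGN(0, 8) = 16 + 8 + 0 = 24. A huge
 * request such as data_size = SIZE_MAX - 4 wraps around during alignment
 * and addition, so tmp ends up smaller than data_size, the overflow check
 * trips, and the function returns 0, which the caller maps to -EINVAL.
 */
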
/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       size of the area for user-specified buffer offsets
 * @extra_buffers_size: size of extra space for meta-data (e.g., security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to a
 * pointer-sized boundary).
 *
 * Return: The allocated buffer or %ERR_PTR(-errno) if error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer, *next;
	size_t size;
	int ret;

	/* Check binder_alloc is fully initialized */
	if (!binder_alloc_get_vma(alloc)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf, no vma\n",
				   alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	size = sanitized_size(data_size, offsets_size, extra_buffers_size);
	if (unlikely(!size)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd-%zd\n",
				   alloc->pid, data_size, offsets_size,
				   extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}

	/* Preallocate the next buffer */
	next = kzalloc(sizeof(*next), GFP_KERNEL);
	if (!next)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
	if (IS_ERR(buffer)) {
		mutex_unlock(&alloc->mutex);
		goto out;
	}

	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->extra_buffers_size = extra_buffers_size;
	buffer->pid = current->tgid;
	mutex_unlock(&alloc->mutex);

	ret = binder_install_buffer_pages(alloc, buffer, size);
	if (ret) {
		binder_alloc_free_buf(alloc, buffer);
		buffer = ERR_PTR(ret);
	}
out:
	return buffer;
}

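/*
 * Illustrative sketch of the allocate/fill/free life cycle (hypothetical
 * caller with made-up names tr and secctx_sz, not the driver's exact code):
 *
 *	buffer = binder_alloc_new_buf(alloc, tr->data_size,
 *				      tr->offsets_size, secctx_sz, is_oneway);
 *	if (IS_ERR(buffer))
 *		return PTR_ERR(buffer);
 *	if (binder_alloc_copy_user_to_buffer(alloc, buffer, 0,
 *					     u64_to_user_ptr(tr->data.ptr.buffer),
 *					     tr->data_size))
 *		// handle the fault
 *	...
 *	binder_alloc_free_buf(alloc, buffer);	// on BC_FREE_BUFFER
 */
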
static unsigned long buffer_start_page(struct binder_buffer *buffer)
{
	return buffer->user_data & PAGE_MASK;
}

static unsigned long prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (buffer->user_data - 1) & PAGE_MASK;
}

static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %lx shares page with %lx\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %lx shares page with %lx\n",
					   alloc->pid,
					   buffer->user_data,
					   next->user_data);
		}
	}

	if (PAGE_ALIGNED(buffer->user_data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %lx is page aligned\n",
				   alloc->pid, buffer->user_data);
		to_free = false;
	}

	if (to_free) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %lx does not share page with %lx or %lx\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data,
				   next ? next->user_data : 0);
		binder_free_page_range(alloc, buffer_start_page(buffer),
				       buffer_start_page(buffer) + PAGE_SIZE);
	}
	list_del(&buffer->entry);
	kfree(buffer);
}

static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
			   alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->user_data < alloc->buffer);
	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += buffer_size;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_free_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}

	binder_free_page_range(alloc, PAGE_ALIGN(buffer->user_data),
			       (buffer->user_data + buffer_size) & PAGE_MASK);

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_get_page() - get kernel pointer for given buffer offset
 * @alloc:         binder_alloc for this proc
 * @buffer:        binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @pgoffp:        address to copy final page offset to
 *
 * Look up the struct page corresponding to the address
 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
 * NULL, the byte-offset into the page is written there.
 *
 * The caller is responsible for ensuring that the offset points
 * to a valid address within the @buffer and that @buffer is
 * not freeable by the user. Since it can't be freed, we are
 * guaranteed that the corresponding elements of @alloc->pages[]
 * cannot change.
 *
 * Return: struct page
 */
static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
					  struct binder_buffer *buffer,
					  binder_size_t buffer_offset,
					  pgoff_t *pgoffp)
{
	binder_size_t buffer_space_offset = buffer_offset +
		(buffer->user_data - alloc->buffer);
	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
	size_t index = buffer_space_offset >> PAGE_SHIFT;
	struct binder_lru_page *lru_page;

	lru_page = &alloc->pages[index];
	*pgoffp = pgoff;
	return lru_page->page_ptr;
}

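/*
 * Worked example (hypothetical values, PAGE_SIZE = 4096): with
 * alloc->buffer = 0x1000, buffer->user_data = 0x2a00 and
 * buffer_offset = 0x700, the buffer-space offset is
 * (0x2a00 - 0x1000) + 0x700 = 0x2100, so index = 0x2100 >> 12 = 2 and
 * *pgoffp = 0x2100 & 0xfff = 0x100, i.e. byte 0x100 of the third page.
 */
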
/**
 * binder_alloc_clear_buf() - zero out buffer
 * @alloc:  binder_alloc for this proc
 * @buffer: binder buffer to be cleared
 *
 * memset the given buffer to 0
 */
static void binder_alloc_clear_buf(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
	binder_size_t buffer_offset = 0;

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		memset_page(page, pgoff, 0, size);
		bytes -= size;
		buffer_offset += size;
	}
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:  binder_alloc for this proc
 * @buffer: kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	/*
	 * We could eliminate the call to binder_alloc_clear_buf()
	 * from binder_alloc_deferred_release() by moving this to
	 * binder_free_buf_locked(). However, that could
	 * increase contention for the alloc mutex if clear_on_free
	 * is used frequently for large buffers. The mutex is not
	 * needed for correctness here.
	 */
	if (buffer->clear_on_free) {
		binder_alloc_clear_buf(alloc, buffer);
		buffer->clear_on_free = false;
	}
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc: alloc structure for this proc
 * @vma:   vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 * 0 = success
 * -EBUSY = address space already mapped
 * -ENOMEM = failed to map memory to given address space
 * -EINVAL = @vma does not belong to this process' mm
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	struct binder_buffer *buffer;
	const char *failure_string;
	int ret, i;

	if (unlikely(vma->vm_mm != alloc->mm)) {
		ret = -EINVAL;
		failure_string = "invalid vma->vm_mm";
		goto err_invalid_mm;
	}

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer_size) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}
	alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
				   SZ_4M);
	mutex_unlock(&binder_alloc_mmap_lock);

	alloc->buffer = vma->vm_start;

	alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
			       sizeof(alloc->pages[0]),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}

	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
		alloc->pages[i].alloc = alloc;
		INIT_LIST_HEAD(&alloc->pages[i].lru);
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->user_data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;

	/* Signal binder_alloc is fully initialized */
	binder_alloc_set_vma(alloc, vma);

	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	alloc->buffer = 0;
	mutex_lock(&binder_alloc_mmap_lock);
	alloc->buffer_size = 0;
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
err_invalid_mm:
	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "%s: %d %lx-%lx %s failed %d\n", __func__,
			   alloc->pid, vma->vm_start, vma->vm_end,
			   failure_string, ret);
	return ret;
}

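/*
 * Illustrative note: the mapping is capped at SZ_4M, so a userspace
 * mmap() of, say, 8 MiB still yields buffer_size = 4 MiB, of which half
 * (free_async_space = 2 MiB) is the initial budget for oneway (async)
 * transactions.
 */
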
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	buffers = 0;
	mutex_lock(&alloc->mutex);
	BUG_ON(alloc->vma);

	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		if (buffer->clear_on_free) {
			binder_alloc_clear_buf(alloc, buffer);
			buffer->clear_on_free = false;
		}
		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			unsigned long page_addr;
			bool on_lru;

			if (!alloc->pages[i].page_ptr)
				continue;

			on_lru = list_lru_del(&binder_alloc_lru,
					      &alloc->pages[i].lru);
			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%s: %d: page %d at %lx %s\n",
					   __func__, alloc->pid, i, page_addr,
					   on_lru ? "on lru" : "active");
			__free_page(alloc->pages[i].page_ptr);
			page_count++;
		}
		kfree(alloc->pages);
	}
	mutex_unlock(&alloc->mutex);
	if (alloc->mm)
		mmdrop(alloc->mm);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %lx size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->user_data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct binder_lru_page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	/*
	 * Make sure the binder_alloc is fully initialized, otherwise we might
	 * read inconsistent state.
	 */
	if (binder_alloc_get_vma(alloc) != NULL) {
		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			page = &alloc->pages[i];
			if (!page->page_ptr)
				free++;
			else if (list_empty(&page->lru))
				active++;
			else
				lru++;
		}
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}


/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	binder_alloc_set_vma(alloc, NULL);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru the item belongs to
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock,
				       void *cb_arg)
	__must_hold(lock)
{
	struct mm_struct *mm = NULL;
	struct binder_lru_page *page = container_of(item,
						    struct binder_lru_page,
						    lru);
	struct binder_alloc *alloc;
	unsigned long page_addr;
	size_t index;
	struct vm_area_struct *vma;

	alloc = page->alloc;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;

	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = alloc->buffer + index * PAGE_SIZE;

	mm = alloc->mm;
	if (!mmget_not_zero(mm))
		goto err_mmget;
	if (!mmap_read_trylock(mm))
		goto err_mmap_read_lock_failed;
	vma = vma_lookup(mm, page_addr);
	if (vma && vma != binder_alloc_get_vma(alloc))
		goto err_invalid_vma;

	list_lru_isolate(lru, item);
	spin_unlock(lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range_single(vma, page_addr, PAGE_SIZE, NULL);

		trace_binder_unmap_user_end(alloc, index);
	}
	mmap_read_unlock(mm);
	mmput_async(mm);

	trace_binder_unmap_kernel_start(alloc, index);

	__free_page(page->page_ptr);
	page->page_ptr = NULL;

	trace_binder_unmap_kernel_end(alloc, index);

	spin_lock(lock);
	mutex_unlock(&alloc->mutex);
	return LRU_REMOVED_RETRY;

err_invalid_vma:
	mmap_read_unlock(mm);
err_mmap_read_lock_failed:
	mmput_async(mm);
err_mmget:
err_page_already_freed:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	return LRU_SKIP;
}

static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return list_lru_count(&binder_alloc_lru);
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	return list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			     NULL, sc->nr_to_scan);
}

static struct shrinker *binder_shrinker;

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->pid = current->group_leader->pid;
	alloc->mm = current->mm;
	mmgrab(alloc->mm);
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
}

int binder_alloc_shrinker_init(void)
{
	int ret;

	ret = list_lru_init(&binder_alloc_lru);
	if (ret)
		return ret;

	binder_shrinker = shrinker_alloc(0, "android-binder");
	if (!binder_shrinker) {
		list_lru_destroy(&binder_alloc_lru);
		return -ENOMEM;
	}

	binder_shrinker->count_objects = binder_shrink_count;
	binder_shrinker->scan_objects = binder_shrink_scan;

	shrinker_register(binder_shrinker);

	return 0;
}

void binder_alloc_shrinker_exit(void)
{
	shrinker_free(binder_shrinker);
	list_lru_destroy(&binder_alloc_lru);
}

/**
 * check_buffer() - verify that buffer/offset is safe to access
 * @alloc:  binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @offset: offset into @buffer data
 * @bytes:  bytes to access from offset
 *
 * Check that the @offset/@bytes are within the size of the given
 * @buffer and that the buffer is currently active and not freeable.
 * Offsets must also be multiples of sizeof(u32). The kernel is
 * allowed to touch the buffer in two cases:
 *
 * 1) when the buffer is being created:
 *    (buffer->free == 0 && buffer->allow_user_free == 0)
 * 2) when the buffer is being torn down:
 *    (buffer->free == 0 && buffer->transaction == NULL).
 *
 * Return: true if the buffer is safe to access
 */
static inline bool check_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t offset, size_t bytes)
{
	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);

	return buffer_size >= bytes &&
		offset <= buffer_size - bytes &&
		IS_ALIGNED(offset, sizeof(u32)) &&
		!buffer->free &&
		(!buffer->allow_user_free || !buffer->transaction);
}

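/*
 * Worked example (hypothetical values): for a buffer whose effective size
 * is 64 bytes, offset = 60 with bytes = 8 is rejected because
 * 60 > 64 - 8, while offset = 56 with bytes = 8 passes the bounds and
 * alignment checks. Writing the bound as "offset <= buffer_size - bytes"
 * (after checking buffer_size >= bytes) avoids the overflow that a naive
 * "offset + bytes <= buffer_size" could hit.
 */
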
/**
 * binder_alloc_copy_user_to_buffer() - copy from userspace into a binder buffer
 * @alloc:         binder_alloc for this proc
 * @buffer:        binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @from:          userspace pointer to source buffer
 * @bytes:         bytes to copy
 *
 * Copy bytes from source userspace to target buffer.
 *
 * Return: number of bytes that could not be copied (0 on success)
 */
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes)
{
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return bytes;

	while (bytes) {
		unsigned long size;
		unsigned long ret;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap_local_page(page) + pgoff;
		ret = copy_from_user(kptr, from, size);
		kunmap_local(kptr);
		if (ret)
			return bytes - size + ret;
		bytes -= size;
		from += size;
		buffer_offset += size;
	}
	return 0;
}

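/*
 * Illustrative sketch (hypothetical caller with made-up names user_addr
 * and len): a non-zero return is treated like copy_from_user() failing,
 * e.g.
 *
 *	if (binder_alloc_copy_user_to_buffer(alloc, buffer, 0,
 *					     u64_to_user_ptr(user_addr),
 *					     len)) {
 *		// fault: some bytes were left uncopied
 *		return -EFAULT;
 *	}
 */
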
static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
				       bool to_buffer,
				       struct binder_buffer *buffer,
				       binder_size_t buffer_offset,
				       void *ptr,
				       size_t bytes)
{
	/* All copies must be 32-bit aligned and 32-bit size */
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return -EINVAL;

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		if (to_buffer)
			memcpy_to_page(page, pgoff, ptr, size);
		else
			memcpy_from_page(ptr, page, pgoff, size);
		bytes -= size;
		pgoff = 0;
		ptr = ptr + size;
		buffer_offset += size;
	}
	return 0;
}

int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t buffer_offset,
				void *src,
				size_t bytes)
{
	return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
					   src, bytes);
}

int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
				  void *dest,
				  struct binder_buffer *buffer,
				  binder_size_t buffer_offset,
				  size_t bytes)
{
	return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
					   dest, bytes);
}
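
/*
 * Illustrative sketch (hypothetical caller, made-up name object_offset):
 * reading a binder object out of a buffer at a validated, u32-aligned
 * offset:
 *
 *	struct flat_binder_object fbo;
 *
 *	if (binder_alloc_copy_from_buffer(alloc, &fbo, buffer,
 *					  object_offset, sizeof(fbo)))
 *		return -EINVAL;	// out of bounds or misaligned
 */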