// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */
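/*
 * Illustrative sketch (not part of the driver): taking all three locks
 * for one proc in the documented order looks like
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 *
 * and, per the rule above, no lock of a second proc at the same or a
 * lower level may be taken while any of these are held.
 */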
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
#include <linux/ktime.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);
#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);
char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);
static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}

#define binder_txn_error(x...) \
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)
static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}

#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
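/*
 * Illustrative use of the helpers above (mirrors binder_get_object()
 * later in this file): a validated binder_object_header is converted
 * back to its enclosing object based on hdr->type, e.g.
 *
 *	switch (hdr->type) {
 *	case BINDER_TYPE_BINDER:
 *	case BINDER_TYPE_WEAK_BINDER:
 *		fp = to_flat_binder_object(hdr);
 *		break;
 *	case BINDER_TYPE_FDA:
 *		fda = to_binder_fd_array_object(hdr);
 *		break;
 *	...
 *	}
 */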
static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;
static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
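/*
 * Sketch of the reader side that pairs with the smp_wmb() above
 * (illustrative; the debugfs log printer uses this pattern): sample
 * debug_id_done first, order the reads with smp_rmb(), and re-check
 * afterwards to detect a slot that was rewritten while being read:
 *
 *	int debug_id = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();
 *	// ... read/print the entry fields ...
 *	if (debug_id && debug_id == READ_ONCE(e->debug_id_done))
 *		// the entry was stable while we read it
 */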
enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:	struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}
/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:	struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}
/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:	struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:	struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}
/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:	struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:	struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:	binder_proc associated with list
 * @list:	list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}
/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:	struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
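/*
 * Illustrative note: callers that do not already hold proc->inner_lock
 * use the wrapper above, e.g. queueing a transaction's work item:
 *
 *	binder_enqueue_thread_work(thread, &t->work);
 *
 * while code already inside an _ilocked section calls
 * binder_enqueue_thread_work_ilocked() directly (or the _deferred_
 * variant when the item alone should not keep the thread awake).
 */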
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:	binder_proc associated with list
 * @work:	struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}
static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}
/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);

	return node;
}
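/*
 * Illustrative pairing (mirrors binder_translate_binder() below): a
 * node returned by binder_get_node() holds a temporary reference that
 * must be dropped with binder_put_node() when the caller is done:
 *
 *	node = binder_get_node(proc, fp->binder);
 *	if (node) {
 *		// ... node cannot be freed while the tmpref is held ...
 *		binder_put_node(node);
 *	}
 */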
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}
static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}
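/*
 * Illustrative note: node increments and decrements must be balanced
 * with matching @strong/@internal arguments, and the final decrement
 * may free the node, so it must not be touched afterwards, e.g.
 *
 *	if (!binder_inc_node(node, 1, 0, NULL)) {
 *		// ... node holds a local strong ref ...
 *		binder_dec_node(node, 1, 0);	// may free node
 *	}
 */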
static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
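/*
 * Worked example of the descriptor scan above (illustrative): if the
 * proc already has refs with desc 1, 2 and 4, the walk over
 * refs_by_desc advances new_ref->data.desc to 2, then 3, and stops at
 * the ref with desc 4 (since 4 > 3), so the new ref gets desc 3 --
 * the lowest unused descriptor. Desc 0 is reserved for the context
 * manager node.
 */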
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:	ref to be incremented
 * @strong:	if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: %true if ref is cleaned up and ready to be freed.
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
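/*
 * Illustrative sequence for a fresh ref (s = 0, w = 0), assuming the
 * outer lock is held throughout:
 *
 *	binder_inc_ref_olocked(ref, 1, NULL);	// s=1, node gains strong ref
 *	binder_inc_ref_olocked(ref, 1, NULL);	// s=2, node untouched
 *	binder_dec_ref_olocked(ref, 1);		// s=1
 *	binder_dec_ref_olocked(ref, 1);		// s=0, w=0: ref is cleaned
 *						// up and %true is returned,
 *						// so the caller must free it
 */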
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node, or NULL if not found or if the ref is not
 * strong when a strong ref is required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}
/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling on the new reference
		 * with strong=0 and a tmp_refs will not decrement
		 * the node. The new_ref gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}
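/*
 * Illustrative use (mirrors binder_send_failed_reply() below): the
 * thread returned here is pinned by a tmp_ref and the inner lock is
 * held, so the caller unwinds with
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		// ... operate on target_thread ...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */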
/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction whose fd fixups are to be freed
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		if (fixup->target_fd >= 0)
			put_unused_fd(fixup->target_fd);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}

static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}
/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "undelivered transaction %d, %s\n",
			     t->debug_id, reason);
		binder_free_transaction(t);
	}
}
/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @u:		sender's user pointer to base of buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Copy the binder object at the given offset into @object. If @u is
 * provided then the copy is from the sender's buffer. If not, then
 * it is copied from the target's @buffer.
 *
 * Return:	If there's a valid metadata object at @offset, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				const void __user *u,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr))
		return 0;
	if (u) {
		if (copy_from_user(object, u + offset, read_size))
			return 0;
	} else {
		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
						  offset, read_size))
			return 0;
	}

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc:	binder_proc owning the buffer
 * @b:		binder_buffer containing the object
 * @object:	struct binder_object to read into
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 *		If @object_offsetp is non-NULL, then the offset within
 *		@b is written to it.
 */
static struct binder_buffer_object *binder_validate_ptr(
						struct binder_proc *proc,
						struct binder_buffer *b,
						struct binder_object *object,
						binder_size_t index,
						binder_size_t start_offset,
						binder_size_t *object_offsetp,
						binder_size_t num_valid)
{
	size_t object_size;
	binder_size_t object_offset;
	unsigned long buffer_offset;

	if (index >= num_valid)
		return NULL;

	buffer_offset = start_offset + sizeof(binder_size_t) * index;
	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  b, buffer_offset,
					  sizeof(object_offset)))
		return NULL;
	object_size = binder_get_object(proc, NULL, b, object_offset, object);
	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
		return NULL;
	if (object_offsetp)
		*object_offsetp = object_offset;

	return &object->bbo;
}
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc:		binder_proc owning the buffer
 * @b:			transaction buffer
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @buffer to fix up
 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_proc *proc,
				  struct binder_buffer *b,
				  binder_size_t objects_start_offset,
				  binder_size_t buffer_obj_offset,
				  binder_size_t fixup_offset,
				  binder_size_t last_obj_offset,
				  binder_size_t last_min_offset)
{
	if (!last_obj_offset) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj_offset != buffer_obj_offset) {
		unsigned long buffer_offset;
		struct binder_object last_object;
		struct binder_buffer_object *last_bbo;
		size_t object_size = binder_get_object(proc, NULL, b,
						       last_obj_offset,
						       &last_object);
		if (object_size != sizeof(*last_bbo))
			return false;

		last_bbo = &last_object.bbo;
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
		buffer_offset = objects_start_offset +
			sizeof(binder_size_t) * last_bbo->parent;
		if (binder_alloc_copy_from_buffer(&proc->alloc,
						  &last_obj_offset,
						  b, buffer_offset,
						  sizeof(last_obj_offset)))
			return false;
	}
	return (fixup_offset >= last_min_offset);
}
/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork:	callback_head for task work
 * @file:	file to close
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */
struct binder_task_work_cb {
	struct callback_head twork;
	struct file *file;
};

/**
 * binder_do_fd_close() - close list of file descriptors
 * @twork:	callback head for task work
 *
 * It is not safe to call ksys_close() during the binder_ioctl()
 * function if there is a chance that binder's own file descriptor
 * might be closed. This is to meet the requirements for using
 * fdget() (see comments for __fget_light()). Therefore use
 * task_work_add() to schedule the close operation once we have
 * returned from binder_ioctl(). This function is a callback
 * for that mechanism and does the actual ksys_close() on the
 * given file descriptor.
 */
static void binder_do_fd_close(struct callback_head *twork)
{
	struct binder_task_work_cb *twcb = container_of(twork,
			struct binder_task_work_cb, twork);

	fput(twcb->file);
	kfree(twcb);
}

/**
 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
 * @fd:		file-descriptor to close
 *
 * See comments in binder_do_fd_close(). This function is used to schedule
 * a file-descriptor to be closed after returning from binder_ioctl().
 */
static void binder_deferred_fd_close(int fd)
{
	struct binder_task_work_cb *twcb;

	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	if (!twcb)
		return;
	init_task_work(&twcb->twork, binder_do_fd_close);
	twcb->file = file_close_fd(fd);
	if (twcb->file) {
		/* pin it until binder_do_fd_close(); see comments there */
		get_file(twcb->file);
		filp_close(twcb->file, current->files);
		task_work_add(current, &twcb->twork, TWA_RESUME);
	} else {
		kfree(twcb);
	}
}
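/*
 * Illustrative use (see the BINDER_TYPE_FDA teardown below): fds that
 * were installed on behalf of an undelivered transaction are released
 * with
 *
 *	binder_deferred_fd_close(fd);
 *
 * rather than a direct close, so the actual close runs from task work
 * after binder_ioctl() has returned.
 */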
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_thread *thread,
					      struct binder_buffer *buffer,
					      binder_size_t off_end_offset,
					      bool is_failure)
{
	int debug_id = buffer->debug_id;
	binder_size_t off_start_offset, buffer_offset;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size,
		     (unsigned long long)off_end_offset);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));

	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size = 0;
		struct binder_object object;
		binder_size_t object_offset;

		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
						   buffer, buffer_offset,
						   sizeof(object_offset)))
			object_size = binder_get_object(proc, NULL, buffer,
							object_offset, &object);
		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)object_offset, buffer->data_size);
			continue;
		}
		hdr = &object.hdr;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			/*
			 * No need to close the file here since user-space
			 * closes it for successfully delivered
			 * transactions. For transactions that weren't
			 * delivered, the new fd was never allocated so
			 * there is no need to close and the fput on the
			 * file is done when the transaction is torn
			 * down.
			 */
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			struct binder_object ptr_object;
			binder_size_t fda_offset;
			size_t fd_index;
			binder_size_t fd_buf_size;
			binder_size_t num_valid;

			if (is_failure) {
				/*
				 * The fd fixups have not been applied so no
				 * fds need to be closed.
				 */
				continue;
			}

			num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(proc, buffer, &ptr_object,
						     fda->parent,
						     off_start_offset,
						     NULL,
						     num_valid);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			/*
			 * the source data for binder_buffer_object is visible
			 * to user-space and the @buffer element is the user
			 * pointer to the buffer_object containing the fd_array.
			 * Convert the address to an offset relative to
			 * the base of the transaction buffer.
			 */
			fda_offset = parent->buffer - buffer->user_data +
				fda->parent_offset;
			for (fd_index = 0; fd_index < fda->num_fds;
			     fd_index++) {
				u32 fd;
				int err;
				binder_size_t offset = fda_offset +
					fd_index * sizeof(fd);

				err = binder_alloc_copy_from_buffer(
						&proc->alloc, &fd, buffer,
						offset, sizeof(fd));
				WARN_ON(err);
				if (!err) {
					binder_deferred_fd_close(fd);
					/*
					 * Need to make sure the thread goes
					 * back to userspace to complete the
					 * deferred close
					 */
					if (thread)
						thread->looper_need_return = true;
				}
			}
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}

/* Clean up all the objects in the buffer */
static inline void binder_release_entire_buffer(struct binder_proc *proc,
						struct binder_thread *thread,
						struct binder_buffer *buffer,
						bool is_failure)
{
	binder_size_t off_end_offset;

	off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
	off_end_offset += buffer->offsets_size;

	binder_transaction_buffer_release(proc, thread, buffer,
					  off_end_offset, is_failure);
}
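/*
 * Illustrative buffer layout assumed by the two functions above:
 *
 *	offset 0:		transaction data (data_size bytes)
 *	off_start_offset:	ALIGN(data_size, sizeof(void *))
 *	...			offsets array, one binder_size_t entry
 *				per object (offsets_size bytes)
 *	off_end_offset:		off_start_offset + offsets_size
 */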
2128 static int binder_translate_binder(struct flat_binder_object *fp,
2129 struct binder_transaction *t,
2130 struct binder_thread *thread)
2131 {
2132 struct binder_node *node;
2133 struct binder_proc *proc = thread->proc;
2134 struct binder_proc *target_proc = t->to_proc;
2135 struct binder_ref_data rdata;
2136 int ret;
2138 node = binder_get_node(proc, fp->binder);
2139 if (!node) {
2140 node = binder_new_node(proc, fp);
2141 if (!node)
2142 return -ENOMEM;
2143 }
2144 if (fp->cookie != node->cookie) {
2145 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2146 proc->pid, thread->pid, (u64)fp->binder,
2147 node->debug_id, (u64)fp->cookie,
2148 (u64)node->cookie);
2149 ret = -EINVAL;
2150 goto done;
2151 }
2152 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2153 ret = -EPERM;
2154 goto done;
2155 }
2157 ret = binder_inc_ref_for_node(target_proc, node,
2158 fp->hdr.type == BINDER_TYPE_BINDER,
2159 &thread->todo, &rdata);
2160 if (ret)
2161 goto done;
2163 if (fp->hdr.type == BINDER_TYPE_BINDER)
2164 fp->hdr.type = BINDER_TYPE_HANDLE;
2165 else
2166 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2167 fp->binder = 0;
2168 fp->handle = rdata.desc;
2169 fp->cookie = 0;
2171 trace_binder_transaction_node_to_ref(t, node, &rdata);
2172 binder_debug(BINDER_DEBUG_TRANSACTION,
2173 " node %d u%016llx -> ref %d desc %d\n",
2174 node->debug_id, (u64)node->ptr,
2175 rdata.debug_id, rdata.desc);
2176 done:
2177 binder_put_node(node);
2178 return ret;
2179 }
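/*
 * Illustrative sketch (hypothetical values, not part of the driver):
 * a sender embedding its own node in a transaction supplies
 *
 *	struct flat_binder_object fp = {
 *		.hdr.type = BINDER_TYPE_BINDER,
 *		.binder = 0x7f00dead0000,	(sender-local user pointer)
 *		.cookie = 0x12345678,
 *	};
 *
 * and after binder_translate_binder() the target instead sees
 * hdr.type == BINDER_TYPE_HANDLE with fp.handle set to rdata.desc,
 * a descriptor that is only meaningful in the target process.
 */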
2181 static int binder_translate_handle(struct flat_binder_object *fp,
2182 struct binder_transaction *t,
2183 struct binder_thread *thread)
2184 {
2185 struct binder_proc *proc = thread->proc;
2186 struct binder_proc *target_proc = t->to_proc;
2187 struct binder_node *node;
2188 struct binder_ref_data src_rdata;
2189 int ret = 0;
2191 node = binder_get_node_from_ref(proc, fp->handle,
2192 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2193 if (!node) {
2194 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2195 proc->pid, thread->pid, fp->handle);
2196 return -EINVAL;
2197 }
2198 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2199 ret = -EPERM;
2200 goto done;
2201 }
2203 binder_node_lock(node);
2204 if (node->proc == target_proc) {
2205 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2206 fp->hdr.type = BINDER_TYPE_BINDER;
2207 else
2208 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2209 fp->binder = node->ptr;
2210 fp->cookie = node->cookie;
2211 if (node->proc)
2212 binder_inner_proc_lock(node->proc);
2213 else
2214 __acquire(&node->proc->inner_lock);
2215 binder_inc_node_nilocked(node,
2216 fp->hdr.type == BINDER_TYPE_BINDER,
2217 0, NULL);
2218 if (node->proc)
2219 binder_inner_proc_unlock(node->proc);
2220 else
2221 __release(&node->proc->inner_lock);
2222 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2223 binder_debug(BINDER_DEBUG_TRANSACTION,
2224 " ref %d desc %d -> node %d u%016llx\n",
2225 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2226 (u64)node->ptr);
2227 binder_node_unlock(node);
2228 } else {
2229 struct binder_ref_data dest_rdata;
2231 binder_node_unlock(node);
2232 ret = binder_inc_ref_for_node(target_proc, node,
2233 fp->hdr.type == BINDER_TYPE_HANDLE,
2234 NULL, &dest_rdata);
2235 if (ret)
2236 goto done;
2238 fp->binder = 0;
2239 fp->handle = dest_rdata.desc;
2240 fp->cookie = 0;
2241 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2242 &dest_rdata);
2243 binder_debug(BINDER_DEBUG_TRANSACTION,
2244 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2245 src_rdata.debug_id, src_rdata.desc,
2246 dest_rdata.debug_id, dest_rdata.desc,
2247 node->debug_id);
2248 }
2249 done:
2250 binder_put_node(node);
2251 return ret;
2252 }
2254 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2255 struct binder_transaction *t,
2256 struct binder_thread *thread,
2257 struct binder_transaction *in_reply_to)
2258 {
2259 struct binder_proc *proc = thread->proc;
2260 struct binder_proc *target_proc = t->to_proc;
2261 struct binder_txn_fd_fixup *fixup;
2262 struct file *file;
2263 int ret = 0;
2264 bool target_allows_fd;
2266 if (in_reply_to)
2267 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2268 else
2269 target_allows_fd = t->buffer->target_node->accept_fds;
2270 if (!target_allows_fd) {
2271 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2272 proc->pid, thread->pid,
2273 in_reply_to ? "reply" : "transaction",
2274 fd);
2275 ret = -EPERM;
2276 goto err_fd_not_accepted;
2277 }
2279 file = fget(fd);
2280 if (!file) {
2281 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2282 proc->pid, thread->pid, fd);
2283 ret = -EBADF;
2284 goto err_fget;
2285 }
2286 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2287 if (ret < 0) {
2288 ret = -EPERM;
2289 goto err_security;
2290 }
2292 /*
2293 * Add fixup record for this transaction. The allocation
2294 * of the fd in the target needs to be done from a
2295 * target thread.
2296 */
2297 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2298 if (!fixup) {
2299 ret = -ENOMEM;
2300 goto err_alloc;
2301 }
2302 fixup->file = file;
2303 fixup->offset = fd_offset;
2304 fixup->target_fd = -1;
2305 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2306 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2308 return ret;
2310 err_alloc:
2311 err_security:
2312 fput(file);
2313 err_fget:
2314 err_fd_not_accepted:
2315 return ret;
2316 }
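/*
 * A condensed sketch of the fd fixup lifecycle (assumed flow): this
 * function only records intent, since it runs in the sender's context
 * where a target-side fd cannot be allocated:
 *
 *	fixup->file = file;		(pinned reference to the file)
 *	fixup->offset = fd_offset;	(where the fd lives in the buffer)
 *	fixup->target_fd = -1;		(not yet allocated in the target)
 *
 * The t->fd_fixups list is consumed later, in the target thread's
 * context, where the receiver's file table is the current one.
 */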
2319 * struct binder_ptr_fixup - data to be fixed-up in target buffer
2320 * @offset: offset in target buffer to fixup
2321 * @skip_size: bytes to skip in copy (fixup will be written later)
2322 * @fixup_data: data to write at fixup offset
2323 * @node: list node
2325 * This is used for the pointer fixup list (pf) which is created and consumed
2326 * during binder_transaction() and is only accessed locally. No
2327 * locking is necessary.
2329 * The list is ordered by @offset.
2331 struct binder_ptr_fixup {
2332 binder_size_t offset;
2333 size_t skip_size;
2334 binder_uintptr_t fixup_data;
2335 struct list_head node;
2336 };
2339 * struct binder_sg_copy - scatter-gather data to be copied
2340 * @offset: offset in target buffer
2341 * @sender_uaddr: user address in source buffer
2342 * @length: bytes to copy
2343 * @node: list node
2345 * This is used for the sg copy list (sgc) which is created and consumed
2346 * during binder_transaction() and is only accessed locally. No
2347 * locking is necessary.
2349 * The list is ordered by @offset.
2351 struct binder_sg_copy {
2352 binder_size_t offset;
2353 const void __user *sender_uaddr;
2354 size_t length;
2355 struct list_head node;
2356 };
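/*
 * Worked example (hypothetical offsets): with one sg block
 * { .offset = 64, .length = 32 } on sgc_head and one fixup
 * { .offset = 72, .fixup_data = <target address>, .skip_size = 0 } on
 * pf_head, binder_do_deferred_txn_copies() below copies sender bytes
 * 64..71, writes the 8-byte fixup_data at offset 72, then resumes
 * copying at 80..95. The sender's untranslated pointer value is never
 * visible in the target buffer.
 */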
2359 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2360 * @alloc: binder_alloc associated with @buffer
2361 * @buffer: binder buffer in target process
2362 * @sgc_head: list_head of scatter-gather copy list
2363 * @pf_head: list_head of pointer fixup list
2365 * Processes all elements of @sgc_head, applying fixups from @pf_head
2366 * and copying the scatter-gather data from the source process' user
2367 * buffer to the target's buffer. It is expected that the list creation
2368 * and processing all occurs during binder_transaction() so these lists
2369 * are only accessed in local context.
2371 * Return: 0=success, else -errno
2373 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2374 struct binder_buffer *buffer,
2375 struct list_head *sgc_head,
2376 struct list_head *pf_head)
2377 {
2378 int ret = 0;
2379 struct binder_sg_copy *sgc, *tmpsgc;
2380 struct binder_ptr_fixup *tmppf;
2381 struct binder_ptr_fixup *pf =
2382 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2383 node);
2385 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2386 size_t bytes_copied = 0;
2388 while (bytes_copied < sgc->length) {
2389 size_t copy_size;
2390 size_t bytes_left = sgc->length - bytes_copied;
2391 size_t offset = sgc->offset + bytes_copied;
2393 /*
2394 * We copy up to the fixup (pointed to by pf)
2395 */
2396 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2397 : bytes_left;
2398 if (!ret && copy_size)
2399 ret = binder_alloc_copy_user_to_buffer(
2400 alloc, buffer,
2401 offset,
2402 sgc->sender_uaddr + bytes_copied,
2403 copy_size);
2404 bytes_copied += copy_size;
2405 if (copy_size != bytes_left) {
2406 BUG_ON(!pf);
2407 /* we stopped at a fixup offset */
2408 if (pf->skip_size) {
2409 /*
2410 * we are just skipping. This is for
2411 * BINDER_TYPE_FDA where the translated
2412 * fds will be fixed up when we get
2413 * to target context.
2414 */
2415 bytes_copied += pf->skip_size;
2416 } else {
2417 /* apply the fixup indicated by pf */
2418 if (!ret)
2419 ret = binder_alloc_copy_to_buffer(
2420 alloc, buffer,
2421 pf->offset,
2422 &pf->fixup_data,
2423 sizeof(pf->fixup_data));
2424 bytes_copied += sizeof(pf->fixup_data);
2425 }
2426 list_del(&pf->node);
2427 kfree(pf);
2428 pf = list_first_entry_or_null(pf_head,
2429 struct binder_ptr_fixup, node);
2430 }
2431 }
2432 list_del(&sgc->node);
2433 kfree(sgc);
2434 }
2435 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2436 BUG_ON(pf->skip_size == 0);
2437 list_del(&pf->node);
2438 kfree(pf);
2439 }
2440 BUG_ON(!list_empty(sgc_head));
2442 return ret > 0 ? -EINVAL : ret;
2443 }
2446 * binder_cleanup_deferred_txn_lists() - free specified lists
2447 * @sgc_head: list_head of scatter-gather copy list
2448 * @pf_head: list_head of pointer fixup list
2450 * Called to clean up @sgc_head and @pf_head if there is an
2451 * error.
2453 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2454 struct list_head *pf_head)
2455 {
2456 struct binder_sg_copy *sgc, *tmpsgc;
2457 struct binder_ptr_fixup *pf, *tmppf;
2459 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2460 list_del(&sgc->node);
2461 kfree(sgc);
2462 }
2463 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2464 list_del(&pf->node);
2465 kfree(pf);
2466 }
2467 }
2470 * binder_defer_copy() - queue a scatter-gather buffer for copy
2471 * @sgc_head: list_head of scatter-gather copy list
2472 * @offset: binder buffer offset in target process
2473 * @sender_uaddr: user address in source process
2474 * @length: bytes to copy
2476 * Specify a scatter-gather block to be copied. The actual copy must
2477 * be deferred until all the needed fixups are identified and queued.
2478 * Then the copy and fixups are done together so un-translated values
2479 * from the source are never visible in the target buffer.
2481 * We are guaranteed that repeated calls to this function will have
2482 * monotonically increasing @offset values so the list will naturally
2483 * be ordered.
2485 * Return: 0=success, else -errno
2487 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2488 const void __user *sender_uaddr, size_t length)
2489 {
2490 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2492 if (!bc)
2493 return -ENOMEM;
2495 bc->offset = offset;
2496 bc->sender_uaddr = sender_uaddr;
2497 bc->length = length;
2498 INIT_LIST_HEAD(&bc->node);
2500 /*
2501 * We are guaranteed that the deferred copies are in-order
2502 * so just add to the tail.
2503 */
2504 list_add_tail(&bc->node, sgc_head);
2506 return 0;
2507 }
2510 * binder_add_fixup() - queue a fixup to be applied to sg copy
2511 * @pf_head: list_head of binder ptr fixup list
2512 * @offset: binder buffer offset in target process
2513 * @fixup: bytes to be copied for fixup
2514 * @skip_size: bytes to skip when copying (fixup will be applied later)
2516 * Add the specified fixup to a list ordered by @offset. When copying
2517 * the scatter-gather buffers, the fixup will be copied instead of
2518 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2519 * will be applied later (in target process context), so we just skip
2520 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2521 * fixup data specified by @fixup.
2523 * This function is called *mostly* in @offset order, but there are
2524 * exceptions. Since out-of-order inserts are relatively uncommon,
2525 * we insert the new element by searching backward from the tail of
2526 * the list.
2528 * Return: 0=success, else -errno
2530 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2531 binder_uintptr_t fixup, size_t skip_size)
2532 {
2533 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2534 struct binder_ptr_fixup *tmppf;
2536 if (!pf)
2537 return -ENOMEM;
2539 pf->offset = offset;
2540 pf->fixup_data = fixup;
2541 pf->skip_size = skip_size;
2542 INIT_LIST_HEAD(&pf->node);
2544 /* Fixups are *mostly* added in-order, but there are some
2545 * exceptions. Look backwards through list for insertion point.
2546 */
2547 list_for_each_entry_reverse(tmppf, pf_head, node) {
2548 if (tmppf->offset < pf->offset) {
2549 list_add(&pf->node, &tmppf->node);
2550 return 0;
2551 }
2552 }
2553 /*
2554 * if we get here, then the new offset is the lowest so
2555 * insert at the head
2556 */
2557 list_add(&pf->node, pf_head);
2558 return 0;
2559 }
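/*
 * Example of the backward search (hypothetical offsets): with fixups
 * already queued at offsets 16, 40 and 64, adding one at offset 48
 * walks 64 -> 40, finds 40 < 48, and links the new node right after
 * 40. The list stays ordered without rescanning from the head.
 */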
2561 static int binder_translate_fd_array(struct list_head *pf_head,
2562 struct binder_fd_array_object *fda,
2563 const void __user *sender_ubuffer,
2564 struct binder_buffer_object *parent,
2565 struct binder_buffer_object *sender_uparent,
2566 struct binder_transaction *t,
2567 struct binder_thread *thread,
2568 struct binder_transaction *in_reply_to)
2569 {
2570 binder_size_t fdi, fd_buf_size;
2571 binder_size_t fda_offset;
2572 const void __user *sender_ufda_base;
2573 struct binder_proc *proc = thread->proc;
2574 int ret;
2576 if (fda->num_fds == 0)
2577 return 0;
2579 fd_buf_size = sizeof(u32) * fda->num_fds;
2580 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2581 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2582 proc->pid, thread->pid, (u64)fda->num_fds);
2583 return -EINVAL;
2584 }
2585 if (fd_buf_size > parent->length ||
2586 fda->parent_offset > parent->length - fd_buf_size) {
2587 /* No space for all file descriptors here. */
2588 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2589 proc->pid, thread->pid, (u64)fda->num_fds);
2590 return -EINVAL;
2591 }
2592 /*
2593 * the source data for binder_buffer_object is visible
2594 * to user-space and the @buffer element is the user
2595 * pointer to the buffer_object containing the fd_array.
2596 * Convert the address to an offset relative to
2597 * the base of the transaction buffer.
2598 */
2599 fda_offset = parent->buffer - t->buffer->user_data +
2600 fda->parent_offset;
2601 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2602 fda->parent_offset;
2604 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2605 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2606 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2607 proc->pid, thread->pid);
2608 return -EINVAL;
2609 }
2610 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2611 if (ret)
2612 return ret;
2614 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2615 u32 fd;
2616 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2617 binder_size_t sender_uoffset = fdi * sizeof(fd);
2619 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2620 if (!ret)
2621 ret = binder_translate_fd(fd, offset, t, thread,
2622 in_reply_to);
2623 if (ret)
2624 return ret > 0 ? -EINVAL : ret;
2625 }
2626 return 0;
2627 }
2629 static int binder_fixup_parent(struct list_head *pf_head,
2630 struct binder_transaction *t,
2631 struct binder_thread *thread,
2632 struct binder_buffer_object *bp,
2633 binder_size_t off_start_offset,
2634 binder_size_t num_valid,
2635 binder_size_t last_fixup_obj_off,
2636 binder_size_t last_fixup_min_off)
2637 {
2638 struct binder_buffer_object *parent;
2639 struct binder_buffer *b = t->buffer;
2640 struct binder_proc *proc = thread->proc;
2641 struct binder_proc *target_proc = t->to_proc;
2642 struct binder_object object;
2643 binder_size_t buffer_offset;
2644 binder_size_t parent_offset;
2646 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2647 return 0;
2649 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2650 off_start_offset, &parent_offset,
2651 num_valid);
2652 if (!parent) {
2653 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2654 proc->pid, thread->pid);
2655 return -EINVAL;
2656 }
2658 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2659 parent_offset, bp->parent_offset,
2660 last_fixup_obj_off,
2661 last_fixup_min_off)) {
2662 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2663 proc->pid, thread->pid);
2664 return -EINVAL;
2665 }
2667 if (parent->length < sizeof(binder_uintptr_t) ||
2668 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2669 /* No space for a pointer here! */
2670 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2671 proc->pid, thread->pid);
2672 return -EINVAL;
2673 }
2675 buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
2677 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2678 }
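/*
 * Illustrative layout (hypothetical): if object B carries
 * BINDER_BUFFER_FLAG_HAS_PARENT with bp->parent naming object A and
 * bp->parent_offset == 8, then the pointer at offset 8 inside A's
 * buffer must point at B. A was already copied with the sender's
 * value, so the fixup queued here rewrites that slot to bp->buffer,
 * B's address as seen by the target.
 */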
2681 * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2682 * @t1: the pending async txn in the frozen process
2683 * @t2: the new async txn to supersede the outdated pending one
2685 * Return: true if t2 can supersede t1
2686 * false if t2 cannot supersede t1
2688 static bool binder_can_update_transaction(struct binder_transaction *t1,
2689 struct binder_transaction *t2)
2690 {
2691 if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2692 (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2693 return false;
2694 if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2695 t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2696 t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2697 t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2698 return true;
2699 return false;
2700 }
2703 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2704 * @t: new async transaction
2705 * @target_list: list to find outdated transaction
2707 * Return: the outdated transaction if found
2708 * NULL if no outdated transaction can be found
2710 * Requires the proc->inner_lock to be held.
2712 static struct binder_transaction *
2713 binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2714 struct list_head *target_list)
2715 {
2716 struct binder_work *w;
2718 list_for_each_entry(w, target_list, entry) {
2719 struct binder_transaction *t_queued;
2721 if (w->type != BINDER_WORK_TRANSACTION)
2722 continue;
2723 t_queued = container_of(w, struct binder_transaction, work);
2724 if (binder_can_update_transaction(t_queued, t))
2725 return t_queued;
2726 }
2727 return NULL;
2728 }
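/*
 * Usage sketch (assumed userspace flow): a client spamming one-way
 * state updates toward a frozen process marks them with
 *
 *	tr.flags = TF_ONE_WAY | TF_UPDATE_TXN;
 *
 * so only the newest update with a matching code and target node
 * stays queued; binder_proc_transaction() below frees the superseded
 * transaction once the locks are dropped.
 */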
2731 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2732 * @t: transaction to send
2733 * @proc: process to send the transaction to
2734 * @thread: thread in @proc to send the transaction to (may be NULL)
2736 * This function queues a transaction to the specified process. It will try
2737 * to find a thread in the target process to handle the transaction and
2738 * wake it up. If no thread is found, the work is queued to the proc
2739 * waitqueue.
2741 * If the @thread parameter is not NULL, the transaction is always queued
2742 * to the waitlist of that specific thread.
2744 * Return: 0 if the transaction was successfully queued
2745 * BR_DEAD_REPLY if the target process or thread is dead
2746 * BR_FROZEN_REPLY if the target process or thread is frozen and
2747 * the sync transaction was rejected
2748 * BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2749 * and the async transaction was successfully queued
2751 static int binder_proc_transaction(struct binder_transaction *t,
2752 struct binder_proc *proc,
2753 struct binder_thread *thread)
2754 {
2755 struct binder_node *node = t->buffer->target_node;
2756 bool oneway = !!(t->flags & TF_ONE_WAY);
2757 bool pending_async = false;
2758 struct binder_transaction *t_outdated = NULL;
2759 bool frozen = false;
2762 binder_node_lock(node);
2763 if (oneway) {
2764 BUG_ON(thread);
2765 if (node->has_async_transaction)
2766 pending_async = true;
2767 else
2768 node->has_async_transaction = true;
2769 }
2771 binder_inner_proc_lock(proc);
2772 if (proc->is_frozen) {
2773 frozen = true;
2774 proc->sync_recv |= !oneway;
2775 proc->async_recv |= oneway;
2776 }
2778 if ((frozen && !oneway) || proc->is_dead ||
2779 (thread && thread->is_dead)) {
2780 binder_inner_proc_unlock(proc);
2781 binder_node_unlock(node);
2782 return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2783 }
2785 if (!thread && !pending_async)
2786 thread = binder_select_thread_ilocked(proc);
2788 if (thread) {
2789 binder_enqueue_thread_work_ilocked(thread, &t->work);
2790 } else if (!pending_async) {
2791 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2793 if ((t->flags & TF_UPDATE_TXN) && frozen) {
2794 t_outdated = binder_find_outdated_transaction_ilocked(t,
2795 &proc->todo);
2796 if (t_outdated) {
2797 binder_debug(BINDER_DEBUG_TRANSACTION,
2798 "txn %d supersedes %d\n",
2799 t->debug_id, t_outdated->debug_id);
2800 list_del_init(&t_outdated->work.entry);
2801 proc->outstanding_txns--;
2802 }
2803 }
2804 } else {
2805 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2806 }
2807 if (!pending_async)
2808 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2810 proc->outstanding_txns++;
2811 binder_inner_proc_unlock(proc);
2812 binder_node_unlock(node);
2815 * To reduce potential contention, free the outdated transaction and
2816 * buffer after releasing the locks.
2818 if (t_outdated) {
2819 struct binder_buffer *buffer = t_outdated->buffer;
2821 t_outdated->buffer = NULL;
2822 buffer->transaction = NULL;
2823 trace_binder_transaction_update_buffer_release(buffer);
2824 binder_release_entire_buffer(proc, NULL, buffer, false);
2825 binder_alloc_free_buf(&proc->alloc, buffer);
2826 kfree(t_outdated);
2827 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2828 }
2830 if (oneway && frozen)
2831 return BR_TRANSACTION_PENDING_FROZEN;
2833 return 0;
2834 }
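/*
 * Caller-side summary (condensed from binder_transaction() below):
 * 0 means the work was queued and a target thread woken;
 * BR_TRANSACTION_PENDING_FROZEN also means queued, but the caller
 * downgrades tcomplete to BINDER_WORK_TRANSACTION_PENDING;
 * BR_DEAD_REPLY and BR_FROZEN_REPLY abort the transaction.
 */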
2837 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2838 * @node: struct binder_node for which to get refs
2839 * @procp: returns @node->proc if valid
2840 * @error: if no @procp then returns BR_DEAD_REPLY
2842 * User-space normally keeps the node alive when creating a transaction
2843 * since it has a reference to the target. The local strong ref keeps it
2844 * alive if the sending process dies before the target process processes
2845 * the transaction. If the source process is malicious or has a reference
2846 * counting bug, relying on the local strong ref can fail.
2848 * Since user-space can cause the local strong ref to go away, we also take
2849 * a tmpref on the node to ensure it survives while we are constructing
2850 * the transaction. We also need a tmpref on the proc while we are
2851 * constructing the transaction, so we take that here as well.
2853 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2854 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2855 * target proc has died, @error is set to BR_DEAD_REPLY.
2857 static struct binder_node *binder_get_node_refs_for_txn(
2858 struct binder_node *node,
2859 struct binder_proc **procp,
2860 uint32_t *error)
2861 {
2862 struct binder_node *target_node = NULL;
2864 binder_node_inner_lock(node);
2865 if (node->proc) {
2866 target_node = node;
2867 binder_inc_node_nilocked(node, 1, 0, NULL);
2868 binder_inc_node_tmpref_ilocked(node);
2869 node->proc->tmp_ref++;
2870 *procp = node->proc;
2871 } else
2872 *error = BR_DEAD_REPLY;
2873 binder_node_inner_unlock(node);
2874 return target_node;
2875 }
2878 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2879 uint32_t command, int32_t param)
2880 {
2881 struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2883 if (!from) {
2884 /* annotation for sparse */
2885 __release(&from->proc->inner_lock);
2886 return;
2887 }
2889 /* don't override existing errors */
2890 if (from->ee.command == BR_OK)
2891 binder_set_extended_error(&from->ee, id, command, param);
2892 binder_inner_proc_unlock(from->proc);
2893 binder_thread_dec_tmpref(from);
2894 }
2896 static void binder_transaction(struct binder_proc *proc,
2897 struct binder_thread *thread,
2898 struct binder_transaction_data *tr, int reply,
2899 binder_size_t extra_buffers_size)
2900 {
2901 int ret;
2902 struct binder_transaction *t;
2903 struct binder_work *w;
2904 struct binder_work *tcomplete;
2905 binder_size_t buffer_offset = 0;
2906 binder_size_t off_start_offset, off_end_offset;
2907 binder_size_t off_min;
2908 binder_size_t sg_buf_offset, sg_buf_end_offset;
2909 binder_size_t user_offset = 0;
2910 struct binder_proc *target_proc = NULL;
2911 struct binder_thread *target_thread = NULL;
2912 struct binder_node *target_node = NULL;
2913 struct binder_transaction *in_reply_to = NULL;
2914 struct binder_transaction_log_entry *e;
2915 uint32_t return_error = 0;
2916 uint32_t return_error_param = 0;
2917 uint32_t return_error_line = 0;
2918 binder_size_t last_fixup_obj_off = 0;
2919 binder_size_t last_fixup_min_off = 0;
2920 struct binder_context *context = proc->context;
2921 int t_debug_id = atomic_inc_return(&binder_last_id);
2922 ktime_t t_start_time = ktime_get();
2923 char *secctx = NULL;
2924 u32 secctx_sz = 0;
2925 struct list_head sgc_head;
2926 struct list_head pf_head;
2927 const void __user *user_buffer = (const void __user *)
2928 (uintptr_t)tr->data.ptr.buffer;
2929 INIT_LIST_HEAD(&sgc_head);
2930 INIT_LIST_HEAD(&pf_head);
2932 e = binder_transaction_log_add(&binder_transaction_log);
2933 e->debug_id = t_debug_id;
2934 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2935 e->from_proc = proc->pid;
2936 e->from_thread = thread->pid;
2937 e->target_handle = tr->target.handle;
2938 e->data_size = tr->data_size;
2939 e->offsets_size = tr->offsets_size;
2940 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2942 binder_inner_proc_lock(proc);
2943 binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
2944 binder_inner_proc_unlock(proc);
2946 if (reply) {
2947 binder_inner_proc_lock(proc);
2948 in_reply_to = thread->transaction_stack;
2949 if (in_reply_to == NULL) {
2950 binder_inner_proc_unlock(proc);
2951 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2952 proc->pid, thread->pid);
2953 return_error = BR_FAILED_REPLY;
2954 return_error_param = -EPROTO;
2955 return_error_line = __LINE__;
2956 goto err_empty_call_stack;
2958 if (in_reply_to->to_thread != thread) {
2959 spin_lock(&in_reply_to->lock);
2960 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2961 proc->pid, thread->pid, in_reply_to->debug_id,
2962 in_reply_to->to_proc ?
2963 in_reply_to->to_proc->pid : 0,
2964 in_reply_to->to_thread ?
2965 in_reply_to->to_thread->pid : 0);
2966 spin_unlock(&in_reply_to->lock);
2967 binder_inner_proc_unlock(proc);
2968 return_error = BR_FAILED_REPLY;
2969 return_error_param = -EPROTO;
2970 return_error_line = __LINE__;
2972 goto err_bad_call_stack;
2974 thread->transaction_stack = in_reply_to->to_parent;
2975 binder_inner_proc_unlock(proc);
2976 binder_set_nice(in_reply_to->saved_priority);
2977 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2978 if (target_thread == NULL) {
2979 /* annotation for sparse */
2980 __release(&target_thread->proc->inner_lock);
2981 binder_txn_error("%d:%d reply target not found\n",
2982 thread->pid, proc->pid);
2983 return_error = BR_DEAD_REPLY;
2984 return_error_line = __LINE__;
2985 goto err_dead_binder;
2987 if (target_thread->transaction_stack != in_reply_to) {
2988 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2989 proc->pid, thread->pid,
2990 target_thread->transaction_stack ?
2991 target_thread->transaction_stack->debug_id : 0,
2992 in_reply_to->debug_id);
2993 binder_inner_proc_unlock(target_thread->proc);
2994 return_error = BR_FAILED_REPLY;
2995 return_error_param = -EPROTO;
2996 return_error_line = __LINE__;
2998 target_thread = NULL;
2999 goto err_dead_binder;
3001 target_proc = target_thread->proc;
3002 target_proc->tmp_ref++;
3003 binder_inner_proc_unlock(target_thread->proc);
3005 if (tr->target.handle) {
3006 struct binder_ref *ref;
3008 /*
3009 * There must already be a strong ref
3010 * on this node. If so, do a strong
3011 * increment on the node to ensure it
3012 * stays alive until the transaction is
3013 * complete.
3014 */
3015 binder_proc_lock(proc);
3016 ref = binder_get_ref_olocked(proc, tr->target.handle,
3019 target_node = binder_get_node_refs_for_txn(
3020 ref->node, &target_proc,
3023 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3024 proc->pid, thread->pid, tr->target.handle);
3025 return_error = BR_FAILED_REPLY;
3027 binder_proc_unlock(proc);
3029 mutex_lock(&context->context_mgr_node_lock);
3030 target_node = context->binder_context_mgr_node;
3032 target_node = binder_get_node_refs_for_txn(
3033 target_node, &target_proc,
3036 return_error = BR_DEAD_REPLY;
3037 mutex_unlock(&context->context_mgr_node_lock);
3038 if (target_node && target_proc->pid == proc->pid) {
3039 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3040 proc->pid, thread->pid);
3041 return_error = BR_FAILED_REPLY;
3042 return_error_param = -EINVAL;
3043 return_error_line = __LINE__;
3044 goto err_invalid_target_handle;
3048 binder_txn_error("%d:%d cannot find target node\n",
3049 thread->pid, proc->pid);
3050 /*
3051 * return_error is set above
3052 */
3053 return_error_param = -EINVAL;
3054 return_error_line = __LINE__;
3055 goto err_dead_binder;
3057 e->to_node = target_node->debug_id;
3058 if (WARN_ON(proc == target_proc)) {
3059 binder_txn_error("%d:%d self transactions not allowed\n",
3060 thread->pid, proc->pid);
3061 return_error = BR_FAILED_REPLY;
3062 return_error_param = -EINVAL;
3063 return_error_line = __LINE__;
3064 goto err_invalid_target_handle;
3066 if (security_binder_transaction(proc->cred,
3067 target_proc->cred) < 0) {
3068 binder_txn_error("%d:%d transaction credentials failed\n",
3069 thread->pid, proc->pid);
3070 return_error = BR_FAILED_REPLY;
3071 return_error_param = -EPERM;
3072 return_error_line = __LINE__;
3073 goto err_invalid_target_handle;
3075 binder_inner_proc_lock(proc);
3077 w = list_first_entry_or_null(&thread->todo,
3078 struct binder_work, entry);
3079 if (!(tr->flags & TF_ONE_WAY) && w &&
3080 w->type == BINDER_WORK_TRANSACTION) {
3081 /*
3082 * Do not allow new outgoing transaction from a
3083 * thread that has a transaction at the head of
3084 * its todo list. Only need to check the head
3085 * because binder_select_thread_ilocked picks a
3086 * thread from proc->waiting_threads to enqueue
3087 * the transaction, and nothing is queued to the
3088 * todo list while the thread is on waiting_threads.
3089 */
3090 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3091 proc->pid, thread->pid);
3092 binder_inner_proc_unlock(proc);
3093 return_error = BR_FAILED_REPLY;
3094 return_error_param = -EPROTO;
3095 return_error_line = __LINE__;
3096 goto err_bad_todo_list;
3099 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3100 struct binder_transaction *tmp;
3102 tmp = thread->transaction_stack;
3103 if (tmp->to_thread != thread) {
3104 spin_lock(&tmp->lock);
3105 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3106 proc->pid, thread->pid, tmp->debug_id,
3107 tmp->to_proc ? tmp->to_proc->pid : 0,
3109 tmp->to_thread->pid : 0);
3110 spin_unlock(&tmp->lock);
3111 binder_inner_proc_unlock(proc);
3112 return_error = BR_FAILED_REPLY;
3113 return_error_param = -EPROTO;
3114 return_error_line = __LINE__;
3115 goto err_bad_call_stack;
3118 struct binder_thread *from;
3120 spin_lock(&tmp->lock);
3122 if (from && from->proc == target_proc) {
3123 atomic_inc(&from->tmp_ref);
3124 target_thread = from;
3125 spin_unlock(&tmp->lock);
3128 spin_unlock(&tmp->lock);
3129 tmp = tmp->from_parent;
3132 binder_inner_proc_unlock(proc);
3135 e->to_thread = target_thread->pid;
3136 e->to_proc = target_proc->pid;
3138 /* TODO: reuse incoming transaction for reply */
3139 t = kzalloc(sizeof(*t), GFP_KERNEL);
3141 binder_txn_error("%d:%d cannot allocate transaction\n",
3142 thread->pid, proc->pid);
3143 return_error = BR_FAILED_REPLY;
3144 return_error_param = -ENOMEM;
3145 return_error_line = __LINE__;
3146 goto err_alloc_t_failed;
3148 INIT_LIST_HEAD(&t->fd_fixups);
3149 binder_stats_created(BINDER_STAT_TRANSACTION);
3150 spin_lock_init(&t->lock);
3152 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3153 if (tcomplete == NULL) {
3154 binder_txn_error("%d:%d cannot allocate work for transaction\n",
3155 thread->pid, proc->pid);
3156 return_error = BR_FAILED_REPLY;
3157 return_error_param = -ENOMEM;
3158 return_error_line = __LINE__;
3159 goto err_alloc_tcomplete_failed;
3161 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3163 t->debug_id = t_debug_id;
3164 t->start_time = t_start_time;
3167 binder_debug(BINDER_DEBUG_TRANSACTION,
3168 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3169 proc->pid, thread->pid, t->debug_id,
3170 target_proc->pid, target_thread->pid,
3171 (u64)tr->data.ptr.buffer,
3172 (u64)tr->data.ptr.offsets,
3173 (u64)tr->data_size, (u64)tr->offsets_size,
3174 (u64)extra_buffers_size);
3176 binder_debug(BINDER_DEBUG_TRANSACTION,
3177 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3178 proc->pid, thread->pid, t->debug_id,
3179 target_proc->pid, target_node->debug_id,
3180 (u64)tr->data.ptr.buffer,
3181 (u64)tr->data.ptr.offsets,
3182 (u64)tr->data_size, (u64)tr->offsets_size,
3183 (u64)extra_buffers_size);
3185 if (!reply && !(tr->flags & TF_ONE_WAY))
3186 t->from = thread;
3187 else
3188 t->from = NULL;
3189 t->from_pid = proc->pid;
3190 t->from_tid = thread->pid;
3191 t->sender_euid = task_euid(proc->tsk);
3192 t->to_proc = target_proc;
3193 t->to_thread = target_thread;
3194 t->code = tr->code;
3195 t->flags = tr->flags;
3196 t->priority = task_nice(current);
3198 if (target_node && target_node->txn_security_ctx) {
3202 security_cred_getsecid(proc->cred, &secid);
3203 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3205 binder_txn_error("%d:%d failed to get security context\n",
3206 thread->pid, proc->pid);
3207 return_error = BR_FAILED_REPLY;
3208 return_error_param = ret;
3209 return_error_line = __LINE__;
3210 goto err_get_secctx_failed;
3212 added_size = ALIGN(secctx_sz, sizeof(u64));
3213 extra_buffers_size += added_size;
3214 if (extra_buffers_size < added_size) {
3215 binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3216 thread->pid, proc->pid);
3217 return_error = BR_FAILED_REPLY;
3218 return_error_param = -EINVAL;
3219 return_error_line = __LINE__;
3220 goto err_bad_extra_size;
3224 trace_binder_transaction(reply, t, target_node);
3226 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3227 tr->offsets_size, extra_buffers_size,
3228 !reply && (t->flags & TF_ONE_WAY));
3229 if (IS_ERR(t->buffer)) {
3232 ret = PTR_ERR(t->buffer);
3233 s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3234 : (ret == -ENOSPC) ? ": no space left"
3235 : (ret == -ENOMEM) ? ": memory allocation failed"
3236 : "";
3237 binder_txn_error("cannot allocate buffer%s", s);
3239 return_error_param = PTR_ERR(t->buffer);
3240 return_error = return_error_param == -ESRCH ?
3241 BR_DEAD_REPLY : BR_FAILED_REPLY;
3242 return_error_line = __LINE__;
3244 goto err_binder_alloc_buf_failed;
3248 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3249 ALIGN(tr->offsets_size, sizeof(void *)) +
3250 ALIGN(extra_buffers_size, sizeof(void *)) -
3251 ALIGN(secctx_sz, sizeof(u64));
3253 t->security_ctx = t->buffer->user_data + buf_offset;
3254 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3255 t->buffer, buf_offset,
3258 t->security_ctx = 0;
3261 security_release_secctx(secctx, secctx_sz);
3264 t->buffer->debug_id = t->debug_id;
3265 t->buffer->transaction = t;
3266 t->buffer->target_node = target_node;
3267 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3268 trace_binder_transaction_alloc_buf(t->buffer);
3270 if (binder_alloc_copy_user_to_buffer(
3271 &target_proc->alloc,
3273 ALIGN(tr->data_size, sizeof(void *)),
3274 (const void __user *)
3275 (uintptr_t)tr->data.ptr.offsets,
3276 tr->offsets_size)) {
3277 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3278 proc->pid, thread->pid);
3279 return_error = BR_FAILED_REPLY;
3280 return_error_param = -EFAULT;
3281 return_error_line = __LINE__;
3282 goto err_copy_data_failed;
3284 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3285 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3286 proc->pid, thread->pid, (u64)tr->offsets_size);
3287 return_error = BR_FAILED_REPLY;
3288 return_error_param = -EINVAL;
3289 return_error_line = __LINE__;
3290 goto err_bad_offset;
3292 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3293 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3294 proc->pid, thread->pid,
3295 (u64)extra_buffers_size);
3296 return_error = BR_FAILED_REPLY;
3297 return_error_param = -EINVAL;
3298 return_error_line = __LINE__;
3299 goto err_bad_offset;
3301 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3302 buffer_offset = off_start_offset;
3303 off_end_offset = off_start_offset + tr->offsets_size;
3304 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3305 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3306 ALIGN(secctx_sz, sizeof(u64));
3308 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3309 buffer_offset += sizeof(binder_size_t)) {
3310 struct binder_object_header *hdr;
3311 size_t object_size;
3312 struct binder_object object;
3313 binder_size_t object_offset;
3314 binder_size_t copy_size;
3316 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3320 sizeof(object_offset))) {
3321 binder_txn_error("%d:%d copy offset from buffer failed\n",
3322 thread->pid, proc->pid);
3323 return_error = BR_FAILED_REPLY;
3324 return_error_param = -EINVAL;
3325 return_error_line = __LINE__;
3326 goto err_bad_offset;
3329 /*
3330 * Copy the source user buffer up to the next object
3331 * that will be processed.
3332 */
3333 copy_size = object_offset - user_offset;
3334 if (copy_size && (user_offset > object_offset ||
3335 binder_alloc_copy_user_to_buffer(
3336 &target_proc->alloc,
3337 t->buffer, user_offset,
3338 user_buffer + user_offset,
3340 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3341 proc->pid, thread->pid);
3342 return_error = BR_FAILED_REPLY;
3343 return_error_param = -EFAULT;
3344 return_error_line = __LINE__;
3345 goto err_copy_data_failed;
3347 object_size = binder_get_object(target_proc, user_buffer,
3348 t->buffer, object_offset, &object);
3349 if (object_size == 0 || object_offset < off_min) {
3350 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3351 proc->pid, thread->pid,
3354 (u64)t->buffer->data_size);
3355 return_error = BR_FAILED_REPLY;
3356 return_error_param = -EINVAL;
3357 return_error_line = __LINE__;
3358 goto err_bad_offset;
3360 /*
3361 * Set offset to the next buffer fragment to be
3362 * copied
3363 */
3364 user_offset = object_offset + object_size;
3366 hdr = &object.hdr;
3367 off_min = object_offset + object_size;
3368 switch (hdr->type) {
3369 case BINDER_TYPE_BINDER:
3370 case BINDER_TYPE_WEAK_BINDER: {
3371 struct flat_binder_object *fp;
3373 fp = to_flat_binder_object(hdr);
3374 ret = binder_translate_binder(fp, t, thread);
3377 binder_alloc_copy_to_buffer(&target_proc->alloc,
3381 binder_txn_error("%d:%d translate binder failed\n",
3382 thread->pid, proc->pid);
3383 return_error = BR_FAILED_REPLY;
3384 return_error_param = ret;
3385 return_error_line = __LINE__;
3386 goto err_translate_failed;
3389 case BINDER_TYPE_HANDLE:
3390 case BINDER_TYPE_WEAK_HANDLE: {
3391 struct flat_binder_object *fp;
3393 fp = to_flat_binder_object(hdr);
3394 ret = binder_translate_handle(fp, t, thread);
3396 binder_alloc_copy_to_buffer(&target_proc->alloc,
3400 binder_txn_error("%d:%d translate handle failed\n",
3401 thread->pid, proc->pid);
3402 return_error = BR_FAILED_REPLY;
3403 return_error_param = ret;
3404 return_error_line = __LINE__;
3405 goto err_translate_failed;
3409 case BINDER_TYPE_FD: {
3410 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3411 binder_size_t fd_offset = object_offset +
3412 (uintptr_t)&fp->fd - (uintptr_t)fp;
3413 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3414 thread, in_reply_to);
3418 binder_alloc_copy_to_buffer(&target_proc->alloc,
3422 binder_txn_error("%d:%d translate fd failed\n",
3423 thread->pid, proc->pid);
3424 return_error = BR_FAILED_REPLY;
3425 return_error_param = ret;
3426 return_error_line = __LINE__;
3427 goto err_translate_failed;
3430 case BINDER_TYPE_FDA: {
3431 struct binder_object ptr_object;
3432 binder_size_t parent_offset;
3433 struct binder_object user_object;
3434 size_t user_parent_size;
3435 struct binder_fd_array_object *fda =
3436 to_binder_fd_array_object(hdr);
3437 size_t num_valid = (buffer_offset - off_start_offset) /
3438 sizeof(binder_size_t);
3439 struct binder_buffer_object *parent =
3440 binder_validate_ptr(target_proc, t->buffer,
3441 &ptr_object, fda->parent,
3446 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3447 proc->pid, thread->pid);
3448 return_error = BR_FAILED_REPLY;
3449 return_error_param = -EINVAL;
3450 return_error_line = __LINE__;
3451 goto err_bad_parent;
3453 if (!binder_validate_fixup(target_proc, t->buffer,
3458 last_fixup_min_off)) {
3459 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3460 proc->pid, thread->pid);
3461 return_error = BR_FAILED_REPLY;
3462 return_error_param = -EINVAL;
3463 return_error_line = __LINE__;
3464 goto err_bad_parent;
3467 * We need to read the user version of the parent
3468 * object to get the original user offset
3471 binder_get_object(proc, user_buffer, t->buffer,
3472 parent_offset, &user_object);
3473 if (user_parent_size != sizeof(user_object.bbo)) {
3474 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3475 proc->pid, thread->pid,
3477 sizeof(user_object.bbo));
3478 return_error = BR_FAILED_REPLY;
3479 return_error_param = -EINVAL;
3480 return_error_line = __LINE__;
3481 goto err_bad_parent;
3483 ret = binder_translate_fd_array(&pf_head, fda,
3484 user_buffer, parent,
3485 &user_object.bbo, t,
3486 thread, in_reply_to);
3488 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3493 binder_txn_error("%d:%d translate fd array failed\n",
3494 thread->pid, proc->pid);
3495 return_error = BR_FAILED_REPLY;
3496 return_error_param = ret > 0 ? -EINVAL : ret;
3497 return_error_line = __LINE__;
3498 goto err_translate_failed;
3500 last_fixup_obj_off = parent_offset;
3501 last_fixup_min_off =
3502 fda->parent_offset + sizeof(u32) * fda->num_fds;
3504 case BINDER_TYPE_PTR: {
3505 struct binder_buffer_object *bp =
3506 to_binder_buffer_object(hdr);
3507 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3510 if (bp->length > buf_left) {
3511 binder_user_error("%d:%d got transaction with too large buffer\n",
3512 proc->pid, thread->pid);
3513 return_error = BR_FAILED_REPLY;
3514 return_error_param = -EINVAL;
3515 return_error_line = __LINE__;
3516 goto err_bad_offset;
3518 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3519 (const void __user *)(uintptr_t)bp->buffer,
3522 binder_txn_error("%d:%d deferred copy failed\n",
3523 thread->pid, proc->pid);
3524 return_error = BR_FAILED_REPLY;
3525 return_error_param = ret;
3526 return_error_line = __LINE__;
3527 goto err_translate_failed;
3529 /* Fixup buffer pointer to target proc address space */
3530 bp->buffer = t->buffer->user_data + sg_buf_offset;
3531 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3533 num_valid = (buffer_offset - off_start_offset) /
3534 sizeof(binder_size_t);
3535 ret = binder_fixup_parent(&pf_head, t,
3540 last_fixup_min_off);
3542 binder_alloc_copy_to_buffer(&target_proc->alloc,
3546 binder_txn_error("%d:%d failed to fixup parent\n",
3547 thread->pid, proc->pid);
3548 return_error = BR_FAILED_REPLY;
3549 return_error_param = ret;
3550 return_error_line = __LINE__;
3551 goto err_translate_failed;
3553 last_fixup_obj_off = object_offset;
3554 last_fixup_min_off = 0;
3555 } break;
3556 default:
3557 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3558 proc->pid, thread->pid, hdr->type);
3559 return_error = BR_FAILED_REPLY;
3560 return_error_param = -EINVAL;
3561 return_error_line = __LINE__;
3562 goto err_bad_object_type;
3565 /* Done processing objects, copy the rest of the buffer */
3566 if (binder_alloc_copy_user_to_buffer(
3567 &target_proc->alloc,
3568 t->buffer, user_offset,
3569 user_buffer + user_offset,
3570 tr->data_size - user_offset)) {
3571 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3572 proc->pid, thread->pid);
3573 return_error = BR_FAILED_REPLY;
3574 return_error_param = -EFAULT;
3575 return_error_line = __LINE__;
3576 goto err_copy_data_failed;
3579 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3580 &sgc_head, &pf_head);
3582 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3583 proc->pid, thread->pid);
3584 return_error = BR_FAILED_REPLY;
3585 return_error_param = ret;
3586 return_error_line = __LINE__;
3587 goto err_copy_data_failed;
3589 if (t->buffer->oneway_spam_suspect)
3590 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3592 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3593 t->work.type = BINDER_WORK_TRANSACTION;
3596 binder_enqueue_thread_work(thread, tcomplete);
3597 binder_inner_proc_lock(target_proc);
3598 if (target_thread->is_dead) {
3599 return_error = BR_DEAD_REPLY;
3600 binder_inner_proc_unlock(target_proc);
3601 goto err_dead_proc_or_thread;
3603 BUG_ON(t->buffer->async_transaction != 0);
3604 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3605 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3606 target_proc->outstanding_txns++;
3607 binder_inner_proc_unlock(target_proc);
3608 wake_up_interruptible_sync(&target_thread->wait);
3609 binder_free_transaction(in_reply_to);
3610 } else if (!(t->flags & TF_ONE_WAY)) {
3611 BUG_ON(t->buffer->async_transaction != 0);
3612 binder_inner_proc_lock(proc);
3613 /*
3614 * Defer the TRANSACTION_COMPLETE, so we don't return to
3615 * userspace immediately; this allows the target process to
3616 * immediately start processing this transaction, reducing
3617 * latency. We will then return the TRANSACTION_COMPLETE when
3618 * the target replies (or there is an error).
3619 */
3620 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3622 t->from_parent = thread->transaction_stack;
3623 thread->transaction_stack = t;
3624 binder_inner_proc_unlock(proc);
3625 return_error = binder_proc_transaction(t,
3626 target_proc, target_thread);
3628 binder_inner_proc_lock(proc);
3629 binder_pop_transaction_ilocked(thread, t);
3630 binder_inner_proc_unlock(proc);
3631 goto err_dead_proc_or_thread;
3634 BUG_ON(target_node == NULL);
3635 BUG_ON(t->buffer->async_transaction != 1);
3636 return_error = binder_proc_transaction(t, target_proc, NULL);
3637 /*
3638 * Let the caller know when async transaction reaches a frozen
3639 * process and is put in a pending queue, waiting for the target
3640 * process to be unfrozen.
3641 */
3642 if (return_error == BR_TRANSACTION_PENDING_FROZEN)
3643 tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3644 binder_enqueue_thread_work(thread, tcomplete);
3645 if (return_error &&
3646 return_error != BR_TRANSACTION_PENDING_FROZEN)
3647 goto err_dead_proc_or_thread;
3650 binder_thread_dec_tmpref(target_thread);
3651 binder_proc_dec_tmpref(target_proc);
3653 binder_dec_node_tmpref(target_node);
3654 /*
3655 * write barrier to synchronize with initialization
3656 * of log entry
3657 */
3658 smp_wmb();
3659 WRITE_ONCE(e->debug_id_done, t_debug_id);
3660 return;
3662 err_dead_proc_or_thread:
3663 binder_txn_error("%d:%d dead process or thread\n",
3664 thread->pid, proc->pid);
3665 return_error_line = __LINE__;
3666 binder_dequeue_work(proc, tcomplete);
3667 err_translate_failed:
3668 err_bad_object_type:
3669 err_bad_offset:
3670 err_bad_parent:
3671 err_copy_data_failed:
3672 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3673 binder_free_txn_fixups(t);
3674 trace_binder_transaction_failed_buffer_release(t->buffer);
3675 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3676 buffer_offset, true);
3678 binder_dec_node_tmpref(target_node);
3680 t->buffer->transaction = NULL;
3681 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3682 err_binder_alloc_buf_failed:
3685 security_release_secctx(secctx, secctx_sz);
3686 err_get_secctx_failed:
3688 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3689 err_alloc_tcomplete_failed:
3690 if (trace_binder_txn_latency_free_enabled())
3691 binder_txn_latency_free(t);
3693 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3697 err_empty_call_stack:
3698 err_dead_binder:
3699 err_invalid_target_handle:
3700 if (target_node) {
3701 binder_dec_node(target_node, 1, 0);
3702 binder_dec_node_tmpref(target_node);
3703 }
3705 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3706 "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
3707 proc->pid, thread->pid, reply ? "reply" :
3708 (tr->flags & TF_ONE_WAY ? "async" : "call"),
3709 target_proc ? target_proc->pid : 0,
3710 target_thread ? target_thread->pid : 0,
3711 t_debug_id, return_error, return_error_param,
3712 (u64)tr->data_size, (u64)tr->offsets_size,
3716 binder_thread_dec_tmpref(target_thread);
3718 binder_proc_dec_tmpref(target_proc);
3721 struct binder_transaction_log_entry *fe;
3723 e->return_error = return_error;
3724 e->return_error_param = return_error_param;
3725 e->return_error_line = return_error_line;
3726 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3727 *fe = *e;
3728 /*
3729 * write barrier to synchronize with initialization
3730 * of log entry
3731 */
3732 smp_wmb();
3733 WRITE_ONCE(e->debug_id_done, t_debug_id);
3734 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3735 }
3737 BUG_ON(thread->return_error.cmd != BR_OK);
3739 binder_set_txn_from_error(in_reply_to, t_debug_id,
3740 return_error, return_error_param);
3741 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3742 binder_enqueue_thread_work(thread, &thread->return_error.work);
3743 binder_send_failed_reply(in_reply_to, return_error);
3745 binder_inner_proc_lock(proc);
3746 binder_set_extended_error(&thread->ee, t_debug_id,
3747 return_error, return_error_param);
3748 binder_inner_proc_unlock(proc);
3749 thread->return_error.cmd = return_error;
3750 binder_enqueue_thread_work(thread, &thread->return_error.work);
3755 * binder_free_buf() - free the specified buffer
3756 * @proc: binder proc that owns buffer
3757 * @buffer: buffer to be freed
3758 * @is_failure: failed to send transaction
3760 * If the buffer is for an async transaction, enqueue the next async
3761 * transaction from the node.
3763 * Cleanup buffer and free it.
3764 */
3765 static void
3766 binder_free_buf(struct binder_proc *proc,
3767 struct binder_thread *thread,
3768 struct binder_buffer *buffer, bool is_failure)
3769 {
3770 binder_inner_proc_lock(proc);
3771 if (buffer->transaction) {
3772 buffer->transaction->buffer = NULL;
3773 buffer->transaction = NULL;
3775 binder_inner_proc_unlock(proc);
3776 if (buffer->async_transaction && buffer->target_node) {
3777 struct binder_node *buf_node;
3778 struct binder_work *w;
3780 buf_node = buffer->target_node;
3781 binder_node_inner_lock(buf_node);
3782 BUG_ON(!buf_node->has_async_transaction);
3783 BUG_ON(buf_node->proc != proc);
3784 w = binder_dequeue_work_head_ilocked(
3785 &buf_node->async_todo);
3787 buf_node->has_async_transaction = false;
3789 binder_enqueue_work_ilocked(
3791 binder_wakeup_proc_ilocked(proc);
3793 binder_node_inner_unlock(buf_node);
3795 trace_binder_transaction_buffer_release(buffer);
3796 binder_release_entire_buffer(proc, thread, buffer, is_failure);
3797 binder_alloc_free_buf(&proc->alloc, buffer);
3798 }
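/*
 * Userspace counterpart, a minimal sketch (illustrative only): once a
 * BR_TRANSACTION payload has been consumed, the receiver hands the
 * buffer back with BC_FREE_BUFFER, which reaches binder_free_buf()
 * via binder_thread_write() below:
 *
 *	struct {
 *		uint32_t cmd;
 *		binder_uintptr_t buffer;
 *	} __attribute__((packed)) wr = {
 *		.cmd = BC_FREE_BUFFER,
 *		.buffer = tr.data.ptr.buffer,
 *	};
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(wr),
 *		.write_buffer = (binder_uintptr_t)&wr,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */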
3800 static int binder_thread_write(struct binder_proc *proc,
3801 struct binder_thread *thread,
3802 binder_uintptr_t binder_buffer, size_t size,
3803 binder_size_t *consumed)
3804 {
3805 uint32_t cmd;
3806 struct binder_context *context = proc->context;
3807 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3808 void __user *ptr = buffer + *consumed;
3809 void __user *end = buffer + size;
3811 while (ptr < end && thread->return_error.cmd == BR_OK) {
3814 if (get_user(cmd, (uint32_t __user *)ptr))
3816 ptr += sizeof(uint32_t);
3817 trace_binder_command(cmd);
3818 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3819 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3820 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3821 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3822 }
3823 switch (cmd) {
3824 case BC_INCREFS:
3825 case BC_ACQUIRE:
3826 case BC_RELEASE:
3827 case BC_DECREFS: {
3828 uint32_t target;
3829 const char *debug_string;
3830 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3831 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3832 struct binder_ref_data rdata;
3834 if (get_user(target, (uint32_t __user *)ptr))
3837 ptr += sizeof(uint32_t);
3839 if (increment && !target) {
3840 struct binder_node *ctx_mgr_node;
3842 mutex_lock(&context->context_mgr_node_lock);
3843 ctx_mgr_node = context->binder_context_mgr_node;
3845 if (ctx_mgr_node->proc == proc) {
3846 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3847 proc->pid, thread->pid);
3848 mutex_unlock(&context->context_mgr_node_lock);
3851 ret = binder_inc_ref_for_node(
3853 strong, NULL, &rdata);
3855 mutex_unlock(&context->context_mgr_node_lock);
3858 ret = binder_update_ref_for_handle(
3859 proc, target, increment, strong,
3861 if (!ret && rdata.desc != target) {
3862 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3863 proc->pid, thread->pid,
3864 target, rdata.desc);
3865 }
3866 switch (cmd) {
3867 case BC_INCREFS:
3868 debug_string = "IncRefs";
3869 break;
3870 case BC_ACQUIRE:
3871 debug_string = "Acquire";
3872 break;
3873 case BC_RELEASE:
3874 debug_string = "Release";
3875 break;
3876 case BC_DECREFS:
3877 default:
3878 debug_string = "DecRefs";
3879 break;
3880 }
3881 if (ret) {
3882 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3883 proc->pid, thread->pid, debug_string,
3884 strong, target, ret);
3885 break;
3886 }
3887 binder_debug(BINDER_DEBUG_USER_REFS,
3888 "%d:%d %s ref %d desc %d s %d w %d\n",
3889 proc->pid, thread->pid, debug_string,
3890 rdata.debug_id, rdata.desc, rdata.strong,
3894 case BC_INCREFS_DONE:
3895 case BC_ACQUIRE_DONE: {
3896 binder_uintptr_t node_ptr;
3897 binder_uintptr_t cookie;
3898 struct binder_node *node;
3901 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3903 ptr += sizeof(binder_uintptr_t);
3904 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3906 ptr += sizeof(binder_uintptr_t);
3907 node = binder_get_node(proc, node_ptr);
3909 binder_user_error("%d:%d %s u%016llx no match\n",
3910 proc->pid, thread->pid,
3911 cmd == BC_INCREFS_DONE ?
3917 if (cookie != node->cookie) {
3918 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3919 proc->pid, thread->pid,
3920 cmd == BC_INCREFS_DONE ?
3921 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3922 (u64)node_ptr, node->debug_id,
3923 (u64)cookie, (u64)node->cookie);
3924 binder_put_node(node);
3927 binder_node_inner_lock(node);
3928 if (cmd == BC_ACQUIRE_DONE) {
3929 if (node->pending_strong_ref == 0) {
3930 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3931 proc->pid, thread->pid,
3933 binder_node_inner_unlock(node);
3934 binder_put_node(node);
3937 node->pending_strong_ref = 0;
3939 if (node->pending_weak_ref == 0) {
3940 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3941 proc->pid, thread->pid,
3943 binder_node_inner_unlock(node);
3944 binder_put_node(node);
3947 node->pending_weak_ref = 0;
3949 free_node = binder_dec_node_nilocked(node,
3950 cmd == BC_ACQUIRE_DONE, 0);
3952 binder_debug(BINDER_DEBUG_USER_REFS,
3953 "%d:%d %s node %d ls %d lw %d tr %d\n",
3954 proc->pid, thread->pid,
3955 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3956 node->debug_id, node->local_strong_refs,
3957 node->local_weak_refs, node->tmp_refs);
3958 binder_node_inner_unlock(node);
3959 binder_put_node(node);
3962 case BC_ATTEMPT_ACQUIRE:
3963 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3965 case BC_ACQUIRE_RESULT:
3966 pr_err("BC_ACQUIRE_RESULT not supported\n");
3969 case BC_FREE_BUFFER: {
3970 binder_uintptr_t data_ptr;
3971 struct binder_buffer *buffer;
3973 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3975 ptr += sizeof(binder_uintptr_t);
3977 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3979 if (IS_ERR_OR_NULL(buffer)) {
3980 if (PTR_ERR(buffer) == -EPERM) {
3982 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3983 proc->pid, thread->pid,
3987 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3988 proc->pid, thread->pid,
3993 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3994 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3995 proc->pid, thread->pid, (u64)data_ptr,
3997 buffer->transaction ? "active" : "finished");
3998 binder_free_buf(proc, thread, buffer, false);
4002 case BC_TRANSACTION_SG:
4003 case BC_REPLY_SG: {
4004 struct binder_transaction_data_sg tr;
4006 if (copy_from_user(&tr, ptr, sizeof(tr)))
4009 binder_transaction(proc, thread, &tr.transaction_data,
4010 cmd == BC_REPLY_SG, tr.buffers_size);
4013 case BC_TRANSACTION:
4014 case BC_REPLY: {
4015 struct binder_transaction_data tr;
4017 if (copy_from_user(&tr, ptr, sizeof(tr)))
4020 binder_transaction(proc, thread, &tr,
4021 cmd == BC_REPLY, 0);
4025 case BC_REGISTER_LOOPER:
4026 binder_debug(BINDER_DEBUG_THREADS,
4027 "%d:%d BC_REGISTER_LOOPER\n",
4028 proc->pid, thread->pid);
4029 binder_inner_proc_lock(proc);
4030 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4031 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4032 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4033 proc->pid, thread->pid);
4034 } else if (proc->requested_threads == 0) {
4035 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4036 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4037 proc->pid, thread->pid);
4039 proc->requested_threads--;
4040 proc->requested_threads_started++;
4042 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4043 binder_inner_proc_unlock(proc);
4045 case BC_ENTER_LOOPER:
4046 binder_debug(BINDER_DEBUG_THREADS,
4047 "%d:%d BC_ENTER_LOOPER\n",
4048 proc->pid, thread->pid);
4049 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4050 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4051 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4052 proc->pid, thread->pid);
4054 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4056 case BC_EXIT_LOOPER:
4057 binder_debug(BINDER_DEBUG_THREADS,
4058 "%d:%d BC_EXIT_LOOPER\n",
4059 proc->pid, thread->pid);
4060 thread->looper |= BINDER_LOOPER_STATE_EXITED;
4063 case BC_REQUEST_DEATH_NOTIFICATION:
4064 case BC_CLEAR_DEATH_NOTIFICATION: {
4065 uint32_t target;
4066 binder_uintptr_t cookie;
4067 struct binder_ref *ref;
4068 struct binder_ref_death *death = NULL;
4070 if (get_user(target, (uint32_t __user *)ptr))
4072 ptr += sizeof(uint32_t);
4073 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4075 ptr += sizeof(binder_uintptr_t);
4076 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4077 /*
4078 * Allocate memory for death notification
4079 * before taking lock
4080 */
4081 death = kzalloc(sizeof(*death), GFP_KERNEL);
4082 if (death == NULL) {
4083 WARN_ON(thread->return_error.cmd !=
4085 thread->return_error.cmd = BR_ERROR;
4086 binder_enqueue_thread_work(
4088 &thread->return_error.work);
4090 BINDER_DEBUG_FAILED_TRANSACTION,
4091 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4092 proc->pid, thread->pid);
4096 binder_proc_lock(proc);
4097 ref = binder_get_ref_olocked(proc, target, false);
4099 binder_user_error("%d:%d %s invalid ref %d\n",
4100 proc->pid, thread->pid,
4101 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4102 "BC_REQUEST_DEATH_NOTIFICATION" :
4103 "BC_CLEAR_DEATH_NOTIFICATION",
4105 binder_proc_unlock(proc);
4110 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4111 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4112 proc->pid, thread->pid,
4113 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4114 "BC_REQUEST_DEATH_NOTIFICATION" :
4115 "BC_CLEAR_DEATH_NOTIFICATION",
4116 (u64)cookie, ref->data.debug_id,
4117 ref->data.desc, ref->data.strong,
4118 ref->data.weak, ref->node->debug_id);
4120 binder_node_lock(ref->node);
4121 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4123 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4124 proc->pid, thread->pid);
4125 binder_node_unlock(ref->node);
4126 binder_proc_unlock(proc);
4130 binder_stats_created(BINDER_STAT_DEATH);
4131 INIT_LIST_HEAD(&death->work.entry);
4132 death->cookie = cookie;
4134 if (ref->node->proc == NULL) {
4135 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4137 binder_inner_proc_lock(proc);
4138 binder_enqueue_work_ilocked(
4139 &ref->death->work, &proc->todo);
4140 binder_wakeup_proc_ilocked(proc);
4141 binder_inner_proc_unlock(proc);
4144 if (ref->death == NULL) {
4145 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4146 proc->pid, thread->pid);
4147 binder_node_unlock(ref->node);
4148 binder_proc_unlock(proc);
4152 if (death->cookie != cookie) {
4153 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4154 proc->pid, thread->pid,
4157 binder_node_unlock(ref->node);
4158 binder_proc_unlock(proc);
4162 binder_inner_proc_lock(proc);
4163 if (list_empty(&death->work.entry)) {
4164 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4165 if (thread->looper &
4166 (BINDER_LOOPER_STATE_REGISTERED |
4167 BINDER_LOOPER_STATE_ENTERED))
4168 binder_enqueue_thread_work_ilocked(
4172 binder_enqueue_work_ilocked(
4175 binder_wakeup_proc_ilocked(
4179 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4180 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4182 binder_inner_proc_unlock(proc);
4184 binder_node_unlock(ref->node);
4185 binder_proc_unlock(proc);
4187 case BC_DEAD_BINDER_DONE: {
4188 struct binder_work *w;
4189 binder_uintptr_t cookie;
4190 struct binder_ref_death *death = NULL;
4192 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4195 ptr += sizeof(cookie);
4196 binder_inner_proc_lock(proc);
4197 list_for_each_entry(w, &proc->delivered_death,
4199 struct binder_ref_death *tmp_death =
4201 struct binder_ref_death,
4204 if (tmp_death->cookie == cookie) {
4209 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4210 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4211 proc->pid, thread->pid, (u64)cookie,
4213 if (death == NULL) {
4214 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4215 proc->pid, thread->pid, (u64)cookie);
4216 binder_inner_proc_unlock(proc);
4219 binder_dequeue_work_ilocked(&death->work);
4220 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4221 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4222 if (thread->looper &
4223 (BINDER_LOOPER_STATE_REGISTERED |
4224 BINDER_LOOPER_STATE_ENTERED))
4225 binder_enqueue_thread_work_ilocked(
4226 thread, &death->work);
4228 binder_enqueue_work_ilocked(
4231 binder_wakeup_proc_ilocked(proc);
4234 binder_inner_proc_unlock(proc);
4238 pr_err("%d:%d unknown command %u\n",
4239 proc->pid, thread->pid, cmd);
4242 *consumed = ptr - buffer;
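/*
 * Illustrative sketch (not part of the driver): a minimal user-space
 * helper, assuming only the UAPI header, showing how BC_* commands are
 * packed into the write buffer that binder_thread_write() consumes
 * above: a u32 command immediately followed by its payload, if any.
 */
#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>

/* Append BC_FREE_BUFFER plus the buffer address it frees. */
static size_t pack_free_buffer(char *out, binder_uintptr_t buffer_addr)
{
	uint32_t cmd = BC_FREE_BUFFER;
	size_t off = 0;

	memcpy(out + off, &cmd, sizeof(cmd));
	off += sizeof(cmd);
	memcpy(out + off, &buffer_addr, sizeof(buffer_addr));
	off += sizeof(buffer_addr);
	return off;	/* caller adds this to bwr.write_size */
}

/* Append a payload-less command such as BC_ENTER_LOOPER. */
static size_t pack_bare_cmd(char *out, uint32_t cmd)
{
	memcpy(out, &cmd, sizeof(cmd));
	return sizeof(cmd);
}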
4247 static void binder_stat_br(struct binder_proc *proc,
4248 struct binder_thread *thread, uint32_t cmd)
4250 trace_binder_return(cmd);
4251 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4252 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4253 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4254 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4258 static int binder_put_node_cmd(struct binder_proc *proc,
4259 struct binder_thread *thread,
4261 binder_uintptr_t node_ptr,
4262 binder_uintptr_t node_cookie,
4264 uint32_t cmd, const char *cmd_name)
4266 void __user *ptr = *ptrp;
4268 if (put_user(cmd, (uint32_t __user *)ptr))
4270 ptr += sizeof(uint32_t);
4272 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4274 ptr += sizeof(binder_uintptr_t);
4276 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4278 ptr += sizeof(binder_uintptr_t);
4280 binder_stat_br(proc, thread, cmd);
4281 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4282 proc->pid, thread->pid, cmd_name, node_debug_id,
4283 (u64)node_ptr, (u64)node_cookie);
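/*
 * Illustrative sketch (an assumption about user-space decoding, not
 * driver code): binder_put_node_cmd() above emits one u32 command
 * immediately followed by two binder_uintptr_t values, so a packed
 * overlay struct matches the wire layout exactly.
 */
#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>

struct node_cmd_wire {
	uint32_t cmd;			/* BR_INCREFS, BR_ACQUIRE, ... */
	binder_uintptr_t ptr;		/* node->ptr */
	binder_uintptr_t cookie;	/* node->cookie */
} __attribute__((packed));

static size_t decode_node_cmd(const char *buf, struct node_cmd_wire *out)
{
	memcpy(out, buf, sizeof(*out));	/* memcpy avoids unaligned loads */
	return sizeof(*out);
}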
4289 static int binder_wait_for_work(struct binder_thread *thread,
4293 struct binder_proc *proc = thread->proc;
4296 binder_inner_proc_lock(proc);
4298 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4299 if (binder_has_work_ilocked(thread, do_proc_work))
4302 list_add(&thread->waiting_thread_node,
4303 &proc->waiting_threads);
4304 binder_inner_proc_unlock(proc);
4306 binder_inner_proc_lock(proc);
4307 list_del_init(&thread->waiting_thread_node);
4308 if (signal_pending(current)) {
4313 finish_wait(&thread->wait, &wait);
4314 binder_inner_proc_unlock(proc);
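/*
 * Note: binder_wait_for_work() open-codes the classic
 * prepare_to_wait()/schedule()/finish_wait() idiom so it can drop
 * proc->inner_lock across the sleep. A minimal generic sketch of the
 * same pattern, assuming hypothetical 'wq', 'lock' and 'cond' rather
 * than binder state, for comparison:
 */
static int wait_for_cond(wait_queue_head_t *wq, spinlock_t *lock, bool *cond)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(lock);
	for (;;) {
		prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
		if (*cond)
			break;			/* re-checked under the lock */
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		spin_unlock(lock);		/* never sleep holding it */
		schedule();
		spin_lock(lock);
	}
	finish_wait(wq, &wait);
	spin_unlock(lock);
	return ret;
}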
4320 * binder_apply_fd_fixups() - finish fd translation
4321 * @proc: binder_proc associated with @t->buffer
4322 * @t: binder transaction with list of fd fixups
4324 * Now that we are in the context of the transaction target
4325 * process, we can allocate and install fds. Process the
4326 * list of fds to translate and fix up the buffer with the
4327 * new fds first, and only then install the files.
4329 * If we fail to allocate an fd, skip the install and release
4330 * any fds that have already been allocated.
4332 static int binder_apply_fd_fixups(struct binder_proc *proc,
4333 struct binder_transaction *t)
4335 struct binder_txn_fd_fixup *fixup, *tmp;
4338 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4339 int fd = get_unused_fd_flags(O_CLOEXEC);
4342 binder_debug(BINDER_DEBUG_TRANSACTION,
4343 "failed fd fixup txn %d fd %d\n",
4348 binder_debug(BINDER_DEBUG_TRANSACTION,
4349 "fd fixup txn %d fd %d\n",
4351 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4352 fixup->target_fd = fd;
4353 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4360 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4361 fd_install(fixup->target_fd, fixup->file);
4362 list_del(&fixup->fixup_entry);
4369 binder_free_txn_fixups(t);
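/*
 * Illustrative sketch of the reserve-then-install idiom used by
 * binder_apply_fd_fixups() above (a hypothetical helper, not driver
 * code, relying on the same fd helpers already included by this
 * file): get_unused_fd_flags() only reserves a descriptor, which
 * put_unused_fd() can still undo; fd_install() publishes the file to
 * the fd table and is therefore done last, once failure is impossible.
 */
static int publish_two_files(struct file *a, struct file *b, int out_fd[2])
{
	int fd0 = get_unused_fd_flags(O_CLOEXEC);
	int fd1;

	if (fd0 < 0)
		return fd0;
	fd1 = get_unused_fd_flags(O_CLOEXEC);
	if (fd1 < 0) {
		put_unused_fd(fd0);	/* reservation only: easy to undo */
		return fd1;
	}
	/* past this point nothing can fail: publish both descriptors */
	fd_install(fd0, get_file(a));
	fd_install(fd1, get_file(b));
	out_fd[0] = fd0;
	out_fd[1] = fd1;
	return 0;
}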
4373 static int binder_thread_read(struct binder_proc *proc,
4374 struct binder_thread *thread,
4375 binder_uintptr_t binder_buffer, size_t size,
4376 binder_size_t *consumed, int non_block)
4378 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4379 void __user *ptr = buffer + *consumed;
4380 void __user *end = buffer + size;
4383 int wait_for_proc_work;
4385 if (*consumed == 0) {
4386 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4388 ptr += sizeof(uint32_t);
4392 binder_inner_proc_lock(proc);
4393 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4394 binder_inner_proc_unlock(proc);
4396 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4398 trace_binder_wait_for_work(wait_for_proc_work,
4399 !!thread->transaction_stack,
4400 !binder_worklist_empty(proc, &thread->todo));
4401 if (wait_for_proc_work) {
4402 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4403 BINDER_LOOPER_STATE_ENTERED))) {
4404 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4405 proc->pid, thread->pid, thread->looper);
4406 wait_event_interruptible(binder_user_error_wait,
4407 binder_stop_on_user_error < 2);
4409 binder_set_nice(proc->default_priority);
4413 if (!binder_has_work(thread, wait_for_proc_work))
4416 ret = binder_wait_for_work(thread, wait_for_proc_work);
4419 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4426 struct binder_transaction_data_secctx tr;
4427 struct binder_transaction_data *trd = &tr.transaction_data;
4428 struct binder_work *w = NULL;
4429 struct list_head *list = NULL;
4430 struct binder_transaction *t = NULL;
4431 struct binder_thread *t_from;
4432 size_t trsize = sizeof(*trd);
4434 binder_inner_proc_lock(proc);
4435 if (!binder_worklist_empty_ilocked(&thread->todo))
4436 list = &thread->todo;
4437 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4441 binder_inner_proc_unlock(proc);
4444 if (ptr - buffer == 4 && !thread->looper_need_return)
4449 if (end - ptr < sizeof(tr) + 4) {
4450 binder_inner_proc_unlock(proc);
4453 w = binder_dequeue_work_head_ilocked(list);
4454 if (binder_worklist_empty_ilocked(&thread->todo))
4455 thread->process_todo = false;
4458 case BINDER_WORK_TRANSACTION: {
4459 binder_inner_proc_unlock(proc);
4460 t = container_of(w, struct binder_transaction, work);
4462 case BINDER_WORK_RETURN_ERROR: {
4463 struct binder_error *e = container_of(
4464 w, struct binder_error, work);
4466 WARN_ON(e->cmd == BR_OK);
4467 binder_inner_proc_unlock(proc);
4468 if (put_user(e->cmd, (uint32_t __user *)ptr))
4472 ptr += sizeof(uint32_t);
4474 binder_stat_br(proc, thread, cmd);
4476 case BINDER_WORK_TRANSACTION_COMPLETE:
4477 case BINDER_WORK_TRANSACTION_PENDING:
4478 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4479 if (proc->oneway_spam_detection_enabled &&
4480 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4481 cmd = BR_ONEWAY_SPAM_SUSPECT;
4482 else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4483 cmd = BR_TRANSACTION_PENDING_FROZEN;
4485 cmd = BR_TRANSACTION_COMPLETE;
4486 binder_inner_proc_unlock(proc);
4488 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4489 if (put_user(cmd, (uint32_t __user *)ptr))
4491 ptr += sizeof(uint32_t);
4493 binder_stat_br(proc, thread, cmd);
4494 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4495 "%d:%d BR_TRANSACTION_COMPLETE\n",
4496 proc->pid, thread->pid);
4498 case BINDER_WORK_NODE: {
4499 struct binder_node *node = container_of(w, struct binder_node, work);
4501 binder_uintptr_t node_ptr = node->ptr;
4502 binder_uintptr_t node_cookie = node->cookie;
4503 int node_debug_id = node->debug_id;
4506 void __user *orig_ptr = ptr;
4508 BUG_ON(proc != node->proc);
4509 strong = node->internal_strong_refs ||
4510 node->local_strong_refs;
4511 weak = !hlist_empty(&node->refs) ||
4512 node->local_weak_refs ||
4513 node->tmp_refs || strong;
4514 has_strong_ref = node->has_strong_ref;
4515 has_weak_ref = node->has_weak_ref;
4517 if (weak && !has_weak_ref) {
4518 node->has_weak_ref = 1;
4519 node->pending_weak_ref = 1;
4520 node->local_weak_refs++;
4522 if (strong && !has_strong_ref) {
4523 node->has_strong_ref = 1;
4524 node->pending_strong_ref = 1;
4525 node->local_strong_refs++;
4527 if (!strong && has_strong_ref)
4528 node->has_strong_ref = 0;
4529 if (!weak && has_weak_ref)
4530 node->has_weak_ref = 0;
4531 if (!weak && !strong) {
4532 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4533 "%d:%d node %d u%016llx c%016llx deleted\n",
4534 proc->pid, thread->pid,
4538 rb_erase(&node->rb_node, &proc->nodes);
4539 binder_inner_proc_unlock(proc);
4540 binder_node_lock(node);
4542 * Acquire the node lock before freeing the
4543 * node to serialize with other threads that
4544 * may have been holding the node lock while
4545 * decrementing this node (avoids race where
4546 * this thread frees while the other thread
4547 * is unlocking the node after the final
4550 binder_node_unlock(node);
4551 binder_free_node(node);
4553 binder_inner_proc_unlock(proc);
4555 if (weak && !has_weak_ref)
4556 ret = binder_put_node_cmd(
4557 proc, thread, &ptr, node_ptr,
4558 node_cookie, node_debug_id,
4559 BR_INCREFS, "BR_INCREFS");
4560 if (!ret && strong && !has_strong_ref)
4561 ret = binder_put_node_cmd(
4562 proc, thread, &ptr, node_ptr,
4563 node_cookie, node_debug_id,
4564 BR_ACQUIRE, "BR_ACQUIRE");
4565 if (!ret && !strong && has_strong_ref)
4566 ret = binder_put_node_cmd(
4567 proc, thread, &ptr, node_ptr,
4568 node_cookie, node_debug_id,
4569 BR_RELEASE, "BR_RELEASE");
4570 if (!ret && !weak && has_weak_ref)
4571 ret = binder_put_node_cmd(
4572 proc, thread, &ptr, node_ptr,
4573 node_cookie, node_debug_id,
4574 BR_DECREFS, "BR_DECREFS");
4575 if (orig_ptr == ptr)
4576 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4577 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4578 proc->pid, thread->pid,
4585 case BINDER_WORK_DEAD_BINDER:
4586 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4587 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4588 struct binder_ref_death *death;
4590 binder_uintptr_t cookie;
4592 death = container_of(w, struct binder_ref_death, work);
4593 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4594 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4596 cmd = BR_DEAD_BINDER;
4597 cookie = death->cookie;
4599 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4600 "%d:%d %s %016llx\n",
4601 proc->pid, thread->pid,
4602 cmd == BR_DEAD_BINDER ?
4604 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4606 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4607 binder_inner_proc_unlock(proc);
4609 binder_stats_deleted(BINDER_STAT_DEATH);
4611 binder_enqueue_work_ilocked(
4612 w, &proc->delivered_death);
4613 binder_inner_proc_unlock(proc);
4615 if (put_user(cmd, (uint32_t __user *)ptr))
4617 ptr += sizeof(uint32_t);
4618 if (put_user(cookie,
4619 (binder_uintptr_t __user *)ptr))
4621 ptr += sizeof(binder_uintptr_t);
4622 binder_stat_br(proc, thread, cmd);
4623 if (cmd == BR_DEAD_BINDER)
4624 goto done; /* DEAD_BINDER notifications can cause transactions */
4627 binder_inner_proc_unlock(proc);
4628 pr_err("%d:%d: bad work type %d\n",
4629 proc->pid, thread->pid, w->type);
4636 BUG_ON(t->buffer == NULL);
4637 if (t->buffer->target_node) {
4638 struct binder_node *target_node = t->buffer->target_node;
4640 trd->target.ptr = target_node->ptr;
4641 trd->cookie = target_node->cookie;
4642 t->saved_priority = task_nice(current);
4643 if (t->priority < target_node->min_priority &&
4644 !(t->flags & TF_ONE_WAY))
4645 binder_set_nice(t->priority);
4646 else if (!(t->flags & TF_ONE_WAY) ||
4647 t->saved_priority > target_node->min_priority)
4648 binder_set_nice(target_node->min_priority);
4649 cmd = BR_TRANSACTION;
4651 trd->target.ptr = 0;
4655 trd->code = t->code;
4656 trd->flags = t->flags;
4657 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4659 t_from = binder_get_txn_from(t);
4661 struct task_struct *sender = t_from->proc->tsk;
4664 task_tgid_nr_ns(sender,
4665 task_active_pid_ns(current));
4667 trd->sender_pid = 0;
4670 ret = binder_apply_fd_fixups(proc, t);
4672 struct binder_buffer *buffer = t->buffer;
4673 bool oneway = !!(t->flags & TF_ONE_WAY);
4674 int tid = t->debug_id;
4677 binder_thread_dec_tmpref(t_from);
4678 buffer->transaction = NULL;
4679 binder_cleanup_transaction(t, "fd fixups failed",
4681 binder_free_buf(proc, thread, buffer, true);
4682 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4683 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4684 proc->pid, thread->pid,
4686 (cmd == BR_REPLY ? "reply " : ""),
4687 tid, BR_FAILED_REPLY, ret, __LINE__);
4688 if (cmd == BR_REPLY) {
4689 cmd = BR_FAILED_REPLY;
4690 if (put_user(cmd, (uint32_t __user *)ptr))
4692 ptr += sizeof(uint32_t);
4693 binder_stat_br(proc, thread, cmd);
4698 trd->data_size = t->buffer->data_size;
4699 trd->offsets_size = t->buffer->offsets_size;
4700 trd->data.ptr.buffer = t->buffer->user_data;
4701 trd->data.ptr.offsets = trd->data.ptr.buffer +
4702 ALIGN(t->buffer->data_size,
4705 tr.secctx = t->security_ctx;
4706 if (t->security_ctx) {
4707 cmd = BR_TRANSACTION_SEC_CTX;
4708 trsize = sizeof(tr);
4710 if (put_user(cmd, (uint32_t __user *)ptr)) {
4712 binder_thread_dec_tmpref(t_from);
4714 binder_cleanup_transaction(t, "put_user failed",
4719 ptr += sizeof(uint32_t);
4720 if (copy_to_user(ptr, &tr, trsize)) {
4722 binder_thread_dec_tmpref(t_from);
4724 binder_cleanup_transaction(t, "copy_to_user failed",
4731 trace_binder_transaction_received(t);
4732 binder_stat_br(proc, thread, cmd);
4733 binder_debug(BINDER_DEBUG_TRANSACTION,
4734 "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
4735 proc->pid, thread->pid,
4736 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4737 (cmd == BR_TRANSACTION_SEC_CTX) ?
4738 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4739 t->debug_id, t_from ? t_from->proc->pid : 0,
4740 t_from ? t_from->pid : 0, cmd,
4741 t->buffer->data_size, t->buffer->offsets_size,
4742 (u64)trd->data.ptr.buffer,
4743 (u64)trd->data.ptr.offsets);
4746 binder_thread_dec_tmpref(t_from);
4747 t->buffer->allow_user_free = 1;
4748 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4749 binder_inner_proc_lock(thread->proc);
4750 t->to_parent = thread->transaction_stack;
4751 t->to_thread = thread;
4752 thread->transaction_stack = t;
4753 binder_inner_proc_unlock(thread->proc);
4755 binder_free_transaction(t);
4762 *consumed = ptr - buffer;
4763 binder_inner_proc_lock(proc);
4764 if (proc->requested_threads == 0 &&
4765 list_empty(&thread->proc->waiting_threads) &&
4766 proc->requested_threads_started < proc->max_threads &&
4767 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4768 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
4769 /* spawn a new thread if we leave this out */) {
4770 proc->requested_threads++;
4771 binder_inner_proc_unlock(proc);
4772 binder_debug(BINDER_DEBUG_THREADS,
4773 "%d:%d BR_SPAWN_LOOPER\n",
4774 proc->pid, thread->pid);
4775 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4777 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4779 binder_inner_proc_unlock(proc);
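/*
 * Illustrative user-space sketch (hypothetical buffer, not driver
 * code): walking the returns produced by binder_thread_read() above.
 * A read started at offset zero always begins with BR_NOOP, and each
 * command is a u32 followed by a command-specific payload.
 */
#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>

static void drain_read_buffer(const char *buf, size_t consumed)
{
	size_t off = 0;

	while (off + sizeof(uint32_t) <= consumed) {
		uint32_t cmd;

		memcpy(&cmd, buf + off, sizeof(cmd));
		off += sizeof(cmd);
		switch (cmd) {
		case BR_NOOP:
		case BR_TRANSACTION_COMPLETE:
			break;			/* no payload */
		case BR_SPAWN_LOOPER:
			/* the process should start another looper thread */
			break;
		default:
			/* payload length depends on cmd; not shown here */
			return;
		}
	}
}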
4783 static void binder_release_work(struct binder_proc *proc,
4784 struct list_head *list)
4786 struct binder_work *w;
4787 enum binder_work_type wtype;
4790 binder_inner_proc_lock(proc);
4791 w = binder_dequeue_work_head_ilocked(list);
4792 wtype = w ? w->type : 0;
4793 binder_inner_proc_unlock(proc);
4798 case BINDER_WORK_TRANSACTION: {
4799 struct binder_transaction *t;
4801 t = container_of(w, struct binder_transaction, work);
4803 binder_cleanup_transaction(t, "process died.",
4806 case BINDER_WORK_RETURN_ERROR: {
4807 struct binder_error *e = container_of(
4808 w, struct binder_error, work);
4810 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4811 "undelivered TRANSACTION_ERROR: %u\n",
4814 case BINDER_WORK_TRANSACTION_PENDING:
4815 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
4816 case BINDER_WORK_TRANSACTION_COMPLETE: {
4817 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4818 "undelivered TRANSACTION_COMPLETE\n");
4820 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4822 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4823 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4824 struct binder_ref_death *death;
4826 death = container_of(w, struct binder_ref_death, work);
4827 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4828 "undelivered death notification, %016llx\n",
4829 (u64)death->cookie);
4831 binder_stats_deleted(BINDER_STAT_DEATH);
4833 case BINDER_WORK_NODE:
4836 pr_err("unexpected work type, %d, not freed\n",
4844 static struct binder_thread *binder_get_thread_ilocked(
4845 struct binder_proc *proc, struct binder_thread *new_thread)
4847 struct binder_thread *thread = NULL;
4848 struct rb_node *parent = NULL;
4849 struct rb_node **p = &proc->threads.rb_node;
4853 thread = rb_entry(parent, struct binder_thread, rb_node);
4855 if (current->pid < thread->pid)
4857 else if (current->pid > thread->pid)
4858 p = &(*p)->rb_right;
4864 thread = new_thread;
4865 binder_stats_created(BINDER_STAT_THREAD);
4866 thread->proc = proc;
4867 thread->pid = current->pid;
4868 atomic_set(&thread->tmp_ref, 0);
4869 init_waitqueue_head(&thread->wait);
4870 INIT_LIST_HEAD(&thread->todo);
4871 rb_link_node(&thread->rb_node, parent, p);
4872 rb_insert_color(&thread->rb_node, &proc->threads);
4873 thread->looper_need_return = true;
4874 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4875 thread->return_error.cmd = BR_OK;
4876 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4877 thread->reply_error.cmd = BR_OK;
4878 thread->ee.command = BR_OK;
4879 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4883 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4885 struct binder_thread *thread;
4886 struct binder_thread *new_thread;
4888 binder_inner_proc_lock(proc);
4889 thread = binder_get_thread_ilocked(proc, NULL);
4890 binder_inner_proc_unlock(proc);
4892 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4893 if (new_thread == NULL)
4895 binder_inner_proc_lock(proc);
4896 thread = binder_get_thread_ilocked(proc, new_thread);
4897 binder_inner_proc_unlock(proc);
4898 if (thread != new_thread)
4904 static void binder_free_proc(struct binder_proc *proc)
4906 struct binder_device *device;
4908 BUG_ON(!list_empty(&proc->todo));
4909 BUG_ON(!list_empty(&proc->delivered_death));
4910 if (proc->outstanding_txns)
4911 pr_warn("%s: Unexpected outstanding_txns %d\n",
4912 __func__, proc->outstanding_txns);
4913 device = container_of(proc->context, struct binder_device, context);
4914 if (refcount_dec_and_test(&device->ref)) {
4915 kfree(proc->context->name);
4918 binder_alloc_deferred_release(&proc->alloc);
4919 put_task_struct(proc->tsk);
4920 put_cred(proc->cred);
4921 binder_stats_deleted(BINDER_STAT_PROC);
4925 static void binder_free_thread(struct binder_thread *thread)
4927 BUG_ON(!list_empty(&thread->todo));
4928 binder_stats_deleted(BINDER_STAT_THREAD);
4929 binder_proc_dec_tmpref(thread->proc);
4933 static int binder_thread_release(struct binder_proc *proc,
4934 struct binder_thread *thread)
4936 struct binder_transaction *t;
4937 struct binder_transaction *send_reply = NULL;
4938 int active_transactions = 0;
4939 struct binder_transaction *last_t = NULL;
4941 binder_inner_proc_lock(thread->proc);
4943 * take a ref on the proc so it survives
4944 * after we remove this thread from proc->threads.
4945 * The corresponding dec happens when we actually
4946 * free the thread in binder_free_thread().
4950 * take a ref on this thread to ensure it
4951 * survives while we are releasing it
4953 atomic_inc(&thread->tmp_ref);
4954 rb_erase(&thread->rb_node, &proc->threads);
4955 t = thread->transaction_stack;
4957 spin_lock(&t->lock);
4958 if (t->to_thread == thread)
4961 __acquire(&t->lock);
4963 thread->is_dead = true;
4967 active_transactions++;
4968 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4969 "release %d:%d transaction %d %s, still active\n",
4970 proc->pid, thread->pid,
4972 (t->to_thread == thread) ? "in" : "out");
4974 if (t->to_thread == thread) {
4975 thread->proc->outstanding_txns--;
4977 t->to_thread = NULL;
4979 t->buffer->transaction = NULL;
4983 } else if (t->from == thread) {
4988 spin_unlock(&last_t->lock);
4990 spin_lock(&t->lock);
4992 __acquire(&t->lock);
4994 /* annotation for sparse, lock not acquired in last iteration above */
4995 __release(&t->lock);
4998 * If this thread used poll, make sure we remove the waitqueue from any
4999 * poll data structures holding it.
5001 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5002 wake_up_pollfree(&thread->wait);
5004 binder_inner_proc_unlock(thread->proc);
5007 * This is needed to avoid races between wake_up_pollfree() above and
5008 * someone else removing the last entry from the queue for other reasons
5009 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5010 * descriptor being closed). Such other users hold an RCU read lock, so
5011 * we can be sure they're done after we call synchronize_rcu().
5013 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5017 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5018 binder_release_work(proc, &thread->todo);
5019 binder_thread_dec_tmpref(thread);
5020 return active_transactions;
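/*
 * Illustrative user-space sketch (hypothetical fd): an exiting looper
 * thread triggers binder_thread_release() above via the
 * BINDER_THREAD_EXIT ioctl before terminating.
 */
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static void looper_thread_exit(int binder_fd)
{
	ioctl(binder_fd, BINDER_THREAD_EXIT, 0);
}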
5023 static __poll_t binder_poll(struct file *filp,
5024 struct poll_table_struct *wait)
5026 struct binder_proc *proc = filp->private_data;
5027 struct binder_thread *thread = NULL;
5028 bool wait_for_proc_work;
5030 thread = binder_get_thread(proc);
5034 binder_inner_proc_lock(thread->proc);
5035 thread->looper |= BINDER_LOOPER_STATE_POLL;
5036 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5038 binder_inner_proc_unlock(thread->proc);
5040 poll_wait(filp, &thread->wait, wait);
5042 if (binder_has_work(thread, wait_for_proc_work))
5048 static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
5049 struct binder_thread *thread)
5052 struct binder_proc *proc = filp->private_data;
5053 void __user *ubuf = (void __user *)arg;
5054 struct binder_write_read bwr;
5056 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5060 binder_debug(BINDER_DEBUG_READ_WRITE,
5061 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5062 proc->pid, thread->pid,
5063 (u64)bwr.write_size, (u64)bwr.write_buffer,
5064 (u64)bwr.read_size, (u64)bwr.read_buffer);
5066 if (bwr.write_size > 0) {
5067 ret = binder_thread_write(proc, thread,
5070 &bwr.write_consumed);
5071 trace_binder_write_done(ret);
5073 bwr.read_consumed = 0;
5074 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5079 if (bwr.read_size > 0) {
5080 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5083 filp->f_flags & O_NONBLOCK);
5084 trace_binder_read_done(ret);
5085 binder_inner_proc_lock(proc);
5086 if (!binder_worklist_empty_ilocked(&proc->todo))
5087 binder_wakeup_proc_ilocked(proc);
5088 binder_inner_proc_unlock(proc);
5090 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5095 binder_debug(BINDER_DEBUG_READ_WRITE,
5096 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5097 proc->pid, thread->pid,
5098 (u64)bwr.write_consumed, (u64)bwr.write_size,
5099 (u64)bwr.read_consumed, (u64)bwr.read_size);
5100 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
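/*
 * Illustrative user-space sketch (hypothetical fd and buffers):
 * BINDER_WRITE_READ is the single ioctl that both submits BC_*
 * commands and drains BR_* returns; the kernel reports progress back
 * through write_consumed and read_consumed.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int do_write_read(int binder_fd, void *wbuf, size_t wsize,
			 void *rbuf, size_t rsize)
{
	struct binder_write_read bwr = {
		.write_size = wsize,
		.write_buffer = (binder_uintptr_t)(uintptr_t)wbuf,
		.read_size = rsize,
		.read_buffer = (binder_uintptr_t)(uintptr_t)rbuf,
	};

	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
		return -1;
	/* bwr.write_consumed / bwr.read_consumed now hold the progress */
	return 0;
}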
5108 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5109 struct flat_binder_object *fbo)
5112 struct binder_proc *proc = filp->private_data;
5113 struct binder_context *context = proc->context;
5114 struct binder_node *new_node;
5115 kuid_t curr_euid = current_euid();
5117 mutex_lock(&context->context_mgr_node_lock);
5118 if (context->binder_context_mgr_node) {
5119 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5123 ret = security_binder_set_context_mgr(proc->cred);
5126 if (uid_valid(context->binder_context_mgr_uid)) {
5127 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5128 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5129 from_kuid(&init_user_ns, curr_euid),
5130 from_kuid(&init_user_ns,
5131 context->binder_context_mgr_uid));
5136 context->binder_context_mgr_uid = curr_euid;
5138 new_node = binder_new_node(proc, fbo);
5143 binder_node_lock(new_node);
5144 new_node->local_weak_refs++;
5145 new_node->local_strong_refs++;
5146 new_node->has_strong_ref = 1;
5147 new_node->has_weak_ref = 1;
5148 context->binder_context_mgr_node = new_node;
5149 binder_node_unlock(new_node);
5150 binder_put_node(new_node);
5152 mutex_unlock(&context->context_mgr_node_lock);
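/*
 * Illustrative user-space sketch (hypothetical fd): how a service
 * manager might claim the context-manager node, preferring the _EXT
 * variant that carries a flat_binder_object and falling back to the
 * legacy argument-less ioctl on older kernels.
 */
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int become_context_manager(int binder_fd)
{
	struct flat_binder_object fbo = {
		.hdr.type = BINDER_TYPE_BINDER,
	};

	if (ioctl(binder_fd, BINDER_SET_CONTEXT_MGR_EXT, &fbo) == 0)
		return 0;
	return ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0);
}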
5156 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5157 struct binder_node_info_for_ref *info)
5159 struct binder_node *node;
5160 struct binder_context *context = proc->context;
5161 __u32 handle = info->handle;
5163 if (info->strong_count || info->weak_count || info->reserved1 ||
5164 info->reserved2 || info->reserved3) {
5165 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5170 /* This ioctl may only be used by the context manager */
5171 mutex_lock(&context->context_mgr_node_lock);
5172 if (!context->binder_context_mgr_node ||
5173 context->binder_context_mgr_node->proc != proc) {
5174 mutex_unlock(&context->context_mgr_node_lock);
5177 mutex_unlock(&context->context_mgr_node_lock);
5179 node = binder_get_node_from_ref(proc, handle, true, NULL);
5183 info->strong_count = node->local_strong_refs +
5184 node->internal_strong_refs;
5185 info->weak_count = node->local_weak_refs;
5187 binder_put_node(node);
5192 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5193 struct binder_node_debug_info *info)
5196 binder_uintptr_t ptr = info->ptr;
5198 memset(info, 0, sizeof(*info));
5200 binder_inner_proc_lock(proc);
5201 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5202 struct binder_node *node = rb_entry(n, struct binder_node,
5204 if (node->ptr > ptr) {
5205 info->ptr = node->ptr;
5206 info->cookie = node->cookie;
5207 info->has_strong_ref = node->has_strong_ref;
5208 info->has_weak_ref = node->has_weak_ref;
5212 binder_inner_proc_unlock(proc);
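/*
 * Illustrative user-space sketch (hypothetical fd):
 * binder_ioctl_get_node_debug_info() above returns the node with the
 * smallest ptr strictly greater than the ptr passed in, so feeding
 * each result back enumerates every node; an all-zero result (from
 * the memset above) terminates the walk.
 */
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static void walk_nodes(int binder_fd)
{
	struct binder_node_debug_info info = { .ptr = 0 };

	do {
		if (ioctl(binder_fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
			break;
		/* info.ptr / info.cookie describe one live node here */
	} while (info.ptr != 0);
}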
5217 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5220 struct binder_thread *thread;
5222 if (proc->outstanding_txns > 0)
5225 for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5226 thread = rb_entry(n, struct binder_thread, rb_node);
5227 if (thread->transaction_stack)
5233 static int binder_ioctl_freeze(struct binder_freeze_info *info,
5234 struct binder_proc *target_proc)
5238 if (!info->enable) {
5239 binder_inner_proc_lock(target_proc);
5240 target_proc->sync_recv = false;
5241 target_proc->async_recv = false;
5242 target_proc->is_frozen = false;
5243 binder_inner_proc_unlock(target_proc);
5248 * Freezing the target. Prevent new transactions by
5249 * setting the frozen state. If a timeout is specified,
5250 * wait for transactions to drain.
5252 binder_inner_proc_lock(target_proc);
5253 target_proc->sync_recv = false;
5254 target_proc->async_recv = false;
5255 target_proc->is_frozen = true;
5256 binder_inner_proc_unlock(target_proc);
5258 if (info->timeout_ms > 0)
5259 ret = wait_event_interruptible_timeout(
5260 target_proc->freeze_wait,
5261 (!target_proc->outstanding_txns),
5262 msecs_to_jiffies(info->timeout_ms));
5264 /* Check pending transactions that wait for reply */
5266 binder_inner_proc_lock(target_proc);
5267 if (binder_txns_pending_ilocked(target_proc))
5269 binder_inner_proc_unlock(target_proc);
5273 binder_inner_proc_lock(target_proc);
5274 target_proc->is_frozen = false;
5275 binder_inner_proc_unlock(target_proc);
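/*
 * Illustrative user-space sketch (hypothetical pid and timeout):
 * freezing a target process with a 100ms drain window and thawing it
 * again if outstanding transactions did not drain.
 */
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int freeze_binder_process(int binder_fd, unsigned int pid)
{
	struct binder_freeze_info info = {
		.pid = pid,
		.enable = 1,
		.timeout_ms = 100,
	};

	if (ioctl(binder_fd, BINDER_FREEZE, &info) == 0)
		return 0;
	info.enable = 0;	/* roll the freeze back on failure */
	ioctl(binder_fd, BINDER_FREEZE, &info);
	return -1;
}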
5281 static int binder_ioctl_get_freezer_info(
5282 struct binder_frozen_status_info *info)
5284 struct binder_proc *target_proc;
5288 info->sync_recv = 0;
5289 info->async_recv = 0;
5291 mutex_lock(&binder_procs_lock);
5292 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5293 if (target_proc->pid == info->pid) {
5295 binder_inner_proc_lock(target_proc);
5296 txns_pending = binder_txns_pending_ilocked(target_proc);
5297 info->sync_recv |= target_proc->sync_recv |
5298 (txns_pending << 1);
5299 info->async_recv |= target_proc->async_recv;
5300 binder_inner_proc_unlock(target_proc);
5303 mutex_unlock(&binder_procs_lock);
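/*
 * Illustrative sketch (an assumption about the flag layout, matching
 * the shift above): bit 0 of sync_recv records that a sync
 * transaction hit the frozen process, bit 1 that transactions are
 * still pending; async_recv records a blocked async transaction.
 */
#include <linux/android/binder.h>

static void decode_frozen_status(const struct binder_frozen_status_info *info,
				 int *sync_hit, int *txns_pending,
				 int *async_hit)
{
	*sync_hit = info->sync_recv & 1;
	*txns_pending = !!(info->sync_recv & 2);
	*async_hit = info->async_recv & 1;
}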
5311 static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5314 struct binder_extended_error ee;
5316 binder_inner_proc_lock(thread->proc);
5318 binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5319 binder_inner_proc_unlock(thread->proc);
5321 if (copy_to_user(ubuf, &ee, sizeof(ee)))
5327 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5330 struct binder_proc *proc = filp->private_data;
5331 struct binder_thread *thread;
5332 void __user *ubuf = (void __user *)arg;
5334 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5335 proc->pid, current->pid, cmd, arg);*/
5337 binder_selftest_alloc(&proc->alloc);
5339 trace_binder_ioctl(cmd, arg);
5341 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5345 thread = binder_get_thread(proc);
5346 if (thread == NULL) {
5352 case BINDER_WRITE_READ:
5353 ret = binder_ioctl_write_read(filp, arg, thread);
5357 case BINDER_SET_MAX_THREADS: {
5360 if (copy_from_user(&max_threads, ubuf,
5361 sizeof(max_threads))) {
5365 binder_inner_proc_lock(proc);
5366 proc->max_threads = max_threads;
5367 binder_inner_proc_unlock(proc);
5370 case BINDER_SET_CONTEXT_MGR_EXT: {
5371 struct flat_binder_object fbo;
5373 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5377 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5382 case BINDER_SET_CONTEXT_MGR:
5383 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5387 case BINDER_THREAD_EXIT:
5388 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5389 proc->pid, thread->pid);
5390 binder_thread_release(proc, thread);
5393 case BINDER_VERSION: {
5394 struct binder_version __user *ver = ubuf;
5396 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5397 &ver->protocol_version)) {
5403 case BINDER_GET_NODE_INFO_FOR_REF: {
5404 struct binder_node_info_for_ref info;
5406 if (copy_from_user(&info, ubuf, sizeof(info))) {
5411 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5415 if (copy_to_user(ubuf, &info, sizeof(info))) {
5422 case BINDER_GET_NODE_DEBUG_INFO: {
5423 struct binder_node_debug_info info;
5425 if (copy_from_user(&info, ubuf, sizeof(info))) {
5430 ret = binder_ioctl_get_node_debug_info(proc, &info);
5434 if (copy_to_user(ubuf, &info, sizeof(info))) {
5440 case BINDER_FREEZE: {
5441 struct binder_freeze_info info;
5442 struct binder_proc **target_procs = NULL, *target_proc;
5443 int target_procs_count = 0, i = 0;
5447 if (copy_from_user(&info, ubuf, sizeof(info))) {
5452 mutex_lock(&binder_procs_lock);
5453 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5454 if (target_proc->pid == info.pid)
5455 target_procs_count++;
5458 if (target_procs_count == 0) {
5459 mutex_unlock(&binder_procs_lock);
5464 target_procs = kcalloc(target_procs_count,
5465 sizeof(struct binder_proc *),
5468 if (!target_procs) {
5469 mutex_unlock(&binder_procs_lock);
5474 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5475 if (target_proc->pid != info.pid)
5478 binder_inner_proc_lock(target_proc);
5479 target_proc->tmp_ref++;
5480 binder_inner_proc_unlock(target_proc);
5482 target_procs[i++] = target_proc;
5484 mutex_unlock(&binder_procs_lock);
5486 for (i = 0; i < target_procs_count; i++) {
5488 ret = binder_ioctl_freeze(&info,
5491 binder_proc_dec_tmpref(target_procs[i]);
5494 kfree(target_procs);
5500 case BINDER_GET_FROZEN_INFO: {
5501 struct binder_frozen_status_info info;
5503 if (copy_from_user(&info, ubuf, sizeof(info))) {
5508 ret = binder_ioctl_get_freezer_info(&info);
5512 if (copy_to_user(ubuf, &info, sizeof(info))) {
5518 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5521 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5525 binder_inner_proc_lock(proc);
5526 proc->oneway_spam_detection_enabled = (bool)enable;
5527 binder_inner_proc_unlock(proc);
5530 case BINDER_GET_EXTENDED_ERROR:
5531 ret = binder_ioctl_get_extended_error(thread, ubuf);
5542 thread->looper_need_return = false;
5543 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5544 if (ret && ret != -EINTR)
5545 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5547 trace_binder_ioctl_done(ret);
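/*
 * Illustrative user-space sketch tying the ioctls above together
 * (device path, buffer size and thread count are hypothetical): open
 * the device, verify the protocol version, size the thread pool, and
 * map the read-only transaction buffer space.
 */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/android/binder.h>

static int binder_client_init(void)
{
	struct binder_version vers;
	uint32_t max_threads = 4;
	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);

	if (fd < 0)
		return -1;
	if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
		goto fail;
	if (ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads) < 0)
		goto fail;
	if (mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0) == MAP_FAILED)
		goto fail;
	return fd;
fail:
	close(fd);
	return -1;
}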
5551 static void binder_vma_open(struct vm_area_struct *vma)
5553 struct binder_proc *proc = vma->vm_private_data;
5555 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5556 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5557 proc->pid, vma->vm_start, vma->vm_end,
5558 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5559 (unsigned long)pgprot_val(vma->vm_page_prot));
5562 static void binder_vma_close(struct vm_area_struct *vma)
5564 struct binder_proc *proc = vma->vm_private_data;
5566 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5567 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5568 proc->pid, vma->vm_start, vma->vm_end,
5569 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5570 (unsigned long)pgprot_val(vma->vm_page_prot));
5571 binder_alloc_vma_close(&proc->alloc);
5574 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5576 return VM_FAULT_SIGBUS;
5579 static const struct vm_operations_struct binder_vm_ops = {
5580 .open = binder_vma_open,
5581 .close = binder_vma_close,
5582 .fault = binder_vm_fault,
5585 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5587 struct binder_proc *proc = filp->private_data;
5589 if (proc->tsk != current->group_leader)
5592 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5593 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5594 __func__, proc->pid, vma->vm_start, vma->vm_end,
5595 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5596 (unsigned long)pgprot_val(vma->vm_page_prot));
5598 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5599 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5600 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5603 vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
5605 vma->vm_ops = &binder_vm_ops;
5606 vma->vm_private_data = proc;
5608 return binder_alloc_mmap_handler(&proc->alloc, vma);
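/*
 * Illustrative user-space sketch (size hypothetical): because
 * binder_mmap() rejects FORBIDDEN_MMAP_FLAGS (VM_WRITE), the mapping
 * must be PROT_READ only; the kernel side fills it with transaction
 * data.
 */
#include <stddef.h>
#include <sys/mman.h>

static void *map_binder_buffers(int binder_fd, size_t size)
{
	void *p = mmap(NULL, size, PROT_READ, MAP_PRIVATE, binder_fd, 0);

	return p == MAP_FAILED ? NULL : p;
}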
5611 static int binder_open(struct inode *nodp, struct file *filp)
5613 struct binder_proc *proc, *itr;
5614 struct binder_device *binder_dev;
5615 struct binderfs_info *info;
5616 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5617 bool existing_pid = false;
5619 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5620 current->group_leader->pid, current->pid);
5622 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5625 spin_lock_init(&proc->inner_lock);
5626 spin_lock_init(&proc->outer_lock);
5627 get_task_struct(current->group_leader);
5628 proc->tsk = current->group_leader;
5629 proc->cred = get_cred(filp->f_cred);
5630 INIT_LIST_HEAD(&proc->todo);
5631 init_waitqueue_head(&proc->freeze_wait);
5632 proc->default_priority = task_nice(current);
5633 /* binderfs stashes devices in i_private */
5634 if (is_binderfs_device(nodp)) {
5635 binder_dev = nodp->i_private;
5636 info = nodp->i_sb->s_fs_info;
5637 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5639 binder_dev = container_of(filp->private_data,
5640 struct binder_device, miscdev);
5642 refcount_inc(&binder_dev->ref);
5643 proc->context = &binder_dev->context;
5644 binder_alloc_init(&proc->alloc);
5646 binder_stats_created(BINDER_STAT_PROC);
5647 proc->pid = current->group_leader->pid;
5648 INIT_LIST_HEAD(&proc->delivered_death);
5649 INIT_LIST_HEAD(&proc->waiting_threads);
5650 filp->private_data = proc;
5652 mutex_lock(&binder_procs_lock);
5653 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5654 if (itr->pid == proc->pid) {
5655 existing_pid = true;
5659 hlist_add_head(&proc->proc_node, &binder_procs);
5660 mutex_unlock(&binder_procs_lock);
5662 if (binder_debugfs_dir_entry_proc && !existing_pid) {
5665 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5667 * proc debug entries are shared between contexts.
5668 * Only create for the first PID to avoid debugfs log spamming.
5669 * The printing code will print all contexts for a given PID
5670 * anyway, so this is not a problem.
5672 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5673 binder_debugfs_dir_entry_proc,
5674 (void *)(unsigned long)proc->pid,
5678 if (binder_binderfs_dir_entry_proc && !existing_pid) {
5680 struct dentry *binderfs_entry;
5682 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5684 * Similar to debugfs, the process-specific log file is shared
5685 * between contexts. Only create it for the first PID.
5686 * This is fine because, as with debugfs, the log file will
5687 * contain information on all contexts of a given PID.
5689 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5690 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5691 if (!IS_ERR(binderfs_entry)) {
5692 proc->binderfs_entry = binderfs_entry;
5696 error = PTR_ERR(binderfs_entry);
5697 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5705 static int binder_flush(struct file *filp, fl_owner_t id)
5707 struct binder_proc *proc = filp->private_data;
5709 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5714 static void binder_deferred_flush(struct binder_proc *proc)
5719 binder_inner_proc_lock(proc);
5720 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5721 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5723 thread->looper_need_return = true;
5724 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5725 wake_up_interruptible(&thread->wait);
5729 binder_inner_proc_unlock(proc);
5731 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5732 "binder_flush: %d woke %d threads\n", proc->pid,
5736 static int binder_release(struct inode *nodp, struct file *filp)
5738 struct binder_proc *proc = filp->private_data;
5740 debugfs_remove(proc->debugfs_entry);
5742 if (proc->binderfs_entry) {
5743 binderfs_remove_file(proc->binderfs_entry);
5744 proc->binderfs_entry = NULL;
5747 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5752 static int binder_node_release(struct binder_node *node, int refs)
5754 struct binder_ref *ref;
5756 struct binder_proc *proc = node->proc;
5758 binder_release_work(proc, &node->async_todo);
5760 binder_node_lock(node);
5761 binder_inner_proc_lock(proc);
5762 binder_dequeue_work_ilocked(&node->work);
5764 * The caller must have taken a temporary ref on the node.
5766 BUG_ON(!node->tmp_refs);
5767 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5768 binder_inner_proc_unlock(proc);
5769 binder_node_unlock(node);
5770 binder_free_node(node);
5776 node->local_strong_refs = 0;
5777 node->local_weak_refs = 0;
5778 binder_inner_proc_unlock(proc);
5780 spin_lock(&binder_dead_nodes_lock);
5781 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5782 spin_unlock(&binder_dead_nodes_lock);
5784 hlist_for_each_entry(ref, &node->refs, node_entry) {
5787 * Need the node lock to synchronize
5788 * with new notification requests and the
5789 * inner lock to synchronize with queued
5790 * death notifications.
5792 binder_inner_proc_lock(ref->proc);
5794 binder_inner_proc_unlock(ref->proc);
5800 BUG_ON(!list_empty(&ref->death->work.entry));
5801 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5802 binder_enqueue_work_ilocked(&ref->death->work,
5804 binder_wakeup_proc_ilocked(ref->proc);
5805 binder_inner_proc_unlock(ref->proc);
5808 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5809 "node %d now dead, refs %d, death %d\n",
5810 node->debug_id, refs, death);
5811 binder_node_unlock(node);
5812 binder_put_node(node);
5817 static void binder_deferred_release(struct binder_proc *proc)
5819 struct binder_context *context = proc->context;
5821 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5823 mutex_lock(&binder_procs_lock);
5824 hlist_del(&proc->proc_node);
5825 mutex_unlock(&binder_procs_lock);
5827 mutex_lock(&context->context_mgr_node_lock);
5828 if (context->binder_context_mgr_node &&
5829 context->binder_context_mgr_node->proc == proc) {
5830 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5831 "%s: %d context_mgr_node gone\n",
5832 __func__, proc->pid);
5833 context->binder_context_mgr_node = NULL;
5835 mutex_unlock(&context->context_mgr_node_lock);
5836 binder_inner_proc_lock(proc);
5838 * Make sure proc stays alive after we
5839 * remove all the threads
5843 proc->is_dead = true;
5844 proc->is_frozen = false;
5845 proc->sync_recv = false;
5846 proc->async_recv = false;
5848 active_transactions = 0;
5849 while ((n = rb_first(&proc->threads))) {
5850 struct binder_thread *thread;
5852 thread = rb_entry(n, struct binder_thread, rb_node);
5853 binder_inner_proc_unlock(proc);
5855 active_transactions += binder_thread_release(proc, thread);
5856 binder_inner_proc_lock(proc);
5861 while ((n = rb_first(&proc->nodes))) {
5862 struct binder_node *node;
5864 node = rb_entry(n, struct binder_node, rb_node);
5867 * take a temporary ref on the node before
5868 * calling binder_node_release() which will either
5869 * kfree() the node or call binder_put_node()
5871 binder_inc_node_tmpref_ilocked(node);
5872 rb_erase(&node->rb_node, &proc->nodes);
5873 binder_inner_proc_unlock(proc);
5874 incoming_refs = binder_node_release(node, incoming_refs);
5875 binder_inner_proc_lock(proc);
5877 binder_inner_proc_unlock(proc);
5880 binder_proc_lock(proc);
5881 while ((n = rb_first(&proc->refs_by_desc))) {
5882 struct binder_ref *ref;
5884 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5886 binder_cleanup_ref_olocked(ref);
5887 binder_proc_unlock(proc);
5888 binder_free_ref(ref);
5889 binder_proc_lock(proc);
5891 binder_proc_unlock(proc);
5893 binder_release_work(proc, &proc->todo);
5894 binder_release_work(proc, &proc->delivered_death);
5896 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5897 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5898 __func__, proc->pid, threads, nodes, incoming_refs,
5899 outgoing_refs, active_transactions);
5901 binder_proc_dec_tmpref(proc);
5904 static void binder_deferred_func(struct work_struct *work)
5906 struct binder_proc *proc;
5911 mutex_lock(&binder_deferred_lock);
5912 if (!hlist_empty(&binder_deferred_list)) {
5913 proc = hlist_entry(binder_deferred_list.first,
5914 struct binder_proc, deferred_work_node);
5915 hlist_del_init(&proc->deferred_work_node);
5916 defer = proc->deferred_work;
5917 proc->deferred_work = 0;
5922 mutex_unlock(&binder_deferred_lock);
5924 if (defer & BINDER_DEFERRED_FLUSH)
5925 binder_deferred_flush(proc);
5927 if (defer & BINDER_DEFERRED_RELEASE)
5928 binder_deferred_release(proc); /* frees proc */
5931 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5934 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5936 mutex_lock(&binder_deferred_lock);
5937 proc->deferred_work |= defer;
5938 if (hlist_unhashed(&proc->deferred_work_node)) {
5939 hlist_add_head(&proc->deferred_work_node,
5940 &binder_deferred_list);
5941 schedule_work(&binder_deferred_work);
5943 mutex_unlock(&binder_deferred_lock);
5946 static void print_binder_transaction_ilocked(struct seq_file *m,
5947 struct binder_proc *proc,
5949 struct binder_transaction *t)
5951 struct binder_proc *to_proc;
5952 struct binder_buffer *buffer = t->buffer;
5953 ktime_t current_time = ktime_get();
5955 spin_lock(&t->lock);
5956 to_proc = t->to_proc;
5958 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
5959 prefix, t->debug_id, t,
5962 to_proc ? to_proc->pid : 0,
5963 t->to_thread ? t->to_thread->pid : 0,
5964 t->code, t->flags, t->priority, t->need_reply,
5965 ktime_ms_delta(current_time, t->start_time));
5966 spin_unlock(&t->lock);
5968 if (proc != to_proc) {
5970 * Can only safely deref buffer if we are holding the
5971 * correct proc inner lock for this node
5977 if (buffer == NULL) {
5978 seq_puts(m, " buffer free\n");
5981 if (buffer->target_node)
5982 seq_printf(m, " node %d", buffer->target_node->debug_id);
5983 seq_printf(m, " size %zd:%zd offset %lx\n",
5984 buffer->data_size, buffer->offsets_size,
5985 proc->alloc.buffer - buffer->user_data);
5988 static void print_binder_work_ilocked(struct seq_file *m,
5989 struct binder_proc *proc,
5991 const char *transaction_prefix,
5992 struct binder_work *w)
5994 struct binder_node *node;
5995 struct binder_transaction *t;
5998 case BINDER_WORK_TRANSACTION:
5999 t = container_of(w, struct binder_transaction, work);
6000 print_binder_transaction_ilocked(
6001 m, proc, transaction_prefix, t);
6003 case BINDER_WORK_RETURN_ERROR: {
6004 struct binder_error *e = container_of(
6005 w, struct binder_error, work);
6007 seq_printf(m, "%stransaction error: %u\n",
6010 case BINDER_WORK_TRANSACTION_COMPLETE:
6011 seq_printf(m, "%stransaction complete\n", prefix);
6013 case BINDER_WORK_NODE:
6014 node = container_of(w, struct binder_node, work);
6015 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6016 prefix, node->debug_id,
6017 (u64)node->ptr, (u64)node->cookie);
6019 case BINDER_WORK_DEAD_BINDER:
6020 seq_printf(m, "%shas dead binder\n", prefix);
6022 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6023 seq_printf(m, "%shas cleared dead binder\n", prefix);
6025 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6026 seq_printf(m, "%shas cleared death notification\n", prefix);
6029 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6034 static void print_binder_thread_ilocked(struct seq_file *m,
6035 struct binder_thread *thread,
6038 struct binder_transaction *t;
6039 struct binder_work *w;
6040 size_t start_pos = m->count;
6043 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
6044 thread->pid, thread->looper,
6045 thread->looper_need_return,
6046 atomic_read(&thread->tmp_ref));
6047 header_pos = m->count;
6048 t = thread->transaction_stack;
6050 if (t->from == thread) {
6051 print_binder_transaction_ilocked(m, thread->proc,
6052 " outgoing transaction", t);
6054 } else if (t->to_thread == thread) {
6055 print_binder_transaction_ilocked(m, thread->proc,
6056 " incoming transaction", t);
6059 print_binder_transaction_ilocked(m, thread->proc,
6060 " bad transaction", t);
6064 list_for_each_entry(w, &thread->todo, entry) {
6065 print_binder_work_ilocked(m, thread->proc, " ",
6066 " pending transaction", w);
6068 if (!print_always && m->count == header_pos)
6069 m->count = start_pos;
6072 static void print_binder_node_nilocked(struct seq_file *m,
6073 struct binder_node *node)
6075 struct binder_ref *ref;
6076 struct binder_work *w;
6080 hlist_for_each_entry(ref, &node->refs, node_entry)
6083 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6084 node->debug_id, (u64)node->ptr, (u64)node->cookie,
6085 node->has_strong_ref, node->has_weak_ref,
6086 node->local_strong_refs, node->local_weak_refs,
6087 node->internal_strong_refs, count, node->tmp_refs);
6089 seq_puts(m, " proc");
6090 hlist_for_each_entry(ref, &node->refs, node_entry)
6091 seq_printf(m, " %d", ref->proc->pid);
6095 list_for_each_entry(w, &node->async_todo, entry)
6096 print_binder_work_ilocked(m, node->proc, " ",
6097 " pending async transaction", w);
6101 static void print_binder_ref_olocked(struct seq_file *m,
6102 struct binder_ref *ref)
6104 binder_node_lock(ref->node);
6105 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
6106 ref->data.debug_id, ref->data.desc,
6107 ref->node->proc ? "" : "dead ",
6108 ref->node->debug_id, ref->data.strong,
6109 ref->data.weak, ref->death);
6110 binder_node_unlock(ref->node);
6113 static void print_binder_proc(struct seq_file *m,
6114 struct binder_proc *proc, int print_all)
6116 struct binder_work *w;
6118 size_t start_pos = m->count;
6120 struct binder_node *last_node = NULL;
6122 seq_printf(m, "proc %d\n", proc->pid);
6123 seq_printf(m, "context %s\n", proc->context->name);
6124 header_pos = m->count;
6126 binder_inner_proc_lock(proc);
6127 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6128 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6129 rb_node), print_all);
6131 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6132 struct binder_node *node = rb_entry(n, struct binder_node,
6134 if (!print_all && !node->has_async_transaction)
6138 * take a temporary reference on the node so it
6139 * survives and isn't removed from the tree
6140 * while we print it.
6142 binder_inc_node_tmpref_ilocked(node);
6143 /* Need to drop inner lock to take node lock */
6144 binder_inner_proc_unlock(proc);
6146 binder_put_node(last_node);
6147 binder_node_inner_lock(node);
6148 print_binder_node_nilocked(m, node);
6149 binder_node_inner_unlock(node);
6151 binder_inner_proc_lock(proc);
6153 binder_inner_proc_unlock(proc);
6155 binder_put_node(last_node);
6158 binder_proc_lock(proc);
6159 for (n = rb_first(&proc->refs_by_desc);
6162 print_binder_ref_olocked(m, rb_entry(n,
6165 binder_proc_unlock(proc);
6167 binder_alloc_print_allocated(m, &proc->alloc);
6168 binder_inner_proc_lock(proc);
6169 list_for_each_entry(w, &proc->todo, entry)
6170 print_binder_work_ilocked(m, proc, " ",
6171 " pending transaction", w);
6172 list_for_each_entry(w, &proc->delivered_death, entry) {
6173 seq_puts(m, " has delivered dead binder\n");
6176 binder_inner_proc_unlock(proc);
6177 if (!print_all && m->count == header_pos)
6178 m->count = start_pos;
6181 static const char * const binder_return_strings[] = {
6186 "BR_ACQUIRE_RESULT",
6188 "BR_TRANSACTION_COMPLETE",
6193 "BR_ATTEMPT_ACQUIRE",
6198 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
6201 "BR_ONEWAY_SPAM_SUSPECT",
6202 "BR_TRANSACTION_PENDING_FROZEN"
6205 static const char * const binder_command_strings[] = {
6208 "BC_ACQUIRE_RESULT",
6216 "BC_ATTEMPT_ACQUIRE",
6217 "BC_REGISTER_LOOPER",
6220 "BC_REQUEST_DEATH_NOTIFICATION",
6221 "BC_CLEAR_DEATH_NOTIFICATION",
6222 "BC_DEAD_BINDER_DONE",
6223 "BC_TRANSACTION_SG",
6227 static const char * const binder_objstat_strings[] = {
6234 "transaction_complete"
6237 static void print_binder_stats(struct seq_file *m, const char *prefix,
6238 struct binder_stats *stats)
6242 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6243 ARRAY_SIZE(binder_command_strings));
6244 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6245 int temp = atomic_read(&stats->bc[i]);
6248 seq_printf(m, "%s%s: %d\n", prefix,
6249 binder_command_strings[i], temp);
6252 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6253 ARRAY_SIZE(binder_return_strings));
6254 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6255 int temp = atomic_read(&stats->br[i]);
6258 seq_printf(m, "%s%s: %d\n", prefix,
6259 binder_return_strings[i], temp);
6262 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6263 ARRAY_SIZE(binder_objstat_strings));
6264 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6265 ARRAY_SIZE(stats->obj_deleted));
6266 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6267 int created = atomic_read(&stats->obj_created[i]);
6268 int deleted = atomic_read(&stats->obj_deleted[i]);
6270 if (created || deleted)
6271 seq_printf(m, "%s%s: active %d total %d\n",
6273 binder_objstat_strings[i],
6279 static void print_binder_proc_stats(struct seq_file *m,
6280 struct binder_proc *proc)
6282 struct binder_work *w;
6283 struct binder_thread *thread;
6285 int count, strong, weak, ready_threads;
6286 size_t free_async_space =
6287 binder_alloc_get_free_async_space(&proc->alloc);
6289 seq_printf(m, "proc %d\n", proc->pid);
6290 seq_printf(m, "context %s\n", proc->context->name);
6293 binder_inner_proc_lock(proc);
6294 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6297 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6300 seq_printf(m, " threads: %d\n", count);
6301 seq_printf(m, " requested threads: %d+%d/%d\n"
6302 " ready threads %d\n"
6303 " free async space %zd\n", proc->requested_threads,
6304 proc->requested_threads_started, proc->max_threads,
6308 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6310 binder_inner_proc_unlock(proc);
6311 seq_printf(m, " nodes: %d\n", count);
6315 binder_proc_lock(proc);
6316 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6317 struct binder_ref *ref = rb_entry(n, struct binder_ref,
6320 strong += ref->data.strong;
6321 weak += ref->data.weak;
6323 binder_proc_unlock(proc);
6324 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
6326 count = binder_alloc_get_allocated_count(&proc->alloc);
6327 seq_printf(m, " buffers: %d\n", count);
6329 binder_alloc_print_pages(m, &proc->alloc);
6332 binder_inner_proc_lock(proc);
6333 list_for_each_entry(w, &proc->todo, entry) {
6334 if (w->type == BINDER_WORK_TRANSACTION)
6337 binder_inner_proc_unlock(proc);
6338 seq_printf(m, " pending transactions: %d\n", count);
6340 print_binder_stats(m, " ", &proc->stats);
6343 static int state_show(struct seq_file *m, void *unused)
6345 struct binder_proc *proc;
6346 struct binder_node *node;
6347 struct binder_node *last_node = NULL;
6349 seq_puts(m, "binder state:\n");
6351 spin_lock(&binder_dead_nodes_lock);
6352 if (!hlist_empty(&binder_dead_nodes))
6353 seq_puts(m, "dead nodes:\n");
6354 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6356 * take a temporary reference on the node so it
6357 * survives and isn't removed from the list
6358 * while we print it.
6361 spin_unlock(&binder_dead_nodes_lock);
6363 binder_put_node(last_node);
6364 binder_node_lock(node);
6365 print_binder_node_nilocked(m, node);
6366 binder_node_unlock(node);
6368 spin_lock(&binder_dead_nodes_lock);
6370 spin_unlock(&binder_dead_nodes_lock);
6372 binder_put_node(last_node);
6374 mutex_lock(&binder_procs_lock);
6375 hlist_for_each_entry(proc, &binder_procs, proc_node)
6376 print_binder_proc(m, proc, 1);
6377 mutex_unlock(&binder_procs_lock);
6382 static int stats_show(struct seq_file *m, void *unused)
6384 struct binder_proc *proc;
6386 seq_puts(m, "binder stats:\n");
6388 print_binder_stats(m, "", &binder_stats);
6390 mutex_lock(&binder_procs_lock);
6391 hlist_for_each_entry(proc, &binder_procs, proc_node)
6392 print_binder_proc_stats(m, proc);
6393 mutex_unlock(&binder_procs_lock);
6398 static int transactions_show(struct seq_file *m, void *unused)
6400 struct binder_proc *proc;
6402 seq_puts(m, "binder transactions:\n");
6403 mutex_lock(&binder_procs_lock);
6404 hlist_for_each_entry(proc, &binder_procs, proc_node)
6405 print_binder_proc(m, proc, 0);
6406 mutex_unlock(&binder_procs_lock);
6411 static int proc_show(struct seq_file *m, void *unused)
6413 struct binder_proc *itr;
6414 int pid = (unsigned long)m->private;
6416 mutex_lock(&binder_procs_lock);
6417 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6418 if (itr->pid == pid) {
6419 seq_puts(m, "binder proc state:\n");
6420 print_binder_proc(m, itr, 1);
6423 mutex_unlock(&binder_procs_lock);
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

static int transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

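/*
 * Worked example of the index math above, assuming the 32-entry ring
 * declared in binder_internal.h: after 40 insertions log->full is set
 * and log->cur is 39, so count starts at 40, cur becomes 40 % 32 = 8,
 * and count is clamped to 32; the loop prints slots 8..31 then 0..7,
 * i.e. oldest surviving entry first. Before the ring wraps (say
 * log->cur == 4, log->full not set), count is 5 and cur is 0,
 * printing slots 0..4 in insertion order.
 */
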
const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

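/*
 * Note that binder_fops has no .read or .write: all data transfer goes
 * through the BINDER_WRITE_READ ioctl against the mmap'ed buffer
 * space. compat_ptr_ioctl is the generic helper that converts a 32-bit
 * userspace pointer argument and forwards to .unlocked_ioctl.
 */
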
DEFINE_SHOW_ATTRIBUTE(state);
DEFINE_SHOW_ATTRIBUTE(stats);
DEFINE_SHOW_ATTRIBUTE(transactions);
DEFINE_SHOW_ATTRIBUTE(transaction_log);

const struct binder_debugfs_entry binder_debugfs_entries[] = {
	{
		.name = "state",
		.mode = 0444,
		.fops = &state_fops,
	},
	{
		.name = "stats",
		.mode = 0444,
		.fops = &stats_fops,
	},
	{
		.name = "transactions",
		.mode = 0444,
		.fops = &transactions_fops,
	},
	{
		.name = "transaction_log",
		.mode = 0444,
		.fops = &transaction_log_fops,
		.data = &binder_transaction_log,
	},
	{
		.name = "failed_transaction_log",
		.mode = 0444,
		.fops = &transaction_log_fops,
		.data = &binder_transaction_log_failed,
	},
	{} /* terminator */
};

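/*
 * Assuming debugfs is mounted at the conventional /sys/kernel/debug,
 * the table above yields read-only files such as:
 *
 *   cat /sys/kernel/debug/binder/state
 *   cat /sys/kernel/debug/binder/failed_transaction_log
 *
 * The two log files share transaction_log_fops; the .data pointer
 * selects which ring a given file dumps (it reaches the show function
 * as m->private via DEFINE_SHOW_ATTRIBUTE()/single_open()).
 */
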
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	refcount_set(&binder_device->ref, 1);
	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);
	return ret;
}

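/*
 * Design note: each name carved out of binder_devices_param becomes an
 * independent misc character device sharing binder_fops.
 * MISC_DYNAMIC_MINOR lets the misc core pick a free minor, so the
 * resulting /dev/<name> nodes (typically binder, hwbinder and
 * vndbinder on Android) differ only in the binder_context they carry.
 */
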
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;
	const struct binder_debugfs_entry *db_entry;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);

	binder_for_each_debugfs_entry(db_entry)
		debugfs_create_file(db_entry->name,
				    db_entry->mode,
				    binder_debugfs_dir_entry_root,
				    db_entry->data,
				    db_entry->fops);

	binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						binder_debugfs_dir_entry_root);

	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
	    strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module_parameter string, because we don't want to
		 * tokenize it in-place.
		 */
		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
		if (!device_names) {
			ret = -ENOMEM;
			goto err_alloc_device_names_failed;
		}

		device_tmp = device_names;
		while ((device_name = strsep(&device_tmp, ","))) {
			ret = init_binder_device(device_name);
			if (ret)
				goto err_init_binder_device_failed;
		}
	}

	ret = init_binderfs();
	if (ret)
		goto err_init_binder_device_failed;

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
	binder_alloc_shrinker_exit();

	return ret;
}

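/*
 * The unwind above mirrors the setup order: devices registered so far
 * are deregistered and freed, the copied parameter string is released,
 * and only then are the debugfs tree and the shrinker torn down. The
 * return values of the debugfs_create_*() calls are not checked:
 * debugfs is best-effort, and its absence is not fatal here.
 */
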
device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");