/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_trace.h"
static DEFINE_MUTEX(binder_main_lock);
static DEFINE_MUTEX(binder_deferred_lock);
static DEFINE_MUTEX(binder_mmap_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static HLIST_HEAD(binder_deferred_list);
static HLIST_HEAD(binder_dead_nodes);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static int binder_last_id;
static struct workqueue_struct *binder_deferred_workqueue;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
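
/*
 * The to_*_object() helpers above recover the enclosing object from an
 * embedded struct binder_object_header. They are plain container_of()
 * wrappers, so they compile down to pointer arithmetic and do no checking
 * of their own; callers are expected to have validated hdr->type first
 * (see binder_validate_object() below) before converting.
 */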
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
	int bc[_IOC_NR(BC_REPLY_SG) + 1];
	int obj_created[BINDER_STAT_COUNT];
	int obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	binder_stats.obj_deleted[type]++;
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	binder_stats.obj_created[type]++;
}
struct binder_transaction_log_entry {
	int debug_id;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	const char *context_name;
};
struct binder_transaction_log {
	int next;
	int full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;

	e = &log->entry[log->next];
	memset(e, 0, sizeof(*e));
	log->next++;
	if (log->next == ARRAY_SIZE(log->entry)) {
		log->next = 0;
		log->full = 1;
	}
	return e;
}
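
/*
 * The transaction log is a fixed-size ring: binder_transaction_log_add()
 * hands out the next slot and wraps to 0 after filling all 32 entries,
 * setting log->full so readers know the older slots hold valid records
 * too. Each slot is zeroed on reuse, so a partially-written entry is
 * never mistaken for an old one.
 */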
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};
struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};
struct binder_node {
	int debug_id;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	unsigned has_strong_ref:1;
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;
	unsigned min_priority:8;
	struct list_head async_todo;
};
struct binder_ref_death {
	struct binder_work work;
	binder_uintptr_t cookie;
};
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	uint32_t desc;
	int strong;
	int weak;
	struct binder_ref_death *death;
};
struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned debug_id:29;

	struct binder_transaction *transaction;

	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	size_t extra_buffers_size;
	uint8_t data[0];
};
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct vm_area_struct *vma;
	struct mm_struct *vma_vm_mm;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	void *buffer;
	ptrdiff_t user_buffer_offset;

	struct list_head buffers;
	struct rb_root free_buffers;
	struct rb_root allocated_buffers;
	size_t free_async_space;

	struct page **pages;
	size_t buffer_size;
	uint32_t buffer_free;
	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_context *context;
};
enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	uint32_t return_error; /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read */
		/* buffer. Used when sending a reply to a dead process that */
		/* we are also waiting on */
	wait_queue_head_t wait;
	struct binder_stats stats;
};
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int	code;
	unsigned int	flags;
	long	priority;
	long	saved_priority;
	kuid_t	sender_euid;
};

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}
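
/*
 * Note that task_get_unused_fd_flags() allocates a descriptor in the
 * *target* process's file table (proc->files), not the caller's: file
 * descriptors carried in a transaction must be valid in the receiver.
 * The target task's RLIMIT_NOFILE, sampled under its sighand lock,
 * bounds the search in __alloc_fd().
 */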
/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}
/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}
static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		      current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
static size_t binder_buffer_size(struct binder_proc *proc,
				 struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	return (size_t)list_entry(buffer->entry.next,
			  struct binder_buffer, entry) - (size_t)buffer->data;
}
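
/*
 * Buffer sizes are implicit: a binder_buffer's usable size is the
 * distance from its data[] to the start of the next buffer in the
 * address-ordered proc->buffers list (or to the end of the mmap'ed
 * region for the last one). For example, headers at offsets 0 and 256
 * in the region give the first buffer 256 - sizeof(struct binder_buffer)
 * bytes of payload; no explicit length field is stored.
 */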
static void binder_insert_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_buffer_size(proc, new_buffer);

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: add free buffer, size %zd, at %p\n",
		      proc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_buffer_size(proc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}
static void binder_insert_allocated_buffer(struct binder_proc *proc,
					   struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
}
static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
						  uintptr_t user_ptr)
{
	struct rb_node *n = proc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
		- offsetof(struct binder_buffer, data));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else
			return buffer;
	}
	return NULL;
}
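
/*
 * Userspace and the kernel map the same physical pages at different
 * addresses; proc->user_buffer_offset is the constant delta between the
 * two mappings. Translating a user pointer back to the kernel's
 * struct binder_buffer is therefore just
 *
 *	kern_ptr = user_ptr - user_buffer_offset
 *		   - offsetof(struct binder_buffer, data)
 *
 * after which the allocated_buffers rbtree confirms the result really
 * names a live allocation rather than an arbitrary address.
 */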
static int binder_update_page_range(struct binder_proc *proc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct page **page;
	struct mm_struct *mm;

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %p-%p\n", proc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(proc, allocate, start, end);

	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(proc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
		if (vma && mm != proc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n",
				proc->pid);
			vma = NULL;
		}
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
			proc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;

		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (*page == NULL) {
			pr_err("%d: binder_alloc_buf failed for page at %p\n",
				proc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		ret = map_kernel_range_noflush((unsigned long)page_addr,
					PAGE_SIZE, PAGE_KERNEL, page);
		flush_cache_vmap((unsigned long)page_addr,
				(unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
			       proc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + proc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       proc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return -ENOMEM;
}
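
/*
 * binder_update_page_range() is the only place pages enter or leave the
 * buffer area. Each page is mapped twice: once into the kernel's vmalloc
 * space (map_kernel_range_noflush) so the driver can copy_from_user()
 * transaction data into it, and once into the receiver's vma
 * (vm_insert_page) so userspace can read the delivered transaction
 * without a second copy.
 */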
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
					      size_t data_size,
					      size_t offsets_size,
					      size_t extra_buffers_size,
					      int is_async)
{
	struct rb_node *n = proc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size, data_offsets_size;

	if (proc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       proc->pid);
		return NULL;
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
				proc->pid, data_size, offsets_size);
		return NULL;
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n",
				  proc->pid, extra_buffers_size);
		return NULL;
	}
	if (is_async &&
	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			      proc->pid, size);
		return NULL;
	}

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
			proc->pid, size);
		return NULL;
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_buffer_size(proc, buffer);
	}

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
		      proc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	if (binder_update_page_range(proc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
		return NULL;

	rb_erase(best_fit, &proc->free_buffers);
	buffer->free = 0;
	binder_insert_allocated_buffer(proc, buffer);
	if (buffer_size != size) {
		struct binder_buffer *new_buffer = (void *)buffer->data + size;

		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(proc, new_buffer);
	}
	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got %p\n",
		      proc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->extra_buffers_size = extra_buffers_size;
	buffer->async_transaction = is_async;
	if (is_async) {
		proc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			      proc->pid, size, proc->free_async_space);
	}

	return buffer;
}
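
/*
 * Allocation above is best-fit over the free_buffers rbtree, which is
 * keyed by size: the walk remembers the smallest buffer that still fits
 * (best_fit) while descending toward an exact match. A winner larger
 * than the request is split, with the tail reinserted as a new free
 * buffer, unless the leftover could not hold another
 * struct binder_buffer plus at least 4 bytes of payload.
 */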
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer & PAGE_MASK);
}

static void *buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}
static void binder_delete_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	int free_page_end = 1;
	int free_page_start = 1;

	BUG_ON(proc->buffers.next == &buffer->entry);
	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
	BUG_ON(!prev->free);
	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
		free_page_start = 0;
		if (buffer_end_page(prev) == buffer_end_page(buffer))
			free_page_end = 0;
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %p share page with %p\n",
			      proc->pid, buffer, prev);
	}

	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		next = list_entry(buffer->entry.next,
				  struct binder_buffer, entry);
		if (buffer_start_page(next) == buffer_end_page(buffer)) {
			free_page_end = 0;
			if (buffer_start_page(next) ==
			    buffer_start_page(buffer))
				free_page_start = 0;
			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%d: merge free, buffer %p share page with %p\n",
				      proc->pid, buffer, prev);
		}
	}
	list_del(&buffer->entry);
	if (free_page_start || free_page_end) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
			     proc->pid, buffer, free_page_start ? "" : " end",
			     free_page_end ? "" : " start", prev, next);
		binder_update_page_range(proc, 0, free_page_start ?
			buffer_start_page(buffer) : buffer_end_page(buffer),
			(free_page_end ? buffer_end_page(buffer) :
			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
	}
}
static void binder_free_buf(struct binder_proc *proc,
			    struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_buffer_size(proc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %p size %zd buffer_size %zd\n",
		      proc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < proc->buffer);
	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);

	if (buffer->async_transaction) {
		proc->free_async_space += size + sizeof(struct binder_buffer);

		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			      proc->pid, size, proc->free_async_space);
	}

	binder_update_page_range(proc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);
	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);

		if (next->free) {
			rb_erase(&next->rb_node, &proc->free_buffers);
			binder_delete_free_buffer(proc, next);
		}
	}
	if (proc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);

		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(proc, buffer);
}
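
/*
 * Freeing is the mirror image of allocation: pages wholly contained in
 * the buffer are returned via binder_update_page_range(proc, 0, ...),
 * and the buffer is coalesced with any free neighbour in list order
 * before being reinserted into free_buffers, so adjacent free space is
 * always represented by a single entry.
 */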
static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   binder_uintptr_t ptr,
					   binder_uintptr_t cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = ++binder_last_id;
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);
	return node;
}
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->
				      binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}
static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
					 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->desc) {
			n = n->rb_left;
		} else if (desc > ref->desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;
	struct binder_context *context = proc->context;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);

		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for node %d\n",
			      proc->pid, new_ref->debug_id, new_ref->desc,
			      node->debug_id);
	} else {
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for dead node\n",
			      proc->pid, new_ref->debug_id, new_ref->desc);
	}
	return new_ref;
}
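
/*
 * Descriptor assignment above relies on refs_by_desc being sorted:
 * desc 0 is reserved for the context manager, and a new ref takes the
 * lowest unused value, found by scanning in desc order until ref->desc
 * skips past the candidate. Userspace handles are therefore small,
 * densely packed integers that are private to each process.
 */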
static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->debug_id, ref->desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}
static int binder_inc_ref(struct binder_ref *ref, int strong,
			  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->strong++;
	} else {
		if (ref->weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->weak++;
	}
	return 0;
}
static int binder_dec_ref(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->strong--;
		if (ref->strong == 0) {
			int ret;

			ret = binder_dec_node(ref->node, strong, 1);
			if (ret)
				return ret;
		}
	} else {
		if (ref->weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->weak--;
	}
	if (ref->strong == 0 && ref->weak == 0)
		binder_delete_ref(ref);
	return 0;
}
static void binder_pop_transaction(struct binder_thread *target_thread,
				   struct binder_transaction *t)
{
	if (target_thread) {
		BUG_ON(target_thread->transaction_stack != t);
		BUG_ON(target_thread->transaction_stack->from != target_thread);
		target_thread->transaction_stack =
			target_thread->transaction_stack->from_parent;
		t->from = NULL;
	}
	t->need_reply = 0;
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			if (target_thread->return_error != BR_OK &&
			    target_thread->return_error2 == BR_OK) {
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "send failed reply for transaction %d to %d:%d\n",
					      t->debug_id,
					      target_thread->proc->pid,
					      target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
					target_thread->proc->pid,
					target_thread->pid,
					target_thread->return_error);
			}
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_pop_transaction(target_thread, t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}
/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header first */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	if (offset > buffer->data_size - sizeof(*hdr) ||
	    buffer->data_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start:	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @offset:		start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}
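
/*
 * Tracing the allowed example above: validating E (parent = A) right
 * after D (parent = C) starts at last_obj = D and climbs D -> C -> A
 * via the HAS_PARENT flags. Each hop raises last_min_offset just past
 * the parent pointer it consumed, ending at C.parent_offset +
 * sizeof(uintptr_t); E's fixup at offset 32 is accepted because it does
 * not precede that point.
 */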
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_start, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref *ref;

			fp = to_flat_binder_object(hdr);
			ref = binder_get_ref(proc, fp->handle,
					     hdr->type == BINDER_TYPE_HANDLE);
			if (ref == NULL) {
				pr_err("transaction release %d bad handle %d\n",
				       debug_id, fp->handle);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d (node %d)\n",
				     ref->debug_id, ref->desc, ref->node->debug_id);
			binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->fd);
			if (failed_at)
				task_close_fd(proc, fp->fd);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			uintptr_t parent_buffer;
			u32 *fd_array;
			size_t fd_index;
			binder_size_t fd_buf_size;

			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(buffer, fda->parent,
						     off_start,
						     offp - off_start);
			if (!parent) {
				pr_err("transaction release %d bad parent offset",
				       debug_id);
				continue;
			}
			/*
			 * Since the parent was already fixed up, convert it
			 * back to kernel address space to access it
			 */
			parent_buffer = parent->buffer -
				proc->user_buffer_offset;

			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			fd_array = (u32 *)(parent_buffer + fda->parent_offset);
			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
				task_close_fd(proc, fd_array[fd_index]);
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
				debug_id, hdr->type);
			break;
		}
	}
}
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_ref *ref;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp->binder, fp->cookie);
		if (!node)
			return -ENOMEM;

		node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
		node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
		return -EPERM;

	ref = binder_get_ref_for_node(target_proc, node);
	if (!ref)
		return -EINVAL;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = ref->desc;
	fp->cookie = 0;
	binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);

	trace_binder_transaction_node_to_ref(t, node, ref);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     ref->debug_id, ref->desc);

	return 0;
}
static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_ref *ref;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	ref = binder_get_ref(proc, fp->handle,
			     fp->hdr.type == BINDER_TYPE_HANDLE);
	if (!ref) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
		return -EPERM;

	if (ref->node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = ref->node->ptr;
		fp->cookie = ref->node->cookie;
		binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
				0, NULL);
		trace_binder_transaction_ref_to_node(t, ref);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     ref->debug_id, ref->desc, ref->node->debug_id,
			     (u64)ref->node->ptr);
	} else {
		struct binder_ref *new_ref;

		new_ref = binder_get_ref_for_node(target_proc, ref->node);
		if (!new_ref)
			return -EINVAL;

		fp->binder = 0;
		fp->handle = new_ref->desc;
		fp->cookie = 0;
		binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
			       NULL);
		trace_binder_transaction_ref_to_ref(t, ref, new_ref);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     ref->debug_id, ref->desc, new_ref->debug_id,
			     new_ref->desc, ref->node->debug_id);
	}
	return 0;
}
static int binder_translate_fd(int fd,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	int target_fd;
	struct file *file;
	int ret;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
	if (target_fd < 0) {
		ret = -ENOMEM;
		goto err_get_unused_fd;
	}
	task_fd_install(target_proc, target_fd, file);
	trace_binder_transaction_fd(t, fd, target_fd);
	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
		     fd, target_fd);

	return target_fd;

err_get_unused_fd:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}
static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer - target_proc->user_buffer_offset;
	fd_array = (u32 *)(parent_buffer + fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	return target_fd;
}
static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	parent_buffer = (u8 *)(parent->buffer -
			       target_proc->user_buffer_offset);
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}
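
/*
 * bp->buffer was already rewritten (by the BINDER_TYPE_PTR case in
 * binder_transaction()) to the address the *target* process will see,
 * so patching the parent must go through the kernel's view of the same
 * page: subtract user_buffer_offset, then store the target-space
 * pointer at parent + parent_offset. The preceding length check keeps
 * the sizeof(binder_uintptr_t) store inside the parent buffer.
 */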
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end, *off_start;
	binder_size_t off_min;
	u8 *sg_bufp, *sg_buf_end;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error;
	struct binder_buffer_object *last_fixup_obj = NULL;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;

	e = binder_transaction_log_add(&binder_transaction_log);
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	e->context_name = proc->context->name;
	if (reply) {
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			goto err_empty_call_stack;
		}
		binder_set_nice(in_reply_to->saved_priority);
		if (in_reply_to->to_thread != thread) {
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		target_thread = in_reply_to->from;
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			ref = binder_get_ref(proc, tr->target.handle, true);
			if (ref == NULL) {
				binder_user_error("%d:%d got transaction to invalid handle\n",
					proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_invalid_target_handle;
			}
			target_node = ref->node;
		} else {
			target_node = context->binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				goto err_no_context_mgr_node;
			}
		}
		e->to_node = target_node->debug_id;
		target_proc = target_node->proc;
		if (target_proc == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			goto err_invalid_target_handle;
		}
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				return_error = BR_FAILED_REPLY;
				goto err_bad_call_stack;
			}
			while (tmp) {
				if (tmp->from && tmp->from->proc == target_proc)
					target_thread = tmp->from;
				tmp = tmp->from_parent;
			}
		}
	}
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;
	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = ++binder_last_id;
	e->debug_id = t->debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	trace_binder_transaction(reply, t, target_node);
	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (t->buffer == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL);

	off_start = (binder_size_t *)(t->buffer->data +
				      ALIGN(tr->data_size, sizeof(void *)));
	offp = off_start;

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		goto err_bad_offset;
	}
	off_end = (void *)off_start + tr->offsets_size;
	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
	sg_buf_end = sg_bufp + extra_buffers_size;
	off_min = 0;
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(t->buffer, *offp);

		if (object_size == 0 || *offp < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			goto err_bad_offset;
		}

		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
		off_min = *offp + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			int target_fd = binder_translate_fd(fp->fd, t, thread,
							    in_reply_to);

			if (target_fd < 0) {
				return_error = BR_FAILED_REPLY;
				goto err_translate_failed;
			}
			fp->pad_binder = 0;
			fp->fd = target_fd;
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			struct binder_buffer_object *parent =
				binder_validate_ptr(t->buffer, fda->parent,
						    off_start,
						    offp - off_start);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(t->buffer, off_start,
						   parent, fda->parent_offset,
						   last_fixup_obj,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				goto err_translate_failed;
			}
			last_fixup_obj = parent;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end - sg_bufp;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_bad_offset;
			}
			if (copy_from_user(sg_bufp,
					   (const void __user *)(uintptr_t)
					   bp->buffer, bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)sg_bufp +
				target_proc->user_buffer_offset;
			sg_bufp += ALIGN(bp->length, sizeof(u64));

			ret = binder_fixup_parent(t, thread, bp, off_start,
						  offp - off_start,
						  last_fixup_obj,
						  last_fixup_min_off);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				goto err_translate_failed;
			}
			last_fixup_obj = bp;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			goto err_bad_object_type;
		}
	}
	if (reply) {
2185 BUG_ON(t->buffer->async_transaction != 0);
2186 binder_pop_transaction(target_thread, in_reply_to);
2187 } else if (!(t->flags & TF_ONE_WAY)) {
2188 BUG_ON(t->buffer->async_transaction != 0);
2190 t->from_parent = thread->transaction_stack;
2191 thread->transaction_stack = t;
2193 BUG_ON(target_node == NULL);
2194 BUG_ON(t->buffer->async_transaction != 1);
2195 if (target_node->has_async_transaction) {
2196 target_list = &target_node->async_todo;
2197 target_wait = NULL;
2198 } else
2199 target_node->has_async_transaction = 1;
2200 }
2201 t->work.type = BINDER_WORK_TRANSACTION;
2202 list_add_tail(&t->work.entry, target_list);
2203 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
2204 list_add_tail(&tcomplete->entry, &thread->todo);
2205 if (target_wait)
2206 wake_up_interruptible(target_wait);
2207 return;
2209 err_translate_failed:
2210 err_bad_object_type:
2211 err_bad_offset:
2212 err_bad_parent:
2213 err_copy_data_failed:
2214 trace_binder_transaction_failed_buffer_release(t->buffer);
2215 binder_transaction_buffer_release(target_proc, t->buffer, offp);
2216 t->buffer->transaction = NULL;
2217 binder_free_buf(target_proc, t->buffer);
2218 err_binder_alloc_buf_failed:
2219 kfree(tcomplete);
2220 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2221 err_alloc_tcomplete_failed:
2222 kfree(t);
2223 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2224 err_alloc_t_failed:
2225 err_bad_call_stack:
2226 err_empty_call_stack:
2227 err_dead_binder:
2228 err_invalid_target_handle:
2229 err_no_context_mgr_node:
2230 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2231 "%d:%d transaction failed %d, size %lld-%lld\n",
2232 proc->pid, thread->pid, return_error,
2233 (u64)tr->data_size, (u64)tr->offsets_size);
2235 {
2236 struct binder_transaction_log_entry *fe;
2238 fe = binder_transaction_log_add(&binder_transaction_log_failed);
2239 *fe = *e;
2240 }
2242 BUG_ON(thread->return_error != BR_OK);
2243 if (in_reply_to) {
2244 thread->return_error = BR_TRANSACTION_COMPLETE;
2245 binder_send_failed_reply(in_reply_to, return_error);
2246 } else
2247 thread->return_error = return_error;
2248 }
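/*
 * binder_thread_write() - consume BC_* commands from the write buffer.
 * Commands are read from the user buffer one at a time until it is
 * exhausted or a pending return error is set; *consumed is advanced
 * past each completed command so that an interrupted write can be
 * resumed by userspace.
 */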
2250 static int binder_thread_write(struct binder_proc *proc,
2251 struct binder_thread *thread,
2252 binder_uintptr_t binder_buffer, size_t size,
2253 binder_size_t *consumed)
2254 {
2255 uint32_t cmd;
2256 struct binder_context *context = proc->context;
2257 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
2258 void __user *ptr = buffer + *consumed;
2259 void __user *end = buffer + size;
2261 while (ptr < end && thread->return_error == BR_OK) {
2262 if (get_user(cmd, (uint32_t __user *)ptr))
2263 return -EFAULT;
2264 ptr += sizeof(uint32_t);
2265 trace_binder_command(cmd);
2266 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
2267 binder_stats.bc[_IOC_NR(cmd)]++;
2268 proc->stats.bc[_IOC_NR(cmd)]++;
2269 thread->stats.bc[_IOC_NR(cmd)]++;
2270 }
2271 switch (cmd) {
2272 case BC_INCREFS:
2273 case BC_ACQUIRE:
2274 case BC_RELEASE:
2275 case BC_DECREFS: {
2276 uint32_t target;
2277 struct binder_ref *ref;
2278 const char *debug_string;
2280 if (get_user(target, (uint32_t __user *)ptr))
2281 return -EFAULT;
2282 ptr += sizeof(uint32_t);
2283 if (target == 0 && context->binder_context_mgr_node &&
2284 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
2285 ref = binder_get_ref_for_node(proc,
2286 context->binder_context_mgr_node);
2287 if (ref->desc != target) {
2288 binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
2289 proc->pid, thread->pid,
2290 ref->desc);
2291 }
2292 } else
2293 ref = binder_get_ref(proc, target,
2294 cmd == BC_ACQUIRE ||
2295 cmd == BC_RELEASE);
2296 if (ref == NULL) {
2297 binder_user_error("%d:%d refcount change on invalid ref %d\n",
2298 proc->pid, thread->pid, target);
2299 break;
2300 }
2301 switch (cmd) {
2302 case BC_INCREFS:
2303 debug_string = "IncRefs";
2304 binder_inc_ref(ref, 0, NULL);
2305 break;
2306 case BC_ACQUIRE:
2307 debug_string = "Acquire";
2308 binder_inc_ref(ref, 1, NULL);
2309 break;
2310 case BC_RELEASE:
2311 debug_string = "Release";
2312 binder_dec_ref(ref, 1);
2313 break;
2314 case BC_DECREFS:
2315 default:
2316 debug_string = "DecRefs";
2317 binder_dec_ref(ref, 0);
2318 break;
2319 }
2320 binder_debug(BINDER_DEBUG_USER_REFS,
2321 "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
2322 proc->pid, thread->pid, debug_string, ref->debug_id,
2323 ref->desc, ref->strong, ref->weak, ref->node->debug_id);
2324 break;
2325 }
2326 case BC_INCREFS_DONE:
2327 case BC_ACQUIRE_DONE: {
2328 binder_uintptr_t node_ptr;
2329 binder_uintptr_t cookie;
2330 struct binder_node *node;
2332 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
2333 return -EFAULT;
2334 ptr += sizeof(binder_uintptr_t);
2335 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2336 return -EFAULT;
2337 ptr += sizeof(binder_uintptr_t);
2338 node = binder_get_node(proc, node_ptr);
2339 if (node == NULL) {
2340 binder_user_error("%d:%d %s u%016llx no match\n",
2341 proc->pid, thread->pid,
2342 cmd == BC_INCREFS_DONE ?
2343 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
2344 (u64)node_ptr);
2345 break;
2346 }
2348 if (cookie != node->cookie) {
2349 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
2350 proc->pid, thread->pid,
2351 cmd == BC_INCREFS_DONE ?
2352 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
2353 (u64)node_ptr, node->debug_id,
2354 (u64)cookie, (u64)node->cookie);
2355 break;
2356 }
2357 if (cmd == BC_ACQUIRE_DONE) {
2358 if (node->pending_strong_ref == 0) {
2359 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
2360 proc->pid, thread->pid,
2361 node->debug_id);
2362 break;
2363 }
2364 node->pending_strong_ref = 0;
2365 } else {
2366 if (node->pending_weak_ref == 0) {
2367 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
2368 proc->pid, thread->pid,
2369 node->debug_id);
2370 break;
2371 }
2372 node->pending_weak_ref = 0;
2373 }
2374 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
2375 binder_debug(BINDER_DEBUG_USER_REFS,
2376 "%d:%d %s node %d ls %d lw %d\n",
2377 proc->pid, thread->pid,
2378 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
2379 node->debug_id, node->local_strong_refs, node->local_weak_refs);
2380 break;
2381 }
2382 case BC_ATTEMPT_ACQUIRE:
2383 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
2385 case BC_ACQUIRE_RESULT:
2386 pr_err("BC_ACQUIRE_RESULT not supported\n");
2389 case BC_FREE_BUFFER: {
2390 binder_uintptr_t data_ptr;
2391 struct binder_buffer *buffer;
2393 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
2394 return -EFAULT;
2395 ptr += sizeof(binder_uintptr_t);
2397 buffer = binder_buffer_lookup(proc, data_ptr);
2398 if (buffer == NULL) {
2399 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
2400 proc->pid, thread->pid, (u64)data_ptr);
2401 break;
2402 }
2403 if (!buffer->allow_user_free) {
2404 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
2405 proc->pid, thread->pid, (u64)data_ptr);
2406 break;
2407 }
2408 binder_debug(BINDER_DEBUG_FREE_BUFFER,
2409 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
2410 proc->pid, thread->pid, (u64)data_ptr,
2411 buffer->debug_id,
2412 buffer->transaction ? "active" : "finished");
2414 if (buffer->transaction) {
2415 buffer->transaction->buffer = NULL;
2416 buffer->transaction = NULL;
2417 }
2418 if (buffer->async_transaction && buffer->target_node) {
2419 BUG_ON(!buffer->target_node->has_async_transaction);
2420 if (list_empty(&buffer->target_node->async_todo))
2421 buffer->target_node->has_async_transaction = 0;
2422 else
2423 list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
2424 }
2425 trace_binder_transaction_buffer_release(buffer);
2426 binder_transaction_buffer_release(proc, buffer, NULL);
2427 binder_free_buf(proc, buffer);
2428 break;
2429 }
2431 case BC_TRANSACTION_SG:
2432 case BC_REPLY_SG: {
2433 struct binder_transaction_data_sg tr;
2435 if (copy_from_user(&tr, ptr, sizeof(tr)))
2436 return -EFAULT;
2437 ptr += sizeof(tr);
2438 binder_transaction(proc, thread, &tr.transaction_data,
2439 cmd == BC_REPLY_SG, tr.buffers_size);
2440 break;
2441 }
2442 case BC_TRANSACTION:
2443 case BC_REPLY: {
2444 struct binder_transaction_data tr;
2446 if (copy_from_user(&tr, ptr, sizeof(tr)))
2447 return -EFAULT;
2448 ptr += sizeof(tr);
2449 binder_transaction(proc, thread, &tr,
2450 cmd == BC_REPLY, 0);
2451 break;
2452 }
2454 case BC_REGISTER_LOOPER:
2455 binder_debug(BINDER_DEBUG_THREADS,
2456 "%d:%d BC_REGISTER_LOOPER\n",
2457 proc->pid, thread->pid);
2458 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
2459 thread->looper |= BINDER_LOOPER_STATE_INVALID;
2460 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
2461 proc->pid, thread->pid);
2462 } else if (proc->requested_threads == 0) {
2463 thread->looper |= BINDER_LOOPER_STATE_INVALID;
2464 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
2465 proc->pid, thread->pid);
2466 } else {
2467 proc->requested_threads--;
2468 proc->requested_threads_started++;
2469 }
2470 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
2471 break;
2472 case BC_ENTER_LOOPER:
2473 binder_debug(BINDER_DEBUG_THREADS,
2474 "%d:%d BC_ENTER_LOOPER\n",
2475 proc->pid, thread->pid);
2476 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
2477 thread->looper |= BINDER_LOOPER_STATE_INVALID;
2478 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
2479 proc->pid, thread->pid);
2480 }
2481 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
2482 break;
2483 case BC_EXIT_LOOPER:
2484 binder_debug(BINDER_DEBUG_THREADS,
2485 "%d:%d BC_EXIT_LOOPER\n",
2486 proc->pid, thread->pid);
2487 thread->looper |= BINDER_LOOPER_STATE_EXITED;
2488 break;
2490 case BC_REQUEST_DEATH_NOTIFICATION:
2491 case BC_CLEAR_DEATH_NOTIFICATION: {
2492 uint32_t target;
2493 binder_uintptr_t cookie;
2494 struct binder_ref *ref;
2495 struct binder_ref_death *death;
2497 if (get_user(target, (uint32_t __user *)ptr))
2498 return -EFAULT;
2499 ptr += sizeof(uint32_t);
2500 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2501 return -EFAULT;
2502 ptr += sizeof(binder_uintptr_t);
2503 ref = binder_get_ref(proc, target, false);
2504 if (ref == NULL) {
2505 binder_user_error("%d:%d %s invalid ref %d\n",
2506 proc->pid, thread->pid,
2507 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2508 "BC_REQUEST_DEATH_NOTIFICATION" :
2509 "BC_CLEAR_DEATH_NOTIFICATION",
2514 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2515 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
2516 proc->pid, thread->pid,
2517 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2518 "BC_REQUEST_DEATH_NOTIFICATION" :
2519 "BC_CLEAR_DEATH_NOTIFICATION",
2520 (u64)cookie, ref->debug_id, ref->desc,
2521 ref->strong, ref->weak, ref->node->debug_id);
2523 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2524 if (ref->death) {
2525 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
2526 proc->pid, thread->pid);
2527 break;
2528 }
2529 death = kzalloc(sizeof(*death), GFP_KERNEL);
2530 if (death == NULL) {
2531 thread->return_error = BR_ERROR;
2532 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2533 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
2534 proc->pid, thread->pid);
2535 break;
2536 }
2537 binder_stats_created(BINDER_STAT_DEATH);
2538 INIT_LIST_HEAD(&death->work.entry);
2539 death->cookie = cookie;
2540 ref->death = death;
2541 if (ref->node->proc == NULL) {
2542 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2543 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2544 list_add_tail(&ref->death->work.entry, &thread->todo);
2545 } else {
2546 list_add_tail(&ref->death->work.entry, &proc->todo);
2547 wake_up_interruptible(&proc->wait);
2548 }
2549 }
2550 } else {
2551 if (ref->death == NULL) {
2552 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
2553 proc->pid, thread->pid);
2554 break;
2555 }
2556 death = ref->death;
2557 if (death->cookie != cookie) {
2558 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
2559 proc->pid, thread->pid,
2560 (u64)death->cookie,
2561 (u64)cookie);
2562 break;
2563 }
2564 ref->death = NULL;
2565 if (list_empty(&death->work.entry)) {
2566 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2567 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2568 list_add_tail(&death->work.entry, &thread->todo);
2569 } else {
2570 list_add_tail(&death->work.entry, &proc->todo);
2571 wake_up_interruptible(&proc->wait);
2572 }
2573 } else {
2574 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2575 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2576 }
2577 }
2578 } break;
2579 case BC_DEAD_BINDER_DONE: {
2580 struct binder_work *w;
2581 binder_uintptr_t cookie;
2582 struct binder_ref_death *death = NULL;
2584 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2585 return -EFAULT;
2587 ptr += sizeof(cookie);
2588 list_for_each_entry(w, &proc->delivered_death, entry) {
2589 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2591 if (tmp_death->cookie == cookie) {
2592 death = tmp_death;
2593 break;
2594 }
2595 }
2596 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2597 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
2598 proc->pid, thread->pid, (u64)cookie,
2599 death);
2600 if (death == NULL) {
2601 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
2602 proc->pid, thread->pid, (u64)cookie);
2603 break;
2604 }
2606 list_del_init(&death->work.entry);
2607 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2608 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2609 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2610 list_add_tail(&death->work.entry, &thread->todo);
2611 } else {
2612 list_add_tail(&death->work.entry, &proc->todo);
2613 wake_up_interruptible(&proc->wait);
2614 }
2615 }
2616 } break;
2618 default:
2619 pr_err("%d:%d unknown command %d\n",
2620 proc->pid, thread->pid, cmd);
2623 *consumed = ptr - buffer;
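/* Account a BR_* return code in the global, per-proc and per-thread stats. */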
2628 static void binder_stat_br(struct binder_proc *proc,
2629 struct binder_thread *thread, uint32_t cmd)
2631 trace_binder_return(cmd);
2632 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2633 binder_stats.br[_IOC_NR(cmd)]++;
2634 proc->stats.br[_IOC_NR(cmd)]++;
2635 thread->stats.br[_IOC_NR(cmd)]++;
2636 }
2637 }
2639 static int binder_has_proc_work(struct binder_proc *proc,
2640 struct binder_thread *thread)
2642 return !list_empty(&proc->todo) ||
2643 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2644 }
2646 static int binder_has_thread_work(struct binder_thread *thread)
2648 return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2649 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2650 }
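/*
 * binder_thread_read() - fill the read buffer with BR_* work items.
 * A BR_NOOP is written first whenever *consumed is zero; the thread
 * then blocks (unless non_block is set) until work appears on its own
 * todo list or, if it has no transaction stack or pending work, on
 * the process-wide todo list.
 */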
2652 static int binder_thread_read(struct binder_proc *proc,
2653 struct binder_thread *thread,
2654 binder_uintptr_t binder_buffer, size_t size,
2655 binder_size_t *consumed, int non_block)
2657 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
2658 void __user *ptr = buffer + *consumed;
2659 void __user *end = buffer + size;
2661 int ret = 0;
2662 int wait_for_proc_work;
2664 if (*consumed == 0) {
2665 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
2666 return -EFAULT;
2667 ptr += sizeof(uint32_t);
2668 }
2670 retry:
2671 wait_for_proc_work = thread->transaction_stack == NULL &&
2672 list_empty(&thread->todo);
2674 if (thread->return_error != BR_OK && ptr < end) {
2675 if (thread->return_error2 != BR_OK) {
2676 if (put_user(thread->return_error2, (uint32_t __user *)ptr))
2677 return -EFAULT;
2678 ptr += sizeof(uint32_t);
2679 binder_stat_br(proc, thread, thread->return_error2);
2680 if (ptr == end)
2681 goto done;
2682 thread->return_error2 = BR_OK;
2683 }
2684 if (put_user(thread->return_error, (uint32_t __user *)ptr))
2685 return -EFAULT;
2686 ptr += sizeof(uint32_t);
2687 binder_stat_br(proc, thread, thread->return_error);
2688 thread->return_error = BR_OK;
2689 goto done;
2690 }
2693 thread->looper |= BINDER_LOOPER_STATE_WAITING;
2694 if (wait_for_proc_work)
2695 proc->ready_threads++;
2697 binder_unlock(__func__);
2699 trace_binder_wait_for_work(wait_for_proc_work,
2700 !!thread->transaction_stack,
2701 !list_empty(&thread->todo));
2702 if (wait_for_proc_work) {
2703 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2704 BINDER_LOOPER_STATE_ENTERED))) {
2705 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
2706 proc->pid, thread->pid, thread->looper);
2707 wait_event_interruptible(binder_user_error_wait,
2708 binder_stop_on_user_error < 2);
2709 }
2710 binder_set_nice(proc->default_priority);
2711 if (non_block) {
2712 if (!binder_has_proc_work(proc, thread))
2713 ret = -EAGAIN;
2714 } else
2715 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
2716 } else {
2717 if (non_block) {
2718 if (!binder_has_thread_work(thread))
2719 ret = -EAGAIN;
2720 } else
2721 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
2722 }
2724 binder_lock(__func__);
2726 if (wait_for_proc_work)
2727 proc->ready_threads--;
2728 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
2730 if (ret)
2731 return ret;
2733 while (1) {
2734 uint32_t cmd;
2735 struct binder_transaction_data tr;
2736 struct binder_work *w;
2737 struct binder_transaction *t = NULL;
2739 if (!list_empty(&thread->todo)) {
2740 w = list_first_entry(&thread->todo, struct binder_work,
2741 entry);
2742 } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
2743 w = list_first_entry(&proc->todo, struct binder_work,
2744 entry);
2745 } else {
2746 /* no data added */
2747 if (ptr - buffer == 4 &&
2748 !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
2749 goto retry;
2750 break;
2751 }
2753 if (end - ptr < sizeof(tr) + 4)
2754 break;
2756 switch (w->type) {
2757 case BINDER_WORK_TRANSACTION: {
2758 t = container_of(w, struct binder_transaction, work);
2759 } break;
2760 case BINDER_WORK_TRANSACTION_COMPLETE: {
2761 cmd = BR_TRANSACTION_COMPLETE;
2762 if (put_user(cmd, (uint32_t __user *)ptr))
2763 return -EFAULT;
2764 ptr += sizeof(uint32_t);
2766 binder_stat_br(proc, thread, cmd);
2767 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
2768 "%d:%d BR_TRANSACTION_COMPLETE\n",
2769 proc->pid, thread->pid);
2771 list_del(&w->entry);
2772 kfree(w);
2773 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2774 } break;
2775 case BINDER_WORK_NODE: {
2776 struct binder_node *node = container_of(w, struct binder_node, work);
2777 uint32_t cmd = BR_NOOP;
2778 const char *cmd_name;
2779 int strong = node->internal_strong_refs || node->local_strong_refs;
2780 int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
2782 if (weak && !node->has_weak_ref) {
2784 cmd_name = "BR_INCREFS";
2785 node->has_weak_ref = 1;
2786 node->pending_weak_ref = 1;
2787 node->local_weak_refs++;
2788 } else if (strong && !node->has_strong_ref) {
2790 cmd_name = "BR_ACQUIRE";
2791 node->has_strong_ref = 1;
2792 node->pending_strong_ref = 1;
2793 node->local_strong_refs++;
2794 } else if (!strong && node->has_strong_ref) {
2796 cmd_name = "BR_RELEASE";
2797 node->has_strong_ref = 0;
2798 } else if (!weak && node->has_weak_ref) {
2800 cmd_name = "BR_DECREFS";
2801 node->has_weak_ref = 0;
2802 }
2803 if (cmd != BR_NOOP) {
2804 if (put_user(cmd, (uint32_t __user *)ptr))
2805 return -EFAULT;
2806 ptr += sizeof(uint32_t);
2807 if (put_user(node->ptr,
2808 (binder_uintptr_t __user *)ptr))
2809 return -EFAULT;
2810 ptr += sizeof(binder_uintptr_t);
2811 if (put_user(node->cookie,
2812 (binder_uintptr_t __user *)ptr))
2813 return -EFAULT;
2814 ptr += sizeof(binder_uintptr_t);
2816 binder_stat_br(proc, thread, cmd);
2817 binder_debug(BINDER_DEBUG_USER_REFS,
2818 "%d:%d %s %d u%016llx c%016llx\n",
2819 proc->pid, thread->pid, cmd_name,
2820 node->debug_id,
2821 (u64)node->ptr, (u64)node->cookie);
2822 } else {
2823 list_del_init(&w->entry);
2824 if (!weak && !strong) {
2825 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2826 "%d:%d node %d u%016llx c%016llx deleted\n",
2827 proc->pid, thread->pid,
2828 node->debug_id,
2829 (u64)node->ptr,
2830 (u64)node->cookie);
2831 rb_erase(&node->rb_node, &proc->nodes);
2832 kfree(node);
2833 binder_stats_deleted(BINDER_STAT_NODE);
2834 } else {
2835 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2836 "%d:%d node %d u%016llx c%016llx state unchanged\n",
2837 proc->pid, thread->pid,
2838 node->debug_id,
2839 (u64)node->ptr,
2840 (u64)node->cookie);
2841 }
2842 }
2843 } break;
2844 case BINDER_WORK_DEAD_BINDER:
2845 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2846 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2847 struct binder_ref_death *death;
2848 uint32_t cmd;
2850 death = container_of(w, struct binder_ref_death, work);
2851 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
2852 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
2853 else
2854 cmd = BR_DEAD_BINDER;
2855 if (put_user(cmd, (uint32_t __user *)ptr))
2856 return -EFAULT;
2857 ptr += sizeof(uint32_t);
2858 if (put_user(death->cookie,
2859 (binder_uintptr_t __user *)ptr))
2860 return -EFAULT;
2861 ptr += sizeof(binder_uintptr_t);
2862 binder_stat_br(proc, thread, cmd);
2863 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2864 "%d:%d %s %016llx\n",
2865 proc->pid, thread->pid,
2866 cmd == BR_DEAD_BINDER ?
2867 "BR_DEAD_BINDER" :
2868 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
2869 (u64)death->cookie);
2871 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
2872 list_del(&w->entry);
2873 kfree(death);
2874 binder_stats_deleted(BINDER_STAT_DEATH);
2875 } else
2876 list_move(&w->entry, &proc->delivered_death);
2877 if (cmd == BR_DEAD_BINDER)
2878 goto done; /* DEAD_BINDER notifications can cause transactions */
2879 } break;
2880 }
2882 if (!t)
2883 continue;
2885 BUG_ON(t->buffer == NULL);
2886 if (t->buffer->target_node) {
2887 struct binder_node *target_node = t->buffer->target_node;
2889 tr.target.ptr = target_node->ptr;
2890 tr.cookie = target_node->cookie;
2891 t->saved_priority = task_nice(current);
2892 if (t->priority < target_node->min_priority &&
2893 !(t->flags & TF_ONE_WAY))
2894 binder_set_nice(t->priority);
2895 else if (!(t->flags & TF_ONE_WAY) ||
2896 t->saved_priority > target_node->min_priority)
2897 binder_set_nice(target_node->min_priority);
2898 cmd = BR_TRANSACTION;
2899 } else {
2900 tr.target.ptr = 0;
2901 tr.cookie = 0;
2902 cmd = BR_REPLY;
2903 }
2904 tr.code = t->code;
2905 tr.flags = t->flags;
2906 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
2908 if (t->from) {
2909 struct task_struct *sender = t->from->proc->tsk;
2911 tr.sender_pid = task_tgid_nr_ns(sender,
2912 task_active_pid_ns(current));
2913 } else {
2914 tr.sender_pid = 0;
2915 }
2917 tr.data_size = t->buffer->data_size;
2918 tr.offsets_size = t->buffer->offsets_size;
2919 tr.data.ptr.buffer = (binder_uintptr_t)(
2920 (uintptr_t)t->buffer->data +
2921 proc->user_buffer_offset);
2922 tr.data.ptr.offsets = tr.data.ptr.buffer +
2923 ALIGN(t->buffer->data_size,
2924 sizeof(void *));
2926 if (put_user(cmd, (uint32_t __user *)ptr))
2927 return -EFAULT;
2928 ptr += sizeof(uint32_t);
2929 if (copy_to_user(ptr, &tr, sizeof(tr)))
2930 return -EFAULT;
2931 ptr += sizeof(tr);
2933 trace_binder_transaction_received(t);
2934 binder_stat_br(proc, thread, cmd);
2935 binder_debug(BINDER_DEBUG_TRANSACTION,
2936 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
2937 proc->pid, thread->pid,
2938 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
2940 t->debug_id, t->from ? t->from->proc->pid : 0,
2941 t->from ? t->from->pid : 0, cmd,
2942 t->buffer->data_size, t->buffer->offsets_size,
2943 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
2945 list_del(&t->work.entry);
2946 t->buffer->allow_user_free = 1;
2947 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
2948 t->to_parent = thread->transaction_stack;
2949 t->to_thread = thread;
2950 thread->transaction_stack = t;
2951 } else {
2952 t->buffer->transaction = NULL;
2953 kfree(t);
2954 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2955 }
2956 break;
2957 }
2959 done:
2961 *consumed = ptr - buffer;
2962 if (proc->requested_threads + proc->ready_threads == 0 &&
2963 proc->requested_threads_started < proc->max_threads &&
2964 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2965 BINDER_LOOPER_STATE_ENTERED))
2966 /* the user-space code fails to spawn a new thread if we leave this out */) {
2967 proc->requested_threads++;
2968 binder_debug(BINDER_DEBUG_THREADS,
2969 "%d:%d BR_SPAWN_LOOPER\n",
2970 proc->pid, thread->pid);
2971 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
2972 return -EFAULT;
2973 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
2974 }
2975 return 0;
2976 }
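/*
 * Drain a work list that can no longer be delivered: synchronous
 * transactions are answered with BR_DEAD_REPLY, everything else is
 * freed and logged as undelivered.
 */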
2978 static void binder_release_work(struct list_head *list)
2980 struct binder_work *w;
2982 while (!list_empty(list)) {
2983 w = list_first_entry(list, struct binder_work, entry);
2984 list_del_init(&w->entry);
2985 switch (w->type) {
2986 case BINDER_WORK_TRANSACTION: {
2987 struct binder_transaction *t;
2989 t = container_of(w, struct binder_transaction, work);
2990 if (t->buffer->target_node &&
2991 !(t->flags & TF_ONE_WAY)) {
2992 binder_send_failed_reply(t, BR_DEAD_REPLY);
2993 } else {
2994 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2995 "undelivered transaction %d\n",
2997 t->buffer->transaction = NULL;
2998 kfree(t);
2999 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3000 }
3001 } break;
3002 case BINDER_WORK_TRANSACTION_COMPLETE: {
3003 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3004 "undelivered TRANSACTION_COMPLETE\n");
3006 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3007 break;
3008 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3009 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3010 struct binder_ref_death *death;
3012 death = container_of(w, struct binder_ref_death, work);
3013 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3014 "undelivered death notification, %016llx\n",
3015 (u64)death->cookie);
3016 kfree(death);
3017 binder_stats_deleted(BINDER_STAT_DEATH);
3018 break;
3019 default:
3020 pr_err("unexpected work type, %d, not freed\n",
3028 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
3030 struct binder_thread *thread = NULL;
3031 struct rb_node *parent = NULL;
3032 struct rb_node **p = &proc->threads.rb_node;
3034 while (*p) {
3035 parent = *p;
3036 thread = rb_entry(parent, struct binder_thread, rb_node);
3038 if (current->pid < thread->pid)
3039 p = &(*p)->rb_left;
3040 else if (current->pid > thread->pid)
3041 p = &(*p)->rb_right;
3042 else
3043 break;
3044 }
3045 if (thread == NULL) {
3046 thread = kzalloc(sizeof(*thread), GFP_KERNEL);
3047 if (thread == NULL)
3048 return NULL;
3049 binder_stats_created(BINDER_STAT_THREAD);
3050 thread->proc = proc;
3051 thread->pid = current->pid;
3052 init_waitqueue_head(&thread->wait);
3053 INIT_LIST_HEAD(&thread->todo);
3054 rb_link_node(&thread->rb_node, parent, p);
3055 rb_insert_color(&thread->rb_node, &proc->threads);
3056 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
3057 thread->return_error = BR_OK;
3058 thread->return_error2 = BR_OK;
3059 }
3060 return thread;
3061 }
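/*
 * Detach a thread from its proc: remove it from the rbtree, fail any
 * reply still owed through its transaction stack, and release the
 * work left on its todo list. Returns the number of transactions that
 * were still active.
 */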
3063 static int binder_free_thread(struct binder_proc *proc,
3064 struct binder_thread *thread)
3066 struct binder_transaction *t;
3067 struct binder_transaction *send_reply = NULL;
3068 int active_transactions = 0;
3070 rb_erase(&thread->rb_node, &proc->threads);
3071 t = thread->transaction_stack;
3072 if (t && t->to_thread == thread)
3073 send_reply = t;
3074 while (t) {
3075 active_transactions++;
3076 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3077 "release %d:%d transaction %d %s, still active\n",
3078 proc->pid, thread->pid,
3079 t->debug_id,
3080 (t->to_thread == thread) ? "in" : "out");
3082 if (t->to_thread == thread) {
3083 t->to_proc = NULL;
3084 t->to_thread = NULL;
3085 if (t->buffer) {
3086 t->buffer->transaction = NULL;
3087 t->buffer = NULL;
3088 }
3089 t = t->to_parent;
3090 } else if (t->from == thread) {
3091 t->from = NULL;
3092 t = t->from_parent;
3093 } else
3094 BUG();
3095 }
3096 if (send_reply)
3097 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
3098 binder_release_work(&thread->todo);
3099 kfree(thread);
3100 binder_stats_deleted(BINDER_STAT_THREAD);
3101 return active_transactions;
3102 }
3104 static unsigned int binder_poll(struct file *filp,
3105 struct poll_table_struct *wait)
3107 struct binder_proc *proc = filp->private_data;
3108 struct binder_thread *thread = NULL;
3109 int wait_for_proc_work;
3111 binder_lock(__func__);
3113 thread = binder_get_thread(proc);
3115 wait_for_proc_work = thread->transaction_stack == NULL &&
3116 list_empty(&thread->todo) && thread->return_error == BR_OK;
3118 binder_unlock(__func__);
3120 if (wait_for_proc_work) {
3121 if (binder_has_proc_work(proc, thread))
3122 return POLLIN;
3123 poll_wait(filp, &proc->wait, wait);
3124 if (binder_has_proc_work(proc, thread))
3125 return POLLIN;
3126 } else {
3127 if (binder_has_thread_work(thread))
3128 return POLLIN;
3129 poll_wait(filp, &thread->wait, wait);
3130 if (binder_has_thread_work(thread))
3131 return POLLIN;
3132 }
3133 return 0;
3134 }
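/*
 * Handler for BINDER_WRITE_READ: copy in the binder_write_read block,
 * process the write side first, then the read side, and copy the
 * updated consumed counts back out to userspace.
 */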
3136 static int binder_ioctl_write_read(struct file *filp,
3137 unsigned int cmd, unsigned long arg,
3138 struct binder_thread *thread)
3141 struct binder_proc *proc = filp->private_data;
3142 unsigned int size = _IOC_SIZE(cmd);
3143 void __user *ubuf = (void __user *)arg;
3144 struct binder_write_read bwr;
3146 if (size != sizeof(struct binder_write_read)) {
3150 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
3151 ret = -EFAULT;
3152 goto out;
3153 }
3154 binder_debug(BINDER_DEBUG_READ_WRITE,
3155 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
3156 proc->pid, thread->pid,
3157 (u64)bwr.write_size, (u64)bwr.write_buffer,
3158 (u64)bwr.read_size, (u64)bwr.read_buffer);
3160 if (bwr.write_size > 0) {
3161 ret = binder_thread_write(proc, thread,
3164 &bwr.write_consumed);
3165 trace_binder_write_done(ret);
3166 if (ret < 0) {
3167 bwr.read_consumed = 0;
3168 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
3169 ret = -EFAULT;
3170 goto out;
3171 }
3172 }
3173 if (bwr.read_size > 0) {
3174 ret = binder_thread_read(proc, thread, bwr.read_buffer,
3175 bwr.read_size,
3176 &bwr.read_consumed,
3177 filp->f_flags & O_NONBLOCK);
3178 trace_binder_read_done(ret);
3179 if (!list_empty(&proc->todo))
3180 wake_up_interruptible(&proc->wait);
3181 if (ret < 0) {
3182 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
3183 ret = -EFAULT;
3184 goto out;
3185 }
3186 }
3187 binder_debug(BINDER_DEBUG_READ_WRITE,
3188 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
3189 proc->pid, thread->pid,
3190 (u64)bwr.write_consumed, (u64)bwr.write_size,
3191 (u64)bwr.read_consumed, (u64)bwr.read_size);
3192 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
3193 ret = -EFAULT;
3194 goto out;
3195 }
3196 out:
3197 return ret;
3198 }
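/*
 * Handler for BINDER_SET_CONTEXT_MGR: at most one context manager may
 * exist per context, and if a manager uid is already configured it
 * must match the caller's effective uid.
 */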
3200 static int binder_ioctl_set_ctx_mgr(struct file *filp)
3203 struct binder_proc *proc = filp->private_data;
3204 struct binder_context *context = proc->context;
3206 kuid_t curr_euid = current_euid();
3208 if (context->binder_context_mgr_node) {
3209 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
3213 ret = security_binder_set_context_mgr(proc->tsk);
3216 if (uid_valid(context->binder_context_mgr_uid)) {
3217 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
3218 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
3219 from_kuid(&init_user_ns, curr_euid),
3220 from_kuid(&init_user_ns,
3221 context->binder_context_mgr_uid));
3226 context->binder_context_mgr_uid = curr_euid;
3228 context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
3229 if (!context->binder_context_mgr_node) {
3233 context->binder_context_mgr_node->local_weak_refs++;
3234 context->binder_context_mgr_node->local_strong_refs++;
3235 context->binder_context_mgr_node->has_strong_ref = 1;
3236 context->binder_context_mgr_node->has_weak_ref = 1;
3241 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3244 struct binder_proc *proc = filp->private_data;
3245 struct binder_thread *thread;
3246 unsigned int size = _IOC_SIZE(cmd);
3247 void __user *ubuf = (void __user *)arg;
3249 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
3250 proc->pid, current->pid, cmd, arg);*/
3252 trace_binder_ioctl(cmd, arg);
3254 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
3258 binder_lock(__func__);
3259 thread = binder_get_thread(proc);
3260 if (thread == NULL) {
3266 case BINDER_WRITE_READ:
3267 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
3271 case BINDER_SET_MAX_THREADS:
3272 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
3277 case BINDER_SET_CONTEXT_MGR:
3278 ret = binder_ioctl_set_ctx_mgr(filp);
3282 case BINDER_THREAD_EXIT:
3283 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
3284 proc->pid, thread->pid);
3285 binder_free_thread(proc, thread);
3288 case BINDER_VERSION: {
3289 struct binder_version __user *ver = ubuf;
3291 if (size != sizeof(struct binder_version)) {
3295 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
3296 &ver->protocol_version)) {
3309 thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
3310 binder_unlock(__func__);
3311 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
3312 if (ret && ret != -ERESTARTSYS)
3313 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
3315 trace_binder_ioctl_done(ret);
3319 static void binder_vma_open(struct vm_area_struct *vma)
3321 struct binder_proc *proc = vma->vm_private_data;
3323 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3324 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
3325 proc->pid, vma->vm_start, vma->vm_end,
3326 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3327 (unsigned long)pgprot_val(vma->vm_page_prot));
3330 static void binder_vma_close(struct vm_area_struct *vma)
3332 struct binder_proc *proc = vma->vm_private_data;
3334 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3335 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
3336 proc->pid, vma->vm_start, vma->vm_end,
3337 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3338 (unsigned long)pgprot_val(vma->vm_page_prot));
3340 proc->vma_vm_mm = NULL;
3341 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
3344 static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3346 return VM_FAULT_SIGBUS;
3349 static const struct vm_operations_struct binder_vm_ops = {
3350 .open = binder_vma_open,
3351 .close = binder_vma_close,
3352 .fault = binder_vm_fault,
3353 };
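/*
 * Set up the per-process buffer mapping. The VMA is capped at 4MB,
 * must not be writable from userspace, and is mirrored by a kernel vm
 * area so the driver can copy transaction data directly; only the
 * first page is populated up front.
 */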
3355 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
3358 struct vm_struct *area;
3359 struct binder_proc *proc = filp->private_data;
3360 const char *failure_string;
3361 struct binder_buffer *buffer;
3363 if (proc->tsk != current)
3366 if ((vma->vm_end - vma->vm_start) > SZ_4M)
3367 vma->vm_end = vma->vm_start + SZ_4M;
3369 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3370 "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
3371 proc->pid, vma->vm_start, vma->vm_end,
3372 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3373 (unsigned long)pgprot_val(vma->vm_page_prot));
3375 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
3377 failure_string = "bad vm_flags";
3380 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
3382 mutex_lock(&binder_mmap_lock);
3385 failure_string = "already mapped";
3386 goto err_already_mapped;
3389 area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
3392 failure_string = "get_vm_area";
3393 goto err_get_vm_area_failed;
3395 proc->buffer = area->addr;
3396 proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
3397 mutex_unlock(&binder_mmap_lock);
3399 #ifdef CONFIG_CPU_CACHE_VIPT
3400 if (cache_is_vipt_aliasing()) {
3401 while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
3402 pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
3403 vma->vm_start += PAGE_SIZE;
3407 proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
3408 if (proc->pages == NULL) {
3410 failure_string = "alloc page array";
3411 goto err_alloc_pages_failed;
3413 proc->buffer_size = vma->vm_end - vma->vm_start;
3415 vma->vm_ops = &binder_vm_ops;
3416 vma->vm_private_data = proc;
3418 if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
3420 failure_string = "alloc small buf";
3421 goto err_alloc_small_buf_failed;
3423 buffer = proc->buffer;
3424 INIT_LIST_HEAD(&proc->buffers);
3425 list_add(&buffer->entry, &proc->buffers);
3427 binder_insert_free_buffer(proc, buffer);
3428 proc->free_async_space = proc->buffer_size / 2;
3430 proc->files = get_files_struct(current);
3432 proc->vma_vm_mm = vma->vm_mm;
3434 /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
3435 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
3438 err_alloc_small_buf_failed:
3441 err_alloc_pages_failed:
3442 mutex_lock(&binder_mmap_lock);
3443 vfree(proc->buffer);
3444 proc->buffer = NULL;
3445 err_get_vm_area_failed:
3447 mutex_unlock(&binder_mmap_lock);
3449 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
3450 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
3451 return ret;
3452 }
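/*
 * Create a binder_proc for the opening task, bind it to the context of
 * the binder device node being opened, and expose it under the binder
 * debugfs proc directory.
 */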
3454 static int binder_open(struct inode *nodp, struct file *filp)
3456 struct binder_proc *proc;
3457 struct binder_device *binder_dev;
3459 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
3460 current->group_leader->pid, current->pid);
3462 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
3465 get_task_struct(current);
3466 proc->tsk = current;
3467 INIT_LIST_HEAD(&proc->todo);
3468 init_waitqueue_head(&proc->wait);
3469 proc->default_priority = task_nice(current);
3470 binder_dev = container_of(filp->private_data, struct binder_device,
3472 proc->context = &binder_dev->context;
3474 binder_lock(__func__);
3476 binder_stats_created(BINDER_STAT_PROC);
3477 hlist_add_head(&proc->proc_node, &binder_procs);
3478 proc->pid = current->group_leader->pid;
3479 INIT_LIST_HEAD(&proc->delivered_death);
3480 filp->private_data = proc;
3482 binder_unlock(__func__);
3484 if (binder_debugfs_dir_entry_proc) {
3487 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
3489 * proc debug entries are shared between contexts, so
3490 * this will fail if the process tries to open the driver
3491 * again with a different context. The printing code will
3492 * anyway print all contexts that a given PID has, so this
3493 * is not a problem.
3494 */
3495 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
3496 binder_debugfs_dir_entry_proc,
3497 (void *)(unsigned long)proc->pid,
3504 static int binder_flush(struct file *filp, fl_owner_t id)
3506 struct binder_proc *proc = filp->private_data;
3508 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
3513 static void binder_deferred_flush(struct binder_proc *proc)
3518 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
3519 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
3521 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
3522 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
3523 wake_up_interruptible(&thread->wait);
3527 wake_up_interruptible_all(&proc->wait);
3529 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3530 "binder_flush: %d woke %d threads\n", proc->pid,
3534 static int binder_release(struct inode *nodp, struct file *filp)
3536 struct binder_proc *proc = filp->private_data;
3538 debugfs_remove(proc->debugfs_entry);
3539 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
3544 static int binder_node_release(struct binder_node *node, int refs)
3546 struct binder_ref *ref;
3549 list_del_init(&node->work.entry);
3550 binder_release_work(&node->async_todo);
3552 if (hlist_empty(&node->refs)) {
3554 binder_stats_deleted(BINDER_STAT_NODE);
3560 node->local_strong_refs = 0;
3561 node->local_weak_refs = 0;
3562 hlist_add_head(&node->dead_node, &binder_dead_nodes);
3564 hlist_for_each_entry(ref, &node->refs, node_entry) {
3572 if (list_empty(&ref->death->work.entry)) {
3573 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3574 list_add_tail(&ref->death->work.entry,
3576 wake_up_interruptible(&ref->proc->wait);
3581 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3582 "node %d now dead, refs %d, death %d\n",
3583 node->debug_id, refs, death);
3585 return refs;
3586 }
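/*
 * Final teardown of a binder_proc, run from the deferred workqueue:
 * release threads, nodes, refs and queued work, free any outstanding
 * buffers and mapped pages, then drop the proc itself.
 */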
3588 static void binder_deferred_release(struct binder_proc *proc)
3590 struct binder_transaction *t;
3591 struct binder_context *context = proc->context;
3593 int threads, nodes, incoming_refs, outgoing_refs, buffers,
3594 active_transactions, page_count;
3597 BUG_ON(proc->files);
3599 hlist_del(&proc->proc_node);
3601 if (context->binder_context_mgr_node &&
3602 context->binder_context_mgr_node->proc == proc) {
3603 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3604 "%s: %d context_mgr_node gone\n",
3605 __func__, proc->pid);
3606 context->binder_context_mgr_node = NULL;
3610 active_transactions = 0;
3611 while ((n = rb_first(&proc->threads))) {
3612 struct binder_thread *thread;
3614 thread = rb_entry(n, struct binder_thread, rb_node);
3616 active_transactions += binder_free_thread(proc, thread);
3621 while ((n = rb_first(&proc->nodes))) {
3622 struct binder_node *node;
3624 node = rb_entry(n, struct binder_node, rb_node);
3626 rb_erase(&node->rb_node, &proc->nodes);
3627 incoming_refs = binder_node_release(node, incoming_refs);
3631 while ((n = rb_first(&proc->refs_by_desc))) {
3632 struct binder_ref *ref;
3634 ref = rb_entry(n, struct binder_ref, rb_node_desc);
3636 binder_delete_ref(ref);
3639 binder_release_work(&proc->todo);
3640 binder_release_work(&proc->delivered_death);
3643 while ((n = rb_first(&proc->allocated_buffers))) {
3644 struct binder_buffer *buffer;
3646 buffer = rb_entry(n, struct binder_buffer, rb_node);
3648 t = buffer->transaction;
3651 buffer->transaction = NULL;
3652 pr_err("release proc %d, transaction %d, not freed\n",
3653 proc->pid, t->debug_id);
3657 binder_free_buf(proc, buffer);
3661 binder_stats_deleted(BINDER_STAT_PROC);
3667 for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
3670 if (!proc->pages[i])
3671 continue;
3673 page_addr = proc->buffer + i * PAGE_SIZE;
3674 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
3675 "%s: %d: page %d at %p not freed\n",
3676 __func__, proc->pid, i, page_addr);
3677 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
3678 __free_page(proc->pages[i]);
3682 vfree(proc->buffer);
3685 put_task_struct(proc->tsk);
3687 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3688 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
3689 __func__, proc->pid, threads, nodes, incoming_refs,
3690 outgoing_refs, active_transactions, buffers, page_count);
3695 static void binder_deferred_func(struct work_struct *work)
3697 struct binder_proc *proc;
3698 struct files_struct *files;
3703 binder_lock(__func__);
3704 mutex_lock(&binder_deferred_lock);
3705 if (!hlist_empty(&binder_deferred_list)) {
3706 proc = hlist_entry(binder_deferred_list.first,
3707 struct binder_proc, deferred_work_node);
3708 hlist_del_init(&proc->deferred_work_node);
3709 defer = proc->deferred_work;
3710 proc->deferred_work = 0;
3715 mutex_unlock(&binder_deferred_lock);
3718 if (defer & BINDER_DEFERRED_PUT_FILES) {
3719 files = proc->files;
3724 if (defer & BINDER_DEFERRED_FLUSH)
3725 binder_deferred_flush(proc);
3727 if (defer & BINDER_DEFERRED_RELEASE)
3728 binder_deferred_release(proc); /* frees proc */
3730 binder_unlock(__func__);
3732 put_files_struct(files);
3735 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3738 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
3740 mutex_lock(&binder_deferred_lock);
3741 proc->deferred_work |= defer;
3742 if (hlist_unhashed(&proc->deferred_work_node)) {
3743 hlist_add_head(&proc->deferred_work_node,
3744 &binder_deferred_list);
3745 queue_work(binder_deferred_workqueue, &binder_deferred_work);
3747 mutex_unlock(&binder_deferred_lock);
3748 }
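/*
 * The print_binder_*() helpers below format driver state for the
 * debugfs state, stats, transactions and per-proc files.
 */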
3750 static void print_binder_transaction(struct seq_file *m, const char *prefix,
3751 struct binder_transaction *t)
3754 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3755 prefix, t->debug_id, t,
3756 t->from ? t->from->proc->pid : 0,
3757 t->from ? t->from->pid : 0,
3758 t->to_proc ? t->to_proc->pid : 0,
3759 t->to_thread ? t->to_thread->pid : 0,
3760 t->code, t->flags, t->priority, t->need_reply);
3761 if (t->buffer == NULL) {
3762 seq_puts(m, " buffer free\n");
3765 if (t->buffer->target_node)
3766 seq_printf(m, " node %d",
3767 t->buffer->target_node->debug_id);
3768 seq_printf(m, " size %zd:%zd data %p\n",
3769 t->buffer->data_size, t->buffer->offsets_size,
3773 static void print_binder_buffer(struct seq_file *m, const char *prefix,
3774 struct binder_buffer *buffer)
3776 seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
3777 prefix, buffer->debug_id, buffer->data,
3778 buffer->data_size, buffer->offsets_size,
3779 buffer->transaction ? "active" : "delivered");
3782 static void print_binder_work(struct seq_file *m, const char *prefix,
3783 const char *transaction_prefix,
3784 struct binder_work *w)
3786 struct binder_node *node;
3787 struct binder_transaction *t;
3790 case BINDER_WORK_TRANSACTION:
3791 t = container_of(w, struct binder_transaction, work);
3792 print_binder_transaction(m, transaction_prefix, t);
3794 case BINDER_WORK_TRANSACTION_COMPLETE:
3795 seq_printf(m, "%stransaction complete\n", prefix);
3797 case BINDER_WORK_NODE:
3798 node = container_of(w, struct binder_node, work);
3799 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
3800 prefix, node->debug_id,
3801 (u64)node->ptr, (u64)node->cookie);
3803 case BINDER_WORK_DEAD_BINDER:
3804 seq_printf(m, "%shas dead binder\n", prefix);
3806 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3807 seq_printf(m, "%shas cleared dead binder\n", prefix);
3809 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
3810 seq_printf(m, "%shas cleared death notification\n", prefix);
3813 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
3818 static void print_binder_thread(struct seq_file *m,
3819 struct binder_thread *thread,
3822 struct binder_transaction *t;
3823 struct binder_work *w;
3824 size_t start_pos = m->count;
3827 seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
3828 header_pos = m->count;
3829 t = thread->transaction_stack;
3831 if (t->from == thread) {
3832 print_binder_transaction(m,
3833 " outgoing transaction", t);
3835 } else if (t->to_thread == thread) {
3836 print_binder_transaction(m,
3837 " incoming transaction", t);
3840 print_binder_transaction(m, " bad transaction", t);
3844 list_for_each_entry(w, &thread->todo, entry) {
3845 print_binder_work(m, " ", " pending transaction", w);
3847 if (!print_always && m->count == header_pos)
3848 m->count = start_pos;
3851 static void print_binder_node(struct seq_file *m, struct binder_node *node)
3853 struct binder_ref *ref;
3854 struct binder_work *w;
3858 hlist_for_each_entry(ref, &node->refs, node_entry)
3861 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
3862 node->debug_id, (u64)node->ptr, (u64)node->cookie,
3863 node->has_strong_ref, node->has_weak_ref,
3864 node->local_strong_refs, node->local_weak_refs,
3865 node->internal_strong_refs, count);
3867 seq_puts(m, " proc");
3868 hlist_for_each_entry(ref, &node->refs, node_entry)
3869 seq_printf(m, " %d", ref->proc->pid);
3872 list_for_each_entry(w, &node->async_todo, entry)
3873 print_binder_work(m, " ",
3874 " pending async transaction", w);
3877 static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
3879 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
3880 ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
3881 ref->node->debug_id, ref->strong, ref->weak, ref->death);
3884 static void print_binder_proc(struct seq_file *m,
3885 struct binder_proc *proc, int print_all)
3887 struct binder_work *w;
3889 size_t start_pos = m->count;
3892 seq_printf(m, "proc %d\n", proc->pid);
3893 seq_printf(m, "context %s\n", proc->context->name);
3894 header_pos = m->count;
3896 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3897 print_binder_thread(m, rb_entry(n, struct binder_thread,
3898 rb_node), print_all);
3899 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
3900 struct binder_node *node = rb_entry(n, struct binder_node,
3902 if (print_all || node->has_async_transaction)
3903 print_binder_node(m, node);
3906 for (n = rb_first(&proc->refs_by_desc);
3909 print_binder_ref(m, rb_entry(n, struct binder_ref,
3912 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3913 print_binder_buffer(m, " buffer",
3914 rb_entry(n, struct binder_buffer, rb_node));
3915 list_for_each_entry(w, &proc->todo, entry)
3916 print_binder_work(m, " ", " pending transaction", w);
3917 list_for_each_entry(w, &proc->delivered_death, entry) {
3918 seq_puts(m, " has delivered dead binder\n");
3921 if (!print_all && m->count == header_pos)
3922 m->count = start_pos;
3925 static const char * const binder_return_strings[] = {
3930 "BR_ACQUIRE_RESULT",
3932 "BR_TRANSACTION_COMPLETE",
3937 "BR_ATTEMPT_ACQUIRE",
3942 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3946 static const char * const binder_command_strings[] = {
3949 "BC_ACQUIRE_RESULT",
3957 "BC_ATTEMPT_ACQUIRE",
3958 "BC_REGISTER_LOOPER",
3961 "BC_REQUEST_DEATH_NOTIFICATION",
3962 "BC_CLEAR_DEATH_NOTIFICATION",
3963 "BC_DEAD_BINDER_DONE",
3964 "BC_TRANSACTION_SG",
3968 static const char * const binder_objstat_strings[] = {
3975 "transaction_complete"
3978 static void print_binder_stats(struct seq_file *m, const char *prefix,
3979 struct binder_stats *stats)
3983 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
3984 ARRAY_SIZE(binder_command_strings));
3985 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
3987 seq_printf(m, "%s%s: %d\n", prefix,
3988 binder_command_strings[i], stats->bc[i]);
3991 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
3992 ARRAY_SIZE(binder_return_strings));
3993 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
3995 seq_printf(m, "%s%s: %d\n", prefix,
3996 binder_return_strings[i], stats->br[i]);
3999 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
4000 ARRAY_SIZE(binder_objstat_strings));
4001 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
4002 ARRAY_SIZE(stats->obj_deleted));
4003 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
4004 if (stats->obj_created[i] || stats->obj_deleted[i])
4005 seq_printf(m, "%s%s: active %d total %d\n", prefix,
4006 binder_objstat_strings[i],
4007 stats->obj_created[i] - stats->obj_deleted[i],
4008 stats->obj_created[i]);
4012 static void print_binder_proc_stats(struct seq_file *m,
4013 struct binder_proc *proc)
4015 struct binder_work *w;
4017 int count, strong, weak;
4019 seq_printf(m, "proc %d\n", proc->pid);
4020 seq_printf(m, "context %s\n", proc->context->name);
4022 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
4024 seq_printf(m, " threads: %d\n", count);
4025 seq_printf(m, " requested threads: %d+%d/%d\n"
4026 " ready threads %d\n"
4027 " free async space %zd\n", proc->requested_threads,
4028 proc->requested_threads_started, proc->max_threads,
4029 proc->ready_threads, proc->free_async_space);
4031 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
4033 seq_printf(m, " nodes: %d\n", count);
4037 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
4038 struct binder_ref *ref = rb_entry(n, struct binder_ref,
4041 strong += ref->strong;
4044 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
4047 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
4049 seq_printf(m, " buffers: %d\n", count);
4052 list_for_each_entry(w, &proc->todo, entry) {
4054 case BINDER_WORK_TRANSACTION:
4061 seq_printf(m, " pending transactions: %d\n", count);
4063 print_binder_stats(m, " ", &proc->stats);
4067 static int binder_state_show(struct seq_file *m, void *unused)
4069 struct binder_proc *proc;
4070 struct binder_node *node;
4071 int do_lock = !binder_debug_no_lock;
4074 binder_lock(__func__);
4076 seq_puts(m, "binder state:\n");
4078 if (!hlist_empty(&binder_dead_nodes))
4079 seq_puts(m, "dead nodes:\n");
4080 hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
4081 print_binder_node(m, node);
4083 hlist_for_each_entry(proc, &binder_procs, proc_node)
4084 print_binder_proc(m, proc, 1);
4086 binder_unlock(__func__);
4090 static int binder_stats_show(struct seq_file *m, void *unused)
4092 struct binder_proc *proc;
4093 int do_lock = !binder_debug_no_lock;
4096 binder_lock(__func__);
4098 seq_puts(m, "binder stats:\n");
4100 print_binder_stats(m, "", &binder_stats);
4102 hlist_for_each_entry(proc, &binder_procs, proc_node)
4103 print_binder_proc_stats(m, proc);
4105 binder_unlock(__func__);
4109 static int binder_transactions_show(struct seq_file *m, void *unused)
4111 struct binder_proc *proc;
4112 int do_lock = !binder_debug_no_lock;
4115 binder_lock(__func__);
4117 seq_puts(m, "binder transactions:\n");
4118 hlist_for_each_entry(proc, &binder_procs, proc_node)
4119 print_binder_proc(m, proc, 0);
4121 binder_unlock(__func__);
4125 static int binder_proc_show(struct seq_file *m, void *unused)
4127 struct binder_proc *itr;
4128 int pid = (unsigned long)m->private;
4129 int do_lock = !binder_debug_no_lock;
4132 binder_lock(__func__);
4134 hlist_for_each_entry(itr, &binder_procs, proc_node) {
4135 if (itr->pid == pid) {
4136 seq_puts(m, "binder proc state:\n");
4137 print_binder_proc(m, itr, 1);
4141 binder_unlock(__func__);
4145 static void print_binder_transaction_log_entry(struct seq_file *m,
4146 struct binder_transaction_log_entry *e)
4149 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
4150 e->debug_id, (e->call_type == 2) ? "reply" :
4151 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
4152 e->from_thread, e->to_proc, e->to_thread, e->context_name,
4153 e->to_node, e->target_handle, e->data_size, e->offsets_size);
4156 static int binder_transaction_log_show(struct seq_file *m, void *unused)
4158 struct binder_transaction_log *log = m->private;
4162 for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
4163 print_binder_transaction_log_entry(m, &log->entry[i]);
4165 for (i = 0; i < log->next; i++)
4166 print_binder_transaction_log_entry(m, &log->entry[i]);
4170 static const struct file_operations binder_fops = {
4171 .owner = THIS_MODULE,
4172 .poll = binder_poll,
4173 .unlocked_ioctl = binder_ioctl,
4174 .compat_ioctl = binder_ioctl,
4175 .mmap = binder_mmap,
4176 .open = binder_open,
4177 .flush = binder_flush,
4178 .release = binder_release,
4181 BINDER_DEBUG_ENTRY(state);
4182 BINDER_DEBUG_ENTRY(stats);
4183 BINDER_DEBUG_ENTRY(transactions);
4184 BINDER_DEBUG_ENTRY(transaction_log);
4186 static int __init init_binder_device(const char *name)
4189 struct binder_device *binder_device;
4191 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
4195 binder_device->miscdev.fops = &binder_fops;
4196 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
4197 binder_device->miscdev.name = name;
4199 binder_device->context.binder_context_mgr_uid = INVALID_UID;
4200 binder_device->context.name = name;
4202 ret = misc_register(&binder_device->miscdev);
4204 kfree(binder_device);
4208 hlist_add_head(&binder_device->hlist, &binder_devices);
4210 return ret;
4211 }
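/*
 * Module init: create the deferred workqueue and the debugfs tree,
 * then register one misc device for each comma-separated name in the
 * "devices" module parameter.
 */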
4213 static int __init binder_init(void)
4216 char *device_name, *device_names;
4217 struct binder_device *device;
4218 struct hlist_node *tmp;
4220 binder_deferred_workqueue = create_singlethread_workqueue("binder");
4221 if (!binder_deferred_workqueue)
4224 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
4225 if (binder_debugfs_dir_entry_root)
4226 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
4227 binder_debugfs_dir_entry_root);
4229 if (binder_debugfs_dir_entry_root) {
4230 debugfs_create_file("state",
4232 binder_debugfs_dir_entry_root,
4234 &binder_state_fops);
4235 debugfs_create_file("stats",
4237 binder_debugfs_dir_entry_root,
4239 &binder_stats_fops);
4240 debugfs_create_file("transactions",
4242 binder_debugfs_dir_entry_root,
4244 &binder_transactions_fops);
4245 debugfs_create_file("transaction_log",
4247 binder_debugfs_dir_entry_root,
4248 &binder_transaction_log,
4249 &binder_transaction_log_fops);
4250 debugfs_create_file("failed_transaction_log",
4252 binder_debugfs_dir_entry_root,
4253 &binder_transaction_log_failed,
4254 &binder_transaction_log_fops);
4258 * Copy the module_parameter string, because we don't want to
4259 * tokenize it in-place.
4261 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
4262 if (!device_names) {
4264 goto err_alloc_device_names_failed;
4266 strcpy(device_names, binder_devices_param);
4268 while ((device_name = strsep(&device_names, ","))) {
4269 ret = init_binder_device(device_name);
4271 goto err_init_binder_device_failed;
4276 err_init_binder_device_failed:
4277 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
4278 misc_deregister(&device->miscdev);
4279 hlist_del(&device->hlist);
4282 err_alloc_device_names_failed:
4283 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
4285 destroy_workqueue(binder_deferred_workqueue);
4290 device_initcall(binder_init);
4292 #define CREATE_TRACE_POINTS
4293 #include "binder_trace.h"
4295 MODULE_LICENSE("GPL v2");