GNU Linux-libre 4.14.290-gnu1
[releases.git] / drivers / android / binder.c
1 /* binder.c
2  *
3  * Android IPC Subsystem
4  *
5  * Copyright (C) 2007-2008 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 /*
19  * Locking overview
20  *
21  * There are 3 main spinlocks which must be acquired in the
22  * order shown:
23  *
24  * 1) proc->outer_lock : protects binder_ref
25  *    binder_proc_lock() and binder_proc_unlock() are
26  *    used to acq/rel.
27  * 2) node->lock : protects most fields of binder_node.
28  *    binder_node_lock() and binder_node_unlock() are
29  *    used to acq/rel
30  * 3) proc->inner_lock : protects the thread and node lists
31  *    (proc->threads, proc->waiting_threads, proc->nodes)
32  *    and all todo lists associated with the binder_proc
33  *    (proc->todo, thread->todo, proc->delivered_death and
34  *    node->async_todo), as well as thread->transaction_stack
35  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
36  *    are used to acq/rel
37  *
38  * Any lock under procA must never be nested under any lock at the same
39  * level or below on procB.
40  *
41  * Functions that require a lock held on entry indicate which lock
42  * in the suffix of the function name:
43  *
44  * foo_olocked() : requires node->outer_lock
45  * foo_nlocked() : requires node->lock
46  * foo_ilocked() : requires proc->inner_lock
47  * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
48  * foo_nilocked(): requires node->lock and proc->inner_lock
49  * ...
50  */
51
52 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
53
54 #include <asm/cacheflush.h>
55 #include <linux/fdtable.h>
56 #include <linux/file.h>
57 #include <linux/freezer.h>
58 #include <linux/fs.h>
59 #include <linux/list.h>
60 #include <linux/miscdevice.h>
61 #include <linux/module.h>
62 #include <linux/mutex.h>
63 #include <linux/nsproxy.h>
64 #include <linux/poll.h>
65 #include <linux/debugfs.h>
66 #include <linux/rbtree.h>
67 #include <linux/sched/signal.h>
68 #include <linux/sched/mm.h>
69 #include <linux/seq_file.h>
70 #include <linux/uaccess.h>
71 #include <linux/pid_namespace.h>
72 #include <linux/security.h>
73 #include <linux/spinlock.h>
74
75 #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
76 #define BINDER_IPC_32BIT 1
77 #endif
78
79 #include <uapi/linux/android/binder.h>
80 #include "binder_alloc.h"
81 #include "binder_trace.h"
82
83 static HLIST_HEAD(binder_deferred_list);
84 static DEFINE_MUTEX(binder_deferred_lock);
85
86 static HLIST_HEAD(binder_devices);
87 static HLIST_HEAD(binder_procs);
88 static DEFINE_MUTEX(binder_procs_lock);
89
90 static HLIST_HEAD(binder_dead_nodes);
91 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
92
93 static struct dentry *binder_debugfs_dir_entry_root;
94 static struct dentry *binder_debugfs_dir_entry_proc;
95 static atomic_t binder_last_id;
96
97 #define BINDER_DEBUG_ENTRY(name) \
98 static int binder_##name##_open(struct inode *inode, struct file *file) \
99 { \
100         return single_open(file, binder_##name##_show, inode->i_private); \
101 } \
102 \
103 static const struct file_operations binder_##name##_fops = { \
104         .owner = THIS_MODULE, \
105         .open = binder_##name##_open, \
106         .read = seq_read, \
107         .llseek = seq_lseek, \
108         .release = single_release, \
109 }
110
111 static int binder_proc_show(struct seq_file *m, void *unused);
112 BINDER_DEBUG_ENTRY(proc);
113
114 /* This is only defined in include/asm-arm/sizes.h */
115 #ifndef SZ_1K
116 #define SZ_1K                               0x400
117 #endif
118
119 #ifndef SZ_4M
120 #define SZ_4M                               0x400000
121 #endif
122
123 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
124
125 enum {
126         BINDER_DEBUG_USER_ERROR             = 1U << 0,
127         BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
128         BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
129         BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
130         BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
131         BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
132         BINDER_DEBUG_READ_WRITE             = 1U << 6,
133         BINDER_DEBUG_USER_REFS              = 1U << 7,
134         BINDER_DEBUG_THREADS                = 1U << 8,
135         BINDER_DEBUG_TRANSACTION            = 1U << 9,
136         BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
137         BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
138         BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
139         BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
140         BINDER_DEBUG_SPINLOCKS              = 1U << 14,
141 };
142 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
143         BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
144 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
145
146 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
147 module_param_named(devices, binder_devices_param, charp, 0444);
148
149 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
150 static int binder_stop_on_user_error;
151
152 static int binder_set_stop_on_user_error(const char *val,
153                                          struct kernel_param *kp)
154 {
155         int ret;
156
157         ret = param_set_int(val, kp);
158         if (binder_stop_on_user_error < 2)
159                 wake_up(&binder_user_error_wait);
160         return ret;
161 }
162 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
163         param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
164
165 #define binder_debug(mask, x...) \
166         do { \
167                 if (binder_debug_mask & mask) \
168                         pr_info(x); \
169         } while (0)
170
171 #define binder_user_error(x...) \
172         do { \
173                 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
174                         pr_info(x); \
175                 if (binder_stop_on_user_error) \
176                         binder_stop_on_user_error = 2; \
177         } while (0)
178
179 #define to_flat_binder_object(hdr) \
180         container_of(hdr, struct flat_binder_object, hdr)
181
182 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
183
184 #define to_binder_buffer_object(hdr) \
185         container_of(hdr, struct binder_buffer_object, hdr)
186
187 #define to_binder_fd_array_object(hdr) \
188         container_of(hdr, struct binder_fd_array_object, hdr)
189
190 enum binder_stat_types {
191         BINDER_STAT_PROC,
192         BINDER_STAT_THREAD,
193         BINDER_STAT_NODE,
194         BINDER_STAT_REF,
195         BINDER_STAT_DEATH,
196         BINDER_STAT_TRANSACTION,
197         BINDER_STAT_TRANSACTION_COMPLETE,
198         BINDER_STAT_COUNT
199 };
200
201 struct binder_stats {
202         atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
203         atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
204         atomic_t obj_created[BINDER_STAT_COUNT];
205         atomic_t obj_deleted[BINDER_STAT_COUNT];
206 };
207
208 static struct binder_stats binder_stats;
209
210 static inline void binder_stats_deleted(enum binder_stat_types type)
211 {
212         atomic_inc(&binder_stats.obj_deleted[type]);
213 }
214
215 static inline void binder_stats_created(enum binder_stat_types type)
216 {
217         atomic_inc(&binder_stats.obj_created[type]);
218 }
219
220 struct binder_transaction_log_entry {
221         int debug_id;
222         int debug_id_done;
223         int call_type;
224         int from_proc;
225         int from_thread;
226         int target_handle;
227         int to_proc;
228         int to_thread;
229         int to_node;
230         int data_size;
231         int offsets_size;
232         int return_error_line;
233         uint32_t return_error;
234         uint32_t return_error_param;
235         const char *context_name;
236 };
237 struct binder_transaction_log {
238         atomic_t cur;
239         bool full;
240         struct binder_transaction_log_entry entry[32];
241 };
242 static struct binder_transaction_log binder_transaction_log;
243 static struct binder_transaction_log binder_transaction_log_failed;
244
245 static struct binder_transaction_log_entry *binder_transaction_log_add(
246         struct binder_transaction_log *log)
247 {
248         struct binder_transaction_log_entry *e;
249         unsigned int cur = atomic_inc_return(&log->cur);
250
251         if (cur >= ARRAY_SIZE(log->entry))
252                 log->full = 1;
253         e = &log->entry[cur % ARRAY_SIZE(log->entry)];
254         WRITE_ONCE(e->debug_id_done, 0);
255         /*
256          * write-barrier to synchronize access to e->debug_id_done.
257          * We make sure the initialized 0 value is seen before
258          * memset() other fields are zeroed by memset.
259          */
260         smp_wmb();
261         memset(e, 0, sizeof(*e));
262         return e;
263 }
264
265 struct binder_context {
266         struct binder_node *binder_context_mgr_node;
267         struct mutex context_mgr_node_lock;
268
269         kuid_t binder_context_mgr_uid;
270         const char *name;
271 };
272
273 struct binder_device {
274         struct hlist_node hlist;
275         struct miscdevice miscdev;
276         struct binder_context context;
277 };
278
279 /**
280  * struct binder_work - work enqueued on a worklist
281  * @entry:             node enqueued on list
282  * @type:              type of work to be performed
283  *
284  * There are separate work lists for proc, thread, and node (async).
285  */
286 struct binder_work {
287         struct list_head entry;
288
289         enum binder_work_type {
290                 BINDER_WORK_TRANSACTION = 1,
291                 BINDER_WORK_TRANSACTION_COMPLETE,
292                 BINDER_WORK_RETURN_ERROR,
293                 BINDER_WORK_NODE,
294                 BINDER_WORK_DEAD_BINDER,
295                 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
296                 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
297         } type;
298 };
299
300 struct binder_error {
301         struct binder_work work;
302         uint32_t cmd;
303 };
304
305 /**
306  * struct binder_node - binder node bookkeeping
307  * @debug_id:             unique ID for debugging
308  *                        (invariant after initialized)
309  * @lock:                 lock for node fields
310  * @work:                 worklist element for node work
311  *                        (protected by @proc->inner_lock)
312  * @rb_node:              element for proc->nodes tree
313  *                        (protected by @proc->inner_lock)
314  * @dead_node:            element for binder_dead_nodes list
315  *                        (protected by binder_dead_nodes_lock)
316  * @proc:                 binder_proc that owns this node
317  *                        (invariant after initialized)
318  * @refs:                 list of references on this node
319  *                        (protected by @lock)
320  * @internal_strong_refs: used to take strong references when
321  *                        initiating a transaction
322  *                        (protected by @proc->inner_lock if @proc
323  *                        and by @lock)
324  * @local_weak_refs:      weak user refs from local process
325  *                        (protected by @proc->inner_lock if @proc
326  *                        and by @lock)
327  * @local_strong_refs:    strong user refs from local process
328  *                        (protected by @proc->inner_lock if @proc
329  *                        and by @lock)
330  * @tmp_refs:             temporary kernel refs
331  *                        (protected by @proc->inner_lock while @proc
332  *                        is valid, and by binder_dead_nodes_lock
333  *                        if @proc is NULL. During inc/dec and node release
334  *                        it is also protected by @lock to provide safety
335  *                        as the node dies and @proc becomes NULL)
336  * @ptr:                  userspace pointer for node
337  *                        (invariant, no lock needed)
338  * @cookie:               userspace cookie for node
339  *                        (invariant, no lock needed)
340  * @has_strong_ref:       userspace notified of strong ref
341  *                        (protected by @proc->inner_lock if @proc
342  *                        and by @lock)
343  * @pending_strong_ref:   userspace has acked notification of strong ref
344  *                        (protected by @proc->inner_lock if @proc
345  *                        and by @lock)
346  * @has_weak_ref:         userspace notified of weak ref
347  *                        (protected by @proc->inner_lock if @proc
348  *                        and by @lock)
349  * @pending_weak_ref:     userspace has acked notification of weak ref
350  *                        (protected by @proc->inner_lock if @proc
351  *                        and by @lock)
352  * @has_async_transaction: async transaction to node in progress
353  *                        (protected by @lock)
354  * @accept_fds:           file descriptor operations supported for node
355  *                        (invariant after initialized)
356  * @min_priority:         minimum scheduling priority
357  *                        (invariant after initialized)
358  * @async_todo:           list of async work items
359  *                        (protected by @proc->inner_lock)
360  *
361  * Bookkeeping structure for binder nodes.
362  */
363 struct binder_node {
364         int debug_id;
365         spinlock_t lock;
366         struct binder_work work;
367         union {
368                 struct rb_node rb_node;
369                 struct hlist_node dead_node;
370         };
371         struct binder_proc *proc;
372         struct hlist_head refs;
373         int internal_strong_refs;
374         int local_weak_refs;
375         int local_strong_refs;
376         int tmp_refs;
377         binder_uintptr_t ptr;
378         binder_uintptr_t cookie;
379         struct {
380                 /*
381                  * bitfield elements protected by
382                  * proc inner_lock
383                  */
384                 u8 has_strong_ref:1;
385                 u8 pending_strong_ref:1;
386                 u8 has_weak_ref:1;
387                 u8 pending_weak_ref:1;
388         };
389         struct {
390                 /*
391                  * invariant after initialization
392                  */
393                 u8 accept_fds:1;
394                 u8 min_priority;
395         };
396         bool has_async_transaction;
397         struct list_head async_todo;
398 };
399
400 struct binder_ref_death {
401         /**
402          * @work: worklist element for death notifications
403          *        (protected by inner_lock of the proc that
404          *        this ref belongs to)
405          */
406         struct binder_work work;
407         binder_uintptr_t cookie;
408 };
409
410 /**
411  * struct binder_ref_data - binder_ref counts and id
412  * @debug_id:        unique ID for the ref
413  * @desc:            unique userspace handle for ref
414  * @strong:          strong ref count (debugging only if not locked)
415  * @weak:            weak ref count (debugging only if not locked)
416  *
417  * Structure to hold ref count and ref id information. Since
418  * the actual ref can only be accessed with a lock, this structure
419  * is used to return information about the ref to callers of
420  * ref inc/dec functions.
421  */
422 struct binder_ref_data {
423         int debug_id;
424         uint32_t desc;
425         int strong;
426         int weak;
427 };
428
429 /**
430  * struct binder_ref - struct to track references on nodes
431  * @data:        binder_ref_data containing id, handle, and current refcounts
432  * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
433  * @rb_node_node: node for lookup by @node in proc's rb_tree
434  * @node_entry:  list entry for node->refs list in target node
435  *               (protected by @node->lock)
436  * @proc:        binder_proc containing ref
437  * @node:        binder_node of target node. When cleaning up a
438  *               ref for deletion in binder_cleanup_ref, a non-NULL
439  *               @node indicates the node must be freed
440  * @death:       pointer to death notification (ref_death) if requested
441  *               (protected by @node->lock)
442  *
443  * Structure to track references from procA to target node (on procB). This
444  * structure is unsafe to access without holding @proc->outer_lock.
445  */
446 struct binder_ref {
447         /* Lookups needed: */
448         /*   node + proc => ref (transaction) */
449         /*   desc + proc => ref (transaction, inc/dec ref) */
450         /*   node => refs + procs (proc exit) */
451         struct binder_ref_data data;
452         struct rb_node rb_node_desc;
453         struct rb_node rb_node_node;
454         struct hlist_node node_entry;
455         struct binder_proc *proc;
456         struct binder_node *node;
457         struct binder_ref_death *death;
458 };
459
460 enum binder_deferred_state {
461         BINDER_DEFERRED_PUT_FILES    = 0x01,
462         BINDER_DEFERRED_FLUSH        = 0x02,
463         BINDER_DEFERRED_RELEASE      = 0x04,
464 };
465
466 /**
467  * struct binder_proc - binder process bookkeeping
468  * @proc_node:            element for binder_procs list
469  * @threads:              rbtree of binder_threads in this proc
470  *                        (protected by @inner_lock)
471  * @nodes:                rbtree of binder nodes associated with
472  *                        this proc ordered by node->ptr
473  *                        (protected by @inner_lock)
474  * @refs_by_desc:         rbtree of refs ordered by ref->desc
475  *                        (protected by @outer_lock)
476  * @refs_by_node:         rbtree of refs ordered by ref->node
477  *                        (protected by @outer_lock)
478  * @waiting_threads:      threads currently waiting for proc work
479  *                        (protected by @inner_lock)
480  * @pid                   PID of group_leader of process
481  *                        (invariant after initialized)
482  * @tsk                   task_struct for group_leader of process
483  *                        (invariant after initialized)
484  * @files                 files_struct for process
485  *                        (protected by @files_lock)
486  * @files_lock            mutex to protect @files
487  * @cred                  struct cred associated with the `struct file`
488  *                        in binder_open()
489  *                        (invariant after initialized)
490  * @deferred_work_node:   element for binder_deferred_list
491  *                        (protected by binder_deferred_lock)
492  * @deferred_work:        bitmap of deferred work to perform
493  *                        (protected by binder_deferred_lock)
494  * @is_dead:              process is dead and awaiting free
495  *                        when outstanding transactions are cleaned up
496  *                        (protected by @inner_lock)
497  * @todo:                 list of work for this process
498  *                        (protected by @inner_lock)
499  * @wait:                 wait queue head to wait for proc work
500  *                        (invariant after initialized)
501  * @stats:                per-process binder statistics
502  *                        (atomics, no lock needed)
503  * @delivered_death:      list of delivered death notification
504  *                        (protected by @inner_lock)
505  * @max_threads:          cap on number of binder threads
506  *                        (protected by @inner_lock)
507  * @requested_threads:    number of binder threads requested but not
508  *                        yet started. In current implementation, can
509  *                        only be 0 or 1.
510  *                        (protected by @inner_lock)
511  * @requested_threads_started: number binder threads started
512  *                        (protected by @inner_lock)
513  * @tmp_ref:              temporary reference to indicate proc is in use
514  *                        (protected by @inner_lock)
515  * @default_priority:     default scheduler priority
516  *                        (invariant after initialized)
517  * @debugfs_entry:        debugfs node
518  * @alloc:                binder allocator bookkeeping
519  * @context:              binder_context for this proc
520  *                        (invariant after initialized)
521  * @inner_lock:           can nest under outer_lock and/or node lock
522  * @outer_lock:           no nesting under innor or node lock
523  *                        Lock order: 1) outer, 2) node, 3) inner
524  *
525  * Bookkeeping structure for binder processes
526  */
527 struct binder_proc {
528         struct hlist_node proc_node;
529         struct rb_root threads;
530         struct rb_root nodes;
531         struct rb_root refs_by_desc;
532         struct rb_root refs_by_node;
533         struct list_head waiting_threads;
534         int pid;
535         struct task_struct *tsk;
536         struct files_struct *files;
537         struct mutex files_lock;
538         const struct cred *cred;
539         struct hlist_node deferred_work_node;
540         int deferred_work;
541         bool is_dead;
542
543         struct list_head todo;
544         wait_queue_head_t wait;
545         struct binder_stats stats;
546         struct list_head delivered_death;
547         int max_threads;
548         int requested_threads;
549         int requested_threads_started;
550         int tmp_ref;
551         long default_priority;
552         struct dentry *debugfs_entry;
553         struct binder_alloc alloc;
554         struct binder_context *context;
555         spinlock_t inner_lock;
556         spinlock_t outer_lock;
557 };
558
559 enum {
560         BINDER_LOOPER_STATE_REGISTERED  = 0x01,
561         BINDER_LOOPER_STATE_ENTERED     = 0x02,
562         BINDER_LOOPER_STATE_EXITED      = 0x04,
563         BINDER_LOOPER_STATE_INVALID     = 0x08,
564         BINDER_LOOPER_STATE_WAITING     = 0x10,
565         BINDER_LOOPER_STATE_POLL        = 0x20,
566 };
567
568 /**
569  * struct binder_thread - binder thread bookkeeping
570  * @proc:                 binder process for this thread
571  *                        (invariant after initialization)
572  * @rb_node:              element for proc->threads rbtree
573  *                        (protected by @proc->inner_lock)
574  * @waiting_thread_node:  element for @proc->waiting_threads list
575  *                        (protected by @proc->inner_lock)
576  * @pid:                  PID for this thread
577  *                        (invariant after initialization)
578  * @looper:               bitmap of looping state
579  *                        (only accessed by this thread)
580  * @looper_needs_return:  looping thread needs to exit driver
581  *                        (no lock needed)
582  * @transaction_stack:    stack of in-progress transactions for this thread
583  *                        (protected by @proc->inner_lock)
584  * @todo:                 list of work to do for this thread
585  *                        (protected by @proc->inner_lock)
586  * @return_error:         transaction errors reported by this thread
587  *                        (only accessed by this thread)
588  * @reply_error:          transaction errors reported by target thread
589  *                        (protected by @proc->inner_lock)
590  * @wait:                 wait queue for thread work
591  * @stats:                per-thread statistics
592  *                        (atomics, no lock needed)
593  * @tmp_ref:              temporary reference to indicate thread is in use
594  *                        (atomic since @proc->inner_lock cannot
595  *                        always be acquired)
596  * @is_dead:              thread is dead and awaiting free
597  *                        when outstanding transactions are cleaned up
598  *                        (protected by @proc->inner_lock)
599  *
600  * Bookkeeping structure for binder threads.
601  */
602 struct binder_thread {
603         struct binder_proc *proc;
604         struct rb_node rb_node;
605         struct list_head waiting_thread_node;
606         int pid;
607         int looper;              /* only modified by this thread */
608         bool looper_need_return; /* can be written by other thread */
609         struct binder_transaction *transaction_stack;
610         struct list_head todo;
611         struct binder_error return_error;
612         struct binder_error reply_error;
613         wait_queue_head_t wait;
614         struct binder_stats stats;
615         atomic_t tmp_ref;
616         bool is_dead;
617 };
618
619 struct binder_transaction {
620         int debug_id;
621         struct binder_work work;
622         struct binder_thread *from;
623         struct binder_transaction *from_parent;
624         struct binder_proc *to_proc;
625         struct binder_thread *to_thread;
626         struct binder_transaction *to_parent;
627         unsigned need_reply:1;
628         /* unsigned is_dead:1; */       /* not used at the moment */
629
630         struct binder_buffer *buffer;
631         unsigned int    code;
632         unsigned int    flags;
633         long    priority;
634         long    saved_priority;
635         kuid_t  sender_euid;
636         /**
637          * @lock:  protects @from, @to_proc, and @to_thread
638          *
639          * @from, @to_proc, and @to_thread can be set to NULL
640          * during thread teardown
641          */
642         spinlock_t lock;
643 };
644
645 /**
646  * binder_proc_lock() - Acquire outer lock for given binder_proc
647  * @proc:         struct binder_proc to acquire
648  *
649  * Acquires proc->outer_lock. Used to protect binder_ref
650  * structures associated with the given proc.
651  */
652 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
653 static void
654 _binder_proc_lock(struct binder_proc *proc, int line)
655 {
656         binder_debug(BINDER_DEBUG_SPINLOCKS,
657                      "%s: line=%d\n", __func__, line);
658         spin_lock(&proc->outer_lock);
659 }
660
661 /**
662  * binder_proc_unlock() - Release spinlock for given binder_proc
663  * @proc:         struct binder_proc to acquire
664  *
665  * Release lock acquired via binder_proc_lock()
666  */
667 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
668 static void
669 _binder_proc_unlock(struct binder_proc *proc, int line)
670 {
671         binder_debug(BINDER_DEBUG_SPINLOCKS,
672                      "%s: line=%d\n", __func__, line);
673         spin_unlock(&proc->outer_lock);
674 }
675
676 /**
677  * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
678  * @proc:         struct binder_proc to acquire
679  *
680  * Acquires proc->inner_lock. Used to protect todo lists
681  */
682 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
683 static void
684 _binder_inner_proc_lock(struct binder_proc *proc, int line)
685 {
686         binder_debug(BINDER_DEBUG_SPINLOCKS,
687                      "%s: line=%d\n", __func__, line);
688         spin_lock(&proc->inner_lock);
689 }
690
691 /**
692  * binder_inner_proc_unlock() - Release inner lock for given binder_proc
693  * @proc:         struct binder_proc to acquire
694  *
695  * Release lock acquired via binder_inner_proc_lock()
696  */
697 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
698 static void
699 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
700 {
701         binder_debug(BINDER_DEBUG_SPINLOCKS,
702                      "%s: line=%d\n", __func__, line);
703         spin_unlock(&proc->inner_lock);
704 }
705
706 /**
707  * binder_node_lock() - Acquire spinlock for given binder_node
708  * @node:         struct binder_node to acquire
709  *
710  * Acquires node->lock. Used to protect binder_node fields
711  */
712 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
713 static void
714 _binder_node_lock(struct binder_node *node, int line)
715 {
716         binder_debug(BINDER_DEBUG_SPINLOCKS,
717                      "%s: line=%d\n", __func__, line);
718         spin_lock(&node->lock);
719 }
720
721 /**
722  * binder_node_unlock() - Release spinlock for given binder_proc
723  * @node:         struct binder_node to acquire
724  *
725  * Release lock acquired via binder_node_lock()
726  */
727 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
728 static void
729 _binder_node_unlock(struct binder_node *node, int line)
730 {
731         binder_debug(BINDER_DEBUG_SPINLOCKS,
732                      "%s: line=%d\n", __func__, line);
733         spin_unlock(&node->lock);
734 }
735
736 /**
737  * binder_node_inner_lock() - Acquire node and inner locks
738  * @node:         struct binder_node to acquire
739  *
740  * Acquires node->lock. If node->proc also acquires
741  * proc->inner_lock. Used to protect binder_node fields
742  */
743 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
744 static void
745 _binder_node_inner_lock(struct binder_node *node, int line)
746 {
747         binder_debug(BINDER_DEBUG_SPINLOCKS,
748                      "%s: line=%d\n", __func__, line);
749         spin_lock(&node->lock);
750         if (node->proc)
751                 binder_inner_proc_lock(node->proc);
752 }
753
754 /**
755  * binder_node_unlock() - Release node and inner locks
756  * @node:         struct binder_node to acquire
757  *
758  * Release lock acquired via binder_node_lock()
759  */
760 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
761 static void
762 _binder_node_inner_unlock(struct binder_node *node, int line)
763 {
764         struct binder_proc *proc = node->proc;
765
766         binder_debug(BINDER_DEBUG_SPINLOCKS,
767                      "%s: line=%d\n", __func__, line);
768         if (proc)
769                 binder_inner_proc_unlock(proc);
770         spin_unlock(&node->lock);
771 }
772
773 static bool binder_worklist_empty_ilocked(struct list_head *list)
774 {
775         return list_empty(list);
776 }
777
778 /**
779  * binder_worklist_empty() - Check if no items on the work list
780  * @proc:       binder_proc associated with list
781  * @list:       list to check
782  *
783  * Return: true if there are no items on list, else false
784  */
785 static bool binder_worklist_empty(struct binder_proc *proc,
786                                   struct list_head *list)
787 {
788         bool ret;
789
790         binder_inner_proc_lock(proc);
791         ret = binder_worklist_empty_ilocked(list);
792         binder_inner_proc_unlock(proc);
793         return ret;
794 }
795
796 static void
797 binder_enqueue_work_ilocked(struct binder_work *work,
798                            struct list_head *target_list)
799 {
800         BUG_ON(target_list == NULL);
801         BUG_ON(work->entry.next && !list_empty(&work->entry));
802         list_add_tail(&work->entry, target_list);
803 }
804
805 /**
806  * binder_enqueue_work() - Add an item to the work list
807  * @proc:         binder_proc associated with list
808  * @work:         struct binder_work to add to list
809  * @target_list:  list to add work to
810  *
811  * Adds the work to the specified list. Asserts that work
812  * is not already on a list.
813  */
814 static void
815 binder_enqueue_work(struct binder_proc *proc,
816                     struct binder_work *work,
817                     struct list_head *target_list)
818 {
819         binder_inner_proc_lock(proc);
820         binder_enqueue_work_ilocked(work, target_list);
821         binder_inner_proc_unlock(proc);
822 }
823
824 static void
825 binder_dequeue_work_ilocked(struct binder_work *work)
826 {
827         list_del_init(&work->entry);
828 }
829
830 /**
831  * binder_dequeue_work() - Removes an item from the work list
832  * @proc:         binder_proc associated with list
833  * @work:         struct binder_work to remove from list
834  *
835  * Removes the specified work item from whatever list it is on.
836  * Can safely be called if work is not on any list.
837  */
838 static void
839 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
840 {
841         binder_inner_proc_lock(proc);
842         binder_dequeue_work_ilocked(work);
843         binder_inner_proc_unlock(proc);
844 }
845
846 static struct binder_work *binder_dequeue_work_head_ilocked(
847                                         struct list_head *list)
848 {
849         struct binder_work *w;
850
851         w = list_first_entry_or_null(list, struct binder_work, entry);
852         if (w)
853                 list_del_init(&w->entry);
854         return w;
855 }
856
857 static void
858 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
859 static void binder_free_thread(struct binder_thread *thread);
860 static void binder_free_proc(struct binder_proc *proc);
861 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
862
863 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
864 {
865         unsigned long rlim_cur;
866         unsigned long irqs;
867         int ret;
868
869         mutex_lock(&proc->files_lock);
870         if (proc->files == NULL) {
871                 ret = -ESRCH;
872                 goto err;
873         }
874         if (!lock_task_sighand(proc->tsk, &irqs)) {
875                 ret = -EMFILE;
876                 goto err;
877         }
878         rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
879         unlock_task_sighand(proc->tsk, &irqs);
880
881         ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
882 err:
883         mutex_unlock(&proc->files_lock);
884         return ret;
885 }
886
887 /*
888  * copied from fd_install
889  */
890 static void task_fd_install(
891         struct binder_proc *proc, unsigned int fd, struct file *file)
892 {
893         mutex_lock(&proc->files_lock);
894         if (proc->files)
895                 __fd_install(proc->files, fd, file);
896         mutex_unlock(&proc->files_lock);
897 }
898
899 /*
900  * copied from sys_close
901  */
902 static long task_close_fd(struct binder_proc *proc, unsigned int fd)
903 {
904         int retval;
905
906         mutex_lock(&proc->files_lock);
907         if (proc->files == NULL) {
908                 retval = -ESRCH;
909                 goto err;
910         }
911         retval = __close_fd(proc->files, fd);
912         /* can't restart close syscall because file table entry was cleared */
913         if (unlikely(retval == -ERESTARTSYS ||
914                      retval == -ERESTARTNOINTR ||
915                      retval == -ERESTARTNOHAND ||
916                      retval == -ERESTART_RESTARTBLOCK))
917                 retval = -EINTR;
918 err:
919         mutex_unlock(&proc->files_lock);
920         return retval;
921 }
922
923 static bool binder_has_work_ilocked(struct binder_thread *thread,
924                                     bool do_proc_work)
925 {
926         return !binder_worklist_empty_ilocked(&thread->todo) ||
927                 thread->looper_need_return ||
928                 (do_proc_work &&
929                  !binder_worklist_empty_ilocked(&thread->proc->todo));
930 }
931
932 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
933 {
934         bool has_work;
935
936         binder_inner_proc_lock(thread->proc);
937         has_work = binder_has_work_ilocked(thread, do_proc_work);
938         binder_inner_proc_unlock(thread->proc);
939
940         return has_work;
941 }
942
943 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
944 {
945         return !thread->transaction_stack &&
946                 binder_worklist_empty_ilocked(&thread->todo) &&
947                 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
948                                    BINDER_LOOPER_STATE_REGISTERED));
949 }
950
951 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
952                                                bool sync)
953 {
954         struct rb_node *n;
955         struct binder_thread *thread;
956
957         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
958                 thread = rb_entry(n, struct binder_thread, rb_node);
959                 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
960                     binder_available_for_proc_work_ilocked(thread)) {
961                         if (sync)
962                                 wake_up_interruptible_sync(&thread->wait);
963                         else
964                                 wake_up_interruptible(&thread->wait);
965                 }
966         }
967 }
968
969 /**
970  * binder_select_thread_ilocked() - selects a thread for doing proc work.
971  * @proc:       process to select a thread from
972  *
973  * Note that calling this function moves the thread off the waiting_threads
974  * list, so it can only be woken up by the caller of this function, or a
975  * signal. Therefore, callers *should* always wake up the thread this function
976  * returns.
977  *
978  * Return:      If there's a thread currently waiting for process work,
979  *              returns that thread. Otherwise returns NULL.
980  */
981 static struct binder_thread *
982 binder_select_thread_ilocked(struct binder_proc *proc)
983 {
984         struct binder_thread *thread;
985
986         assert_spin_locked(&proc->inner_lock);
987         thread = list_first_entry_or_null(&proc->waiting_threads,
988                                           struct binder_thread,
989                                           waiting_thread_node);
990
991         if (thread)
992                 list_del_init(&thread->waiting_thread_node);
993
994         return thread;
995 }
996
997 /**
998  * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
999  * @proc:       process to wake up a thread in
1000  * @thread:     specific thread to wake-up (may be NULL)
1001  * @sync:       whether to do a synchronous wake-up
1002  *
1003  * This function wakes up a thread in the @proc process.
1004  * The caller may provide a specific thread to wake-up in
1005  * the @thread parameter. If @thread is NULL, this function
1006  * will wake up threads that have called poll().
1007  *
1008  * Note that for this function to work as expected, callers
1009  * should first call binder_select_thread() to find a thread
1010  * to handle the work (if they don't have a thread already),
1011  * and pass the result into the @thread parameter.
1012  */
1013 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1014                                          struct binder_thread *thread,
1015                                          bool sync)
1016 {
1017         assert_spin_locked(&proc->inner_lock);
1018
1019         if (thread) {
1020                 if (sync)
1021                         wake_up_interruptible_sync(&thread->wait);
1022                 else
1023                         wake_up_interruptible(&thread->wait);
1024                 return;
1025         }
1026
1027         /* Didn't find a thread waiting for proc work; this can happen
1028          * in two scenarios:
1029          * 1. All threads are busy handling transactions
1030          *    In that case, one of those threads should call back into
1031          *    the kernel driver soon and pick up this work.
1032          * 2. Threads are using the (e)poll interface, in which case
1033          *    they may be blocked on the waitqueue without having been
1034          *    added to waiting_threads. For this case, we just iterate
1035          *    over all threads not handling transaction work, and
1036          *    wake them all up. We wake all because we don't know whether
1037          *    a thread that called into (e)poll is handling non-binder
1038          *    work currently.
1039          */
1040         binder_wakeup_poll_threads_ilocked(proc, sync);
1041 }
1042
1043 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1044 {
1045         struct binder_thread *thread = binder_select_thread_ilocked(proc);
1046
1047         binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1048 }
1049
1050 static void binder_set_nice(long nice)
1051 {
1052         long min_nice;
1053
1054         if (can_nice(current, nice)) {
1055                 set_user_nice(current, nice);
1056                 return;
1057         }
1058         min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
1059         binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1060                      "%d: nice value %ld not allowed use %ld instead\n",
1061                       current->pid, nice, min_nice);
1062         set_user_nice(current, min_nice);
1063         if (min_nice <= MAX_NICE)
1064                 return;
1065         binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
1066 }
1067
1068 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1069                                                    binder_uintptr_t ptr)
1070 {
1071         struct rb_node *n = proc->nodes.rb_node;
1072         struct binder_node *node;
1073
1074         assert_spin_locked(&proc->inner_lock);
1075
1076         while (n) {
1077                 node = rb_entry(n, struct binder_node, rb_node);
1078
1079                 if (ptr < node->ptr)
1080                         n = n->rb_left;
1081                 else if (ptr > node->ptr)
1082                         n = n->rb_right;
1083                 else {
1084                         /*
1085                          * take an implicit weak reference
1086                          * to ensure node stays alive until
1087                          * call to binder_put_node()
1088                          */
1089                         binder_inc_node_tmpref_ilocked(node);
1090                         return node;
1091                 }
1092         }
1093         return NULL;
1094 }
1095
1096 static struct binder_node *binder_get_node(struct binder_proc *proc,
1097                                            binder_uintptr_t ptr)
1098 {
1099         struct binder_node *node;
1100
1101         binder_inner_proc_lock(proc);
1102         node = binder_get_node_ilocked(proc, ptr);
1103         binder_inner_proc_unlock(proc);
1104         return node;
1105 }
1106
1107 static struct binder_node *binder_init_node_ilocked(
1108                                                 struct binder_proc *proc,
1109                                                 struct binder_node *new_node,
1110                                                 struct flat_binder_object *fp)
1111 {
1112         struct rb_node **p = &proc->nodes.rb_node;
1113         struct rb_node *parent = NULL;
1114         struct binder_node *node;
1115         binder_uintptr_t ptr = fp ? fp->binder : 0;
1116         binder_uintptr_t cookie = fp ? fp->cookie : 0;
1117         __u32 flags = fp ? fp->flags : 0;
1118
1119         assert_spin_locked(&proc->inner_lock);
1120
1121         while (*p) {
1122
1123                 parent = *p;
1124                 node = rb_entry(parent, struct binder_node, rb_node);
1125
1126                 if (ptr < node->ptr)
1127                         p = &(*p)->rb_left;
1128                 else if (ptr > node->ptr)
1129                         p = &(*p)->rb_right;
1130                 else {
1131                         /*
1132                          * A matching node is already in
1133                          * the rb tree. Abandon the init
1134                          * and return it.
1135                          */
1136                         binder_inc_node_tmpref_ilocked(node);
1137                         return node;
1138                 }
1139         }
1140         node = new_node;
1141         binder_stats_created(BINDER_STAT_NODE);
1142         node->tmp_refs++;
1143         rb_link_node(&node->rb_node, parent, p);
1144         rb_insert_color(&node->rb_node, &proc->nodes);
1145         node->debug_id = atomic_inc_return(&binder_last_id);
1146         node->proc = proc;
1147         node->ptr = ptr;
1148         node->cookie = cookie;
1149         node->work.type = BINDER_WORK_NODE;
1150         node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1151         node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1152         spin_lock_init(&node->lock);
1153         INIT_LIST_HEAD(&node->work.entry);
1154         INIT_LIST_HEAD(&node->async_todo);
1155         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1156                      "%d:%d node %d u%016llx c%016llx created\n",
1157                      proc->pid, current->pid, node->debug_id,
1158                      (u64)node->ptr, (u64)node->cookie);
1159
1160         return node;
1161 }
1162
1163 static struct binder_node *binder_new_node(struct binder_proc *proc,
1164                                            struct flat_binder_object *fp)
1165 {
1166         struct binder_node *node;
1167         struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1168
1169         if (!new_node)
1170                 return NULL;
1171         binder_inner_proc_lock(proc);
1172         node = binder_init_node_ilocked(proc, new_node, fp);
1173         binder_inner_proc_unlock(proc);
1174         if (node != new_node)
1175                 /*
1176                  * The node was already added by another thread
1177                  */
1178                 kfree(new_node);
1179
1180         return node;
1181 }
1182
1183 static void binder_free_node(struct binder_node *node)
1184 {
1185         kfree(node);
1186         binder_stats_deleted(BINDER_STAT_NODE);
1187 }
1188
1189 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1190                                     int internal,
1191                                     struct list_head *target_list)
1192 {
1193         struct binder_proc *proc = node->proc;
1194
1195         assert_spin_locked(&node->lock);
1196         if (proc)
1197                 assert_spin_locked(&proc->inner_lock);
1198         if (strong) {
1199                 if (internal) {
1200                         if (target_list == NULL &&
1201                             node->internal_strong_refs == 0 &&
1202                             !(node->proc &&
1203                               node == node->proc->context->binder_context_mgr_node &&
1204                               node->has_strong_ref)) {
1205                                 pr_err("invalid inc strong node for %d\n",
1206                                         node->debug_id);
1207                                 return -EINVAL;
1208                         }
1209                         node->internal_strong_refs++;
1210                 } else
1211                         node->local_strong_refs++;
1212                 if (!node->has_strong_ref && target_list) {
1213                         binder_dequeue_work_ilocked(&node->work);
1214                         binder_enqueue_work_ilocked(&node->work, target_list);
1215                 }
1216         } else {
1217                 if (!internal)
1218                         node->local_weak_refs++;
1219                 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1220                         if (target_list == NULL) {
1221                                 pr_err("invalid inc weak node for %d\n",
1222                                         node->debug_id);
1223                                 return -EINVAL;
1224                         }
1225                         binder_enqueue_work_ilocked(&node->work, target_list);
1226                 }
1227         }
1228         return 0;
1229 }
1230
1231 static int binder_inc_node(struct binder_node *node, int strong, int internal,
1232                            struct list_head *target_list)
1233 {
1234         int ret;
1235
1236         binder_node_inner_lock(node);
1237         ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1238         binder_node_inner_unlock(node);
1239
1240         return ret;
1241 }
1242
1243 static bool binder_dec_node_nilocked(struct binder_node *node,
1244                                      int strong, int internal)
1245 {
1246         struct binder_proc *proc = node->proc;
1247
1248         assert_spin_locked(&node->lock);
1249         if (proc)
1250                 assert_spin_locked(&proc->inner_lock);
1251         if (strong) {
1252                 if (internal)
1253                         node->internal_strong_refs--;
1254                 else
1255                         node->local_strong_refs--;
1256                 if (node->local_strong_refs || node->internal_strong_refs)
1257                         return false;
1258         } else {
1259                 if (!internal)
1260                         node->local_weak_refs--;
1261                 if (node->local_weak_refs || node->tmp_refs ||
1262                                 !hlist_empty(&node->refs))
1263                         return false;
1264         }
1265
1266         if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1267                 if (list_empty(&node->work.entry)) {
1268                         binder_enqueue_work_ilocked(&node->work, &proc->todo);
1269                         binder_wakeup_proc_ilocked(proc);
1270                 }
1271         } else {
1272                 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1273                     !node->local_weak_refs && !node->tmp_refs) {
1274                         if (proc) {
1275                                 binder_dequeue_work_ilocked(&node->work);
1276                                 rb_erase(&node->rb_node, &proc->nodes);
1277                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1278                                              "refless node %d deleted\n",
1279                                              node->debug_id);
1280                         } else {
1281                                 BUG_ON(!list_empty(&node->work.entry));
1282                                 spin_lock(&binder_dead_nodes_lock);
1283                                 /*
1284                                  * tmp_refs could have changed so
1285                                  * check it again
1286                                  */
1287                                 if (node->tmp_refs) {
1288                                         spin_unlock(&binder_dead_nodes_lock);
1289                                         return false;
1290                                 }
1291                                 hlist_del(&node->dead_node);
1292                                 spin_unlock(&binder_dead_nodes_lock);
1293                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1294                                              "dead node %d deleted\n",
1295                                              node->debug_id);
1296                         }
1297                         return true;
1298                 }
1299         }
1300         return false;
1301 }
1302
1303 static void binder_dec_node(struct binder_node *node, int strong, int internal)
1304 {
1305         bool free_node;
1306
1307         binder_node_inner_lock(node);
1308         free_node = binder_dec_node_nilocked(node, strong, internal);
1309         binder_node_inner_unlock(node);
1310         if (free_node)
1311                 binder_free_node(node);
1312 }
1313
1314 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1315 {
1316         /*
1317          * No call to binder_inc_node() is needed since we
1318          * don't need to inform userspace of any changes to
1319          * tmp_refs
1320          */
1321         node->tmp_refs++;
1322 }
1323
1324 /**
1325  * binder_inc_node_tmpref() - take a temporary reference on node
1326  * @node:       node to reference
1327  *
1328  * Take reference on node to prevent the node from being freed
1329  * while referenced only by a local variable. The inner lock is
1330  * needed to serialize with the node work on the queue (which
1331  * isn't needed after the node is dead). If the node is dead
1332  * (node->proc is NULL), use binder_dead_nodes_lock to protect
1333  * node->tmp_refs against dead-node-only cases where the node
1334  * lock cannot be acquired (eg traversing the dead node list to
1335  * print nodes)
1336  */
1337 static void binder_inc_node_tmpref(struct binder_node *node)
1338 {
1339         binder_node_lock(node);
1340         if (node->proc)
1341                 binder_inner_proc_lock(node->proc);
1342         else
1343                 spin_lock(&binder_dead_nodes_lock);
1344         binder_inc_node_tmpref_ilocked(node);
1345         if (node->proc)
1346                 binder_inner_proc_unlock(node->proc);
1347         else
1348                 spin_unlock(&binder_dead_nodes_lock);
1349         binder_node_unlock(node);
1350 }
1351
1352 /**
1353  * binder_dec_node_tmpref() - remove a temporary reference on node
1354  * @node:       node to reference
1355  *
1356  * Release temporary reference on node taken via binder_inc_node_tmpref()
1357  */
1358 static void binder_dec_node_tmpref(struct binder_node *node)
1359 {
1360         bool free_node;
1361
1362         binder_node_inner_lock(node);
1363         if (!node->proc)
1364                 spin_lock(&binder_dead_nodes_lock);
1365         node->tmp_refs--;
1366         BUG_ON(node->tmp_refs < 0);
1367         if (!node->proc)
1368                 spin_unlock(&binder_dead_nodes_lock);
1369         /*
1370          * Call binder_dec_node() to check if all refcounts are 0
1371          * and cleanup is needed. Calling with strong=0 and internal=1
1372          * causes no actual reference to be released in binder_dec_node().
1373          * If that changes, a change is needed here too.
1374          */
1375         free_node = binder_dec_node_nilocked(node, 0, 1);
1376         binder_node_inner_unlock(node);
1377         if (free_node)
1378                 binder_free_node(node);
1379 }
1380
1381 static void binder_put_node(struct binder_node *node)
1382 {
1383         binder_dec_node_tmpref(node);
1384 }
1385
1386 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1387                                                  u32 desc, bool need_strong_ref)
1388 {
1389         struct rb_node *n = proc->refs_by_desc.rb_node;
1390         struct binder_ref *ref;
1391
1392         while (n) {
1393                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1394
1395                 if (desc < ref->data.desc) {
1396                         n = n->rb_left;
1397                 } else if (desc > ref->data.desc) {
1398                         n = n->rb_right;
1399                 } else if (need_strong_ref && !ref->data.strong) {
1400                         binder_user_error("tried to use weak ref as strong ref\n");
1401                         return NULL;
1402                 } else {
1403                         return ref;
1404                 }
1405         }
1406         return NULL;
1407 }
1408
1409 /**
1410  * binder_get_ref_for_node_olocked() - get the ref associated with given node
1411  * @proc:       binder_proc that owns the ref
1412  * @node:       binder_node of target
1413  * @new_ref:    newly allocated binder_ref to be initialized or %NULL
1414  *
1415  * Look up the ref for the given node and return it if it exists
1416  *
1417  * If it doesn't exist and the caller provides a newly allocated
1418  * ref, initialize the fields of the newly allocated ref and insert
1419  * into the given proc rb_trees and node refs list.
1420  *
1421  * Return:      the ref for node. It is possible that another thread
1422  *              allocated/initialized the ref first in which case the
1423  *              returned ref would be different than the passed-in
1424  *              new_ref. new_ref must be kfree'd by the caller in
1425  *              this case.
1426  */
1427 static struct binder_ref *binder_get_ref_for_node_olocked(
1428                                         struct binder_proc *proc,
1429                                         struct binder_node *node,
1430                                         struct binder_ref *new_ref)
1431 {
1432         struct binder_context *context = proc->context;
1433         struct rb_node **p = &proc->refs_by_node.rb_node;
1434         struct rb_node *parent = NULL;
1435         struct binder_ref *ref;
1436         struct rb_node *n;
1437
1438         while (*p) {
1439                 parent = *p;
1440                 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1441
1442                 if (node < ref->node)
1443                         p = &(*p)->rb_left;
1444                 else if (node > ref->node)
1445                         p = &(*p)->rb_right;
1446                 else
1447                         return ref;
1448         }
1449         if (!new_ref)
1450                 return NULL;
1451
1452         binder_stats_created(BINDER_STAT_REF);
1453         new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1454         new_ref->proc = proc;
1455         new_ref->node = node;
1456         rb_link_node(&new_ref->rb_node_node, parent, p);
1457         rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1458
1459         new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1460         for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1461                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1462                 if (ref->data.desc > new_ref->data.desc)
1463                         break;
1464                 new_ref->data.desc = ref->data.desc + 1;
1465         }
1466
1467         p = &proc->refs_by_desc.rb_node;
1468         while (*p) {
1469                 parent = *p;
1470                 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1471
1472                 if (new_ref->data.desc < ref->data.desc)
1473                         p = &(*p)->rb_left;
1474                 else if (new_ref->data.desc > ref->data.desc)
1475                         p = &(*p)->rb_right;
1476                 else
1477                         BUG();
1478         }
1479         rb_link_node(&new_ref->rb_node_desc, parent, p);
1480         rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1481
1482         binder_node_lock(node);
1483         hlist_add_head(&new_ref->node_entry, &node->refs);
1484
1485         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1486                      "%d new ref %d desc %d for node %d\n",
1487                       proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1488                       node->debug_id);
1489         binder_node_unlock(node);
1490         return new_ref;
1491 }
1492
1493 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1494 {
1495         bool delete_node = false;
1496
1497         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1498                      "%d delete ref %d desc %d for node %d\n",
1499                       ref->proc->pid, ref->data.debug_id, ref->data.desc,
1500                       ref->node->debug_id);
1501
1502         rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1503         rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1504
1505         binder_node_inner_lock(ref->node);
1506         if (ref->data.strong)
1507                 binder_dec_node_nilocked(ref->node, 1, 1);
1508
1509         hlist_del(&ref->node_entry);
1510         delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1511         binder_node_inner_unlock(ref->node);
1512         /*
1513          * Clear ref->node unless we want the caller to free the node
1514          */
1515         if (!delete_node) {
1516                 /*
1517                  * The caller uses ref->node to determine
1518                  * whether the node needs to be freed. Clear
1519                  * it since the node is still alive.
1520                  */
1521                 ref->node = NULL;
1522         }
1523
1524         if (ref->death) {
1525                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1526                              "%d delete ref %d desc %d has death notification\n",
1527                               ref->proc->pid, ref->data.debug_id,
1528                               ref->data.desc);
1529                 binder_dequeue_work(ref->proc, &ref->death->work);
1530                 binder_stats_deleted(BINDER_STAT_DEATH);
1531         }
1532         binder_stats_deleted(BINDER_STAT_REF);
1533 }
1534
1535 /**
1536  * binder_inc_ref_olocked() - increment the ref for given handle
1537  * @ref:         ref to be incremented
1538  * @strong:      if true, strong increment, else weak
1539  * @target_list: list to queue node work on
1540  *
1541  * Increment the ref. @ref->proc->outer_lock must be held on entry
1542  *
1543  * Return: 0, if successful, else errno
1544  */
1545 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1546                                   struct list_head *target_list)
1547 {
1548         int ret;
1549
1550         if (strong) {
1551                 if (ref->data.strong == 0) {
1552                         ret = binder_inc_node(ref->node, 1, 1, target_list);
1553                         if (ret)
1554                                 return ret;
1555                 }
1556                 ref->data.strong++;
1557         } else {
1558                 if (ref->data.weak == 0) {
1559                         ret = binder_inc_node(ref->node, 0, 1, target_list);
1560                         if (ret)
1561                                 return ret;
1562                 }
1563                 ref->data.weak++;
1564         }
1565         return 0;
1566 }
1567
1568 /**
1569  * binder_dec_ref() - dec the ref for given handle
1570  * @ref:        ref to be decremented
1571  * @strong:     if true, strong decrement, else weak
1572  *
1573  * Decrement the ref.
1574  *
1575  * Return: true if ref is cleaned up and ready to be freed
1576  */
1577 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1578 {
1579         if (strong) {
1580                 if (ref->data.strong == 0) {
1581                         binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1582                                           ref->proc->pid, ref->data.debug_id,
1583                                           ref->data.desc, ref->data.strong,
1584                                           ref->data.weak);
1585                         return false;
1586                 }
1587                 ref->data.strong--;
1588                 if (ref->data.strong == 0)
1589                         binder_dec_node(ref->node, strong, 1);
1590         } else {
1591                 if (ref->data.weak == 0) {
1592                         binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1593                                           ref->proc->pid, ref->data.debug_id,
1594                                           ref->data.desc, ref->data.strong,
1595                                           ref->data.weak);
1596                         return false;
1597                 }
1598                 ref->data.weak--;
1599         }
1600         if (ref->data.strong == 0 && ref->data.weak == 0) {
1601                 binder_cleanup_ref_olocked(ref);
1602                 return true;
1603         }
1604         return false;
1605 }
1606
1607 /**
1608  * binder_get_node_from_ref() - get the node from the given proc/desc
1609  * @proc:       proc containing the ref
1610  * @desc:       the handle associated with the ref
1611  * @need_strong_ref: if true, only return node if ref is strong
1612  * @rdata:      the id/refcount data for the ref
1613  *
1614  * Given a proc and ref handle, return the associated binder_node
1615  *
1616  * Return: a binder_node or NULL if not found or not strong when strong required
1617  */
1618 static struct binder_node *binder_get_node_from_ref(
1619                 struct binder_proc *proc,
1620                 u32 desc, bool need_strong_ref,
1621                 struct binder_ref_data *rdata)
1622 {
1623         struct binder_node *node;
1624         struct binder_ref *ref;
1625
1626         binder_proc_lock(proc);
1627         ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1628         if (!ref)
1629                 goto err_no_ref;
1630         node = ref->node;
1631         /*
1632          * Take an implicit reference on the node to ensure
1633          * it stays alive until the call to binder_put_node()
1634          */
1635         binder_inc_node_tmpref(node);
1636         if (rdata)
1637                 *rdata = ref->data;
1638         binder_proc_unlock(proc);
1639
1640         return node;
1641
1642 err_no_ref:
1643         binder_proc_unlock(proc);
1644         return NULL;
1645 }
1646
1647 /**
1648  * binder_free_ref() - free the binder_ref
1649  * @ref:        ref to free
1650  *
1651  * Free the binder_ref. Free the binder_node indicated by ref->node
1652  * (if non-NULL) and the binder_ref_death indicated by ref->death.
1653  */
1654 static void binder_free_ref(struct binder_ref *ref)
1655 {
1656         if (ref->node)
1657                 binder_free_node(ref->node);
1658         kfree(ref->death);
1659         kfree(ref);
1660 }
1661
1662 /**
1663  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1664  * @proc:       proc containing the ref
1665  * @desc:       the handle associated with the ref
1666  * @increment:  true=inc reference, false=dec reference
1667  * @strong:     true=strong reference, false=weak reference
1668  * @rdata:      the id/refcount data for the ref
1669  *
1670  * Given a proc and ref handle, increment or decrement the ref
1671  * according to "increment" arg.
1672  *
1673  * Return: 0 if successful, else errno
1674  */
1675 static int binder_update_ref_for_handle(struct binder_proc *proc,
1676                 uint32_t desc, bool increment, bool strong,
1677                 struct binder_ref_data *rdata)
1678 {
1679         int ret = 0;
1680         struct binder_ref *ref;
1681         bool delete_ref = false;
1682
1683         binder_proc_lock(proc);
1684         ref = binder_get_ref_olocked(proc, desc, strong);
1685         if (!ref) {
1686                 ret = -EINVAL;
1687                 goto err_no_ref;
1688         }
1689         if (increment)
1690                 ret = binder_inc_ref_olocked(ref, strong, NULL);
1691         else
1692                 delete_ref = binder_dec_ref_olocked(ref, strong);
1693
1694         if (rdata)
1695                 *rdata = ref->data;
1696         binder_proc_unlock(proc);
1697
1698         if (delete_ref)
1699                 binder_free_ref(ref);
1700         return ret;
1701
1702 err_no_ref:
1703         binder_proc_unlock(proc);
1704         return ret;
1705 }
1706
1707 /**
1708  * binder_dec_ref_for_handle() - dec the ref for given handle
1709  * @proc:       proc containing the ref
1710  * @desc:       the handle associated with the ref
1711  * @strong:     true=strong reference, false=weak reference
1712  * @rdata:      the id/refcount data for the ref
1713  *
1714  * Just calls binder_update_ref_for_handle() to decrement the ref.
1715  *
1716  * Return: 0 if successful, else errno
1717  */
1718 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1719                 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1720 {
1721         return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1722 }
1723
1724
1725 /**
1726  * binder_inc_ref_for_node() - increment the ref for given proc/node
1727  * @proc:        proc containing the ref
1728  * @node:        target node
1729  * @strong:      true=strong reference, false=weak reference
1730  * @target_list: worklist to use if node is incremented
1731  * @rdata:       the id/refcount data for the ref
1732  *
1733  * Given a proc and node, increment the ref. Create the ref if it
1734  * doesn't already exist
1735  *
1736  * Return: 0 if successful, else errno
1737  */
1738 static int binder_inc_ref_for_node(struct binder_proc *proc,
1739                         struct binder_node *node,
1740                         bool strong,
1741                         struct list_head *target_list,
1742                         struct binder_ref_data *rdata)
1743 {
1744         struct binder_ref *ref;
1745         struct binder_ref *new_ref = NULL;
1746         int ret = 0;
1747
1748         binder_proc_lock(proc);
1749         ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1750         if (!ref) {
1751                 binder_proc_unlock(proc);
1752                 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1753                 if (!new_ref)
1754                         return -ENOMEM;
1755                 binder_proc_lock(proc);
1756                 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1757         }
1758         ret = binder_inc_ref_olocked(ref, strong, target_list);
1759         *rdata = ref->data;
1760         binder_proc_unlock(proc);
1761         if (new_ref && ref != new_ref)
1762                 /*
1763                  * Another thread created the ref first so
1764                  * free the one we allocated
1765                  */
1766                 kfree(new_ref);
1767         return ret;
1768 }
1769
1770 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1771                                            struct binder_transaction *t)
1772 {
1773         BUG_ON(!target_thread);
1774         assert_spin_locked(&target_thread->proc->inner_lock);
1775         BUG_ON(target_thread->transaction_stack != t);
1776         BUG_ON(target_thread->transaction_stack->from != target_thread);
1777         target_thread->transaction_stack =
1778                 target_thread->transaction_stack->from_parent;
1779         t->from = NULL;
1780 }
1781
1782 /**
1783  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1784  * @thread:     thread to decrement
1785  *
1786  * A thread needs to be kept alive while being used to create or
1787  * handle a transaction. binder_get_txn_from() is used to safely
1788  * extract t->from from a binder_transaction and keep the thread
1789  * indicated by t->from from being freed. When done with that
1790  * binder_thread, this function is called to decrement the
1791  * tmp_ref and free if appropriate (thread has been released
1792  * and no transaction being processed by the driver)
1793  */
1794 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1795 {
1796         /*
1797          * atomic is used to protect the counter value while
1798          * it cannot reach zero or thread->is_dead is false
1799          */
1800         binder_inner_proc_lock(thread->proc);
1801         atomic_dec(&thread->tmp_ref);
1802         if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1803                 binder_inner_proc_unlock(thread->proc);
1804                 binder_free_thread(thread);
1805                 return;
1806         }
1807         binder_inner_proc_unlock(thread->proc);
1808 }
1809
1810 /**
1811  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1812  * @proc:       proc to decrement
1813  *
1814  * A binder_proc needs to be kept alive while being used to create or
1815  * handle a transaction. proc->tmp_ref is incremented when
1816  * creating a new transaction or the binder_proc is currently in-use
1817  * by threads that are being released. When done with the binder_proc,
1818  * this function is called to decrement the counter and free the
1819  * proc if appropriate (proc has been released, all threads have
1820  * been released and not currenly in-use to process a transaction).
1821  */
1822 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1823 {
1824         binder_inner_proc_lock(proc);
1825         proc->tmp_ref--;
1826         if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1827                         !proc->tmp_ref) {
1828                 binder_inner_proc_unlock(proc);
1829                 binder_free_proc(proc);
1830                 return;
1831         }
1832         binder_inner_proc_unlock(proc);
1833 }
1834
1835 /**
1836  * binder_get_txn_from() - safely extract the "from" thread in transaction
1837  * @t:  binder transaction for t->from
1838  *
1839  * Atomically return the "from" thread and increment the tmp_ref
1840  * count for the thread to ensure it stays alive until
1841  * binder_thread_dec_tmpref() is called.
1842  *
1843  * Return: the value of t->from
1844  */
1845 static struct binder_thread *binder_get_txn_from(
1846                 struct binder_transaction *t)
1847 {
1848         struct binder_thread *from;
1849
1850         spin_lock(&t->lock);
1851         from = t->from;
1852         if (from)
1853                 atomic_inc(&from->tmp_ref);
1854         spin_unlock(&t->lock);
1855         return from;
1856 }
1857
1858 /**
1859  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1860  * @t:  binder transaction for t->from
1861  *
1862  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1863  * to guarantee that the thread cannot be released while operating on it.
1864  * The caller must call binder_inner_proc_unlock() to release the inner lock
1865  * as well as call binder_dec_thread_txn() to release the reference.
1866  *
1867  * Return: the value of t->from
1868  */
1869 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1870                 struct binder_transaction *t)
1871 {
1872         struct binder_thread *from;
1873
1874         from = binder_get_txn_from(t);
1875         if (!from)
1876                 return NULL;
1877         binder_inner_proc_lock(from->proc);
1878         if (t->from) {
1879                 BUG_ON(from != t->from);
1880                 return from;
1881         }
1882         binder_inner_proc_unlock(from->proc);
1883         binder_thread_dec_tmpref(from);
1884         return NULL;
1885 }
1886
1887 static void binder_free_transaction(struct binder_transaction *t)
1888 {
1889         struct binder_proc *target_proc = t->to_proc;
1890
1891         if (target_proc) {
1892                 binder_inner_proc_lock(target_proc);
1893                 if (t->buffer)
1894                         t->buffer->transaction = NULL;
1895                 binder_inner_proc_unlock(target_proc);
1896         }
1897         /*
1898          * If the transaction has no target_proc, then
1899          * t->buffer->transaction has already been cleared.
1900          */
1901         kfree(t);
1902         binder_stats_deleted(BINDER_STAT_TRANSACTION);
1903 }
1904
1905 static void binder_send_failed_reply(struct binder_transaction *t,
1906                                      uint32_t error_code)
1907 {
1908         struct binder_thread *target_thread;
1909         struct binder_transaction *next;
1910
1911         BUG_ON(t->flags & TF_ONE_WAY);
1912         while (1) {
1913                 target_thread = binder_get_txn_from_and_acq_inner(t);
1914                 if (target_thread) {
1915                         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1916                                      "send failed reply for transaction %d to %d:%d\n",
1917                                       t->debug_id,
1918                                       target_thread->proc->pid,
1919                                       target_thread->pid);
1920
1921                         binder_pop_transaction_ilocked(target_thread, t);
1922                         if (target_thread->reply_error.cmd == BR_OK) {
1923                                 target_thread->reply_error.cmd = error_code;
1924                                 binder_enqueue_work_ilocked(
1925                                         &target_thread->reply_error.work,
1926                                         &target_thread->todo);
1927                                 wake_up_interruptible(&target_thread->wait);
1928                         } else {
1929                                 /*
1930                                  * Cannot get here for normal operation, but
1931                                  * we can if multiple synchronous transactions
1932                                  * are sent without blocking for responses.
1933                                  * Just ignore the 2nd error in this case.
1934                                  */
1935                                 pr_warn("Unexpected reply error: %u\n",
1936                                         target_thread->reply_error.cmd);
1937                         }
1938                         binder_inner_proc_unlock(target_thread->proc);
1939                         binder_thread_dec_tmpref(target_thread);
1940                         binder_free_transaction(t);
1941                         return;
1942                 }
1943                 next = t->from_parent;
1944
1945                 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1946                              "send failed reply for transaction %d, target dead\n",
1947                              t->debug_id);
1948
1949                 binder_free_transaction(t);
1950                 if (next == NULL) {
1951                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
1952                                      "reply failed, no target thread at root\n");
1953                         return;
1954                 }
1955                 t = next;
1956                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1957                              "reply failed, no target thread -- retry %d\n",
1958                               t->debug_id);
1959         }
1960 }
1961
1962 /**
1963  * binder_cleanup_transaction() - cleans up undelivered transaction
1964  * @t:          transaction that needs to be cleaned up
1965  * @reason:     reason the transaction wasn't delivered
1966  * @error_code: error to return to caller (if synchronous call)
1967  */
1968 static void binder_cleanup_transaction(struct binder_transaction *t,
1969                                        const char *reason,
1970                                        uint32_t error_code)
1971 {
1972         if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1973                 binder_send_failed_reply(t, error_code);
1974         } else {
1975                 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1976                         "undelivered transaction %d, %s\n",
1977                         t->debug_id, reason);
1978                 binder_free_transaction(t);
1979         }
1980 }
1981
1982 /**
1983  * binder_validate_object() - checks for a valid metadata object in a buffer.
1984  * @buffer:     binder_buffer that we're parsing.
1985  * @offset:     offset in the buffer at which to validate an object.
1986  *
1987  * Return:      If there's a valid metadata object at @offset in @buffer, the
1988  *              size of that object. Otherwise, it returns zero.
1989  */
1990 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
1991 {
1992         /* Check if we can read a header first */
1993         struct binder_object_header *hdr;
1994         size_t object_size = 0;
1995
1996         if (offset > buffer->data_size - sizeof(*hdr) ||
1997             buffer->data_size < sizeof(*hdr) ||
1998             !IS_ALIGNED(offset, sizeof(u32)))
1999                 return 0;
2000
2001         /* Ok, now see if we can read a complete object. */
2002         hdr = (struct binder_object_header *)(buffer->data + offset);
2003         switch (hdr->type) {
2004         case BINDER_TYPE_BINDER:
2005         case BINDER_TYPE_WEAK_BINDER:
2006         case BINDER_TYPE_HANDLE:
2007         case BINDER_TYPE_WEAK_HANDLE:
2008                 object_size = sizeof(struct flat_binder_object);
2009                 break;
2010         case BINDER_TYPE_FD:
2011                 object_size = sizeof(struct binder_fd_object);
2012                 break;
2013         case BINDER_TYPE_PTR:
2014                 object_size = sizeof(struct binder_buffer_object);
2015                 break;
2016         case BINDER_TYPE_FDA:
2017                 object_size = sizeof(struct binder_fd_array_object);
2018                 break;
2019         default:
2020                 return 0;
2021         }
2022         if (offset <= buffer->data_size - object_size &&
2023             buffer->data_size >= object_size)
2024                 return object_size;
2025         else
2026                 return 0;
2027 }
2028
2029 /**
2030  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2031  * @b:          binder_buffer containing the object
2032  * @index:      index in offset array at which the binder_buffer_object is
2033  *              located
2034  * @start:      points to the start of the offset array
2035  * @num_valid:  the number of valid offsets in the offset array
2036  *
2037  * Return:      If @index is within the valid range of the offset array
2038  *              described by @start and @num_valid, and if there's a valid
2039  *              binder_buffer_object at the offset found in index @index
2040  *              of the offset array, that object is returned. Otherwise,
2041  *              %NULL is returned.
2042  *              Note that the offset found in index @index itself is not
2043  *              verified; this function assumes that @num_valid elements
2044  *              from @start were previously verified to have valid offsets.
2045  */
2046 static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2047                                                         binder_size_t index,
2048                                                         binder_size_t *start,
2049                                                         binder_size_t num_valid)
2050 {
2051         struct binder_buffer_object *buffer_obj;
2052         binder_size_t *offp;
2053
2054         if (index >= num_valid)
2055                 return NULL;
2056
2057         offp = start + index;
2058         buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2059         if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2060                 return NULL;
2061
2062         return buffer_obj;
2063 }
2064
2065 /**
2066  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2067  * @b:                  transaction buffer
2068  * @objects_start       start of objects buffer
2069  * @buffer:             binder_buffer_object in which to fix up
2070  * @offset:             start offset in @buffer to fix up
2071  * @last_obj:           last binder_buffer_object that we fixed up in
2072  * @last_min_offset:    minimum fixup offset in @last_obj
2073  *
2074  * Return:              %true if a fixup in buffer @buffer at offset @offset is
2075  *                      allowed.
2076  *
2077  * For safety reasons, we only allow fixups inside a buffer to happen
2078  * at increasing offsets; additionally, we only allow fixup on the last
2079  * buffer object that was verified, or one of its parents.
2080  *
2081  * Example of what is allowed:
2082  *
2083  * A
2084  *   B (parent = A, offset = 0)
2085  *   C (parent = A, offset = 16)
2086  *     D (parent = C, offset = 0)
2087  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2088  *
2089  * Examples of what is not allowed:
2090  *
2091  * Decreasing offsets within the same parent:
2092  * A
2093  *   C (parent = A, offset = 16)
2094  *   B (parent = A, offset = 0) // decreasing offset within A
2095  *
2096  * Referring to a parent that wasn't the last object or any of its parents:
2097  * A
2098  *   B (parent = A, offset = 0)
2099  *   C (parent = A, offset = 0)
2100  *   C (parent = A, offset = 16)
2101  *     D (parent = B, offset = 0) // B is not A or any of A's parents
2102  */
2103 static bool binder_validate_fixup(struct binder_buffer *b,
2104                                   binder_size_t *objects_start,
2105                                   struct binder_buffer_object *buffer,
2106                                   binder_size_t fixup_offset,
2107                                   struct binder_buffer_object *last_obj,
2108                                   binder_size_t last_min_offset)
2109 {
2110         if (!last_obj) {
2111                 /* Nothing to fix up in */
2112                 return false;
2113         }
2114
2115         while (last_obj != buffer) {
2116                 /*
2117                  * Safe to retrieve the parent of last_obj, since it
2118                  * was already previously verified by the driver.
2119                  */
2120                 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2121                         return false;
2122                 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2123                 last_obj = (struct binder_buffer_object *)
2124                         (b->data + *(objects_start + last_obj->parent));
2125         }
2126         return (fixup_offset >= last_min_offset);
2127 }
2128
2129 static void binder_transaction_buffer_release(struct binder_proc *proc,
2130                                               struct binder_buffer *buffer,
2131                                               binder_size_t *failed_at)
2132 {
2133         binder_size_t *offp, *off_start, *off_end;
2134         int debug_id = buffer->debug_id;
2135
2136         binder_debug(BINDER_DEBUG_TRANSACTION,
2137                      "%d buffer release %d, size %zd-%zd, failed at %pK\n",
2138                      proc->pid, buffer->debug_id,
2139                      buffer->data_size, buffer->offsets_size, failed_at);
2140
2141         if (buffer->target_node)
2142                 binder_dec_node(buffer->target_node, 1, 0);
2143
2144         off_start = (binder_size_t *)(buffer->data +
2145                                       ALIGN(buffer->data_size, sizeof(void *)));
2146         if (failed_at)
2147                 off_end = failed_at;
2148         else
2149                 off_end = (void *)off_start + buffer->offsets_size;
2150         for (offp = off_start; offp < off_end; offp++) {
2151                 struct binder_object_header *hdr;
2152                 size_t object_size = binder_validate_object(buffer, *offp);
2153
2154                 if (object_size == 0) {
2155                         pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2156                                debug_id, (u64)*offp, buffer->data_size);
2157                         continue;
2158                 }
2159                 hdr = (struct binder_object_header *)(buffer->data + *offp);
2160                 switch (hdr->type) {
2161                 case BINDER_TYPE_BINDER:
2162                 case BINDER_TYPE_WEAK_BINDER: {
2163                         struct flat_binder_object *fp;
2164                         struct binder_node *node;
2165
2166                         fp = to_flat_binder_object(hdr);
2167                         node = binder_get_node(proc, fp->binder);
2168                         if (node == NULL) {
2169                                 pr_err("transaction release %d bad node %016llx\n",
2170                                        debug_id, (u64)fp->binder);
2171                                 break;
2172                         }
2173                         binder_debug(BINDER_DEBUG_TRANSACTION,
2174                                      "        node %d u%016llx\n",
2175                                      node->debug_id, (u64)node->ptr);
2176                         binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2177                                         0);
2178                         binder_put_node(node);
2179                 } break;
2180                 case BINDER_TYPE_HANDLE:
2181                 case BINDER_TYPE_WEAK_HANDLE: {
2182                         struct flat_binder_object *fp;
2183                         struct binder_ref_data rdata;
2184                         int ret;
2185
2186                         fp = to_flat_binder_object(hdr);
2187                         ret = binder_dec_ref_for_handle(proc, fp->handle,
2188                                 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2189
2190                         if (ret) {
2191                                 pr_err("transaction release %d bad handle %d, ret = %d\n",
2192                                  debug_id, fp->handle, ret);
2193                                 break;
2194                         }
2195                         binder_debug(BINDER_DEBUG_TRANSACTION,
2196                                      "        ref %d desc %d\n",
2197                                      rdata.debug_id, rdata.desc);
2198                 } break;
2199
2200                 case BINDER_TYPE_FD: {
2201                         struct binder_fd_object *fp = to_binder_fd_object(hdr);
2202
2203                         binder_debug(BINDER_DEBUG_TRANSACTION,
2204                                      "        fd %d\n", fp->fd);
2205                         if (failed_at)
2206                                 task_close_fd(proc, fp->fd);
2207                 } break;
2208                 case BINDER_TYPE_PTR:
2209                         /*
2210                          * Nothing to do here, this will get cleaned up when the
2211                          * transaction buffer gets freed
2212                          */
2213                         break;
2214                 case BINDER_TYPE_FDA: {
2215                         struct binder_fd_array_object *fda;
2216                         struct binder_buffer_object *parent;
2217                         uintptr_t parent_buffer;
2218                         u32 *fd_array;
2219                         size_t fd_index;
2220                         binder_size_t fd_buf_size;
2221
2222                         fda = to_binder_fd_array_object(hdr);
2223                         parent = binder_validate_ptr(buffer, fda->parent,
2224                                                      off_start,
2225                                                      offp - off_start);
2226                         if (!parent) {
2227                                 pr_err("transaction release %d bad parent offset",
2228                                        debug_id);
2229                                 continue;
2230                         }
2231                         /*
2232                          * Since the parent was already fixed up, convert it
2233                          * back to kernel address space to access it
2234                          */
2235                         parent_buffer = parent->buffer -
2236                                 binder_alloc_get_user_buffer_offset(
2237                                                 &proc->alloc);
2238
2239                         fd_buf_size = sizeof(u32) * fda->num_fds;
2240                         if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2241                                 pr_err("transaction release %d invalid number of fds (%lld)\n",
2242                                        debug_id, (u64)fda->num_fds);
2243                                 continue;
2244                         }
2245                         if (fd_buf_size > parent->length ||
2246                             fda->parent_offset > parent->length - fd_buf_size) {
2247                                 /* No space for all file descriptors here. */
2248                                 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2249                                        debug_id, (u64)fda->num_fds);
2250                                 continue;
2251                         }
2252                         fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2253                         for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2254                                 task_close_fd(proc, fd_array[fd_index]);
2255                 } break;
2256                 default:
2257                         pr_err("transaction release %d bad object type %x\n",
2258                                 debug_id, hdr->type);
2259                         break;
2260                 }
2261         }
2262 }
2263
2264 static int binder_translate_binder(struct flat_binder_object *fp,
2265                                    struct binder_transaction *t,
2266                                    struct binder_thread *thread)
2267 {
2268         struct binder_node *node;
2269         struct binder_proc *proc = thread->proc;
2270         struct binder_proc *target_proc = t->to_proc;
2271         struct binder_ref_data rdata;
2272         int ret = 0;
2273
2274         node = binder_get_node(proc, fp->binder);
2275         if (!node) {
2276                 node = binder_new_node(proc, fp);
2277                 if (!node)
2278                         return -ENOMEM;
2279         }
2280         if (fp->cookie != node->cookie) {
2281                 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2282                                   proc->pid, thread->pid, (u64)fp->binder,
2283                                   node->debug_id, (u64)fp->cookie,
2284                                   (u64)node->cookie);
2285                 ret = -EINVAL;
2286                 goto done;
2287         }
2288         if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2289                 ret = -EPERM;
2290                 goto done;
2291         }
2292
2293         ret = binder_inc_ref_for_node(target_proc, node,
2294                         fp->hdr.type == BINDER_TYPE_BINDER,
2295                         &thread->todo, &rdata);
2296         if (ret)
2297                 goto done;
2298
2299         if (fp->hdr.type == BINDER_TYPE_BINDER)
2300                 fp->hdr.type = BINDER_TYPE_HANDLE;
2301         else
2302                 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2303         fp->binder = 0;
2304         fp->handle = rdata.desc;
2305         fp->cookie = 0;
2306
2307         trace_binder_transaction_node_to_ref(t, node, &rdata);
2308         binder_debug(BINDER_DEBUG_TRANSACTION,
2309                      "        node %d u%016llx -> ref %d desc %d\n",
2310                      node->debug_id, (u64)node->ptr,
2311                      rdata.debug_id, rdata.desc);
2312 done:
2313         binder_put_node(node);
2314         return ret;
2315 }
2316
2317 static int binder_translate_handle(struct flat_binder_object *fp,
2318                                    struct binder_transaction *t,
2319                                    struct binder_thread *thread)
2320 {
2321         struct binder_proc *proc = thread->proc;
2322         struct binder_proc *target_proc = t->to_proc;
2323         struct binder_node *node;
2324         struct binder_ref_data src_rdata;
2325         int ret = 0;
2326
2327         node = binder_get_node_from_ref(proc, fp->handle,
2328                         fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2329         if (!node) {
2330                 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2331                                   proc->pid, thread->pid, fp->handle);
2332                 return -EINVAL;
2333         }
2334         if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2335                 ret = -EPERM;
2336                 goto done;
2337         }
2338
2339         binder_node_lock(node);
2340         if (node->proc == target_proc) {
2341                 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2342                         fp->hdr.type = BINDER_TYPE_BINDER;
2343                 else
2344                         fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2345                 fp->binder = node->ptr;
2346                 fp->cookie = node->cookie;
2347                 if (node->proc)
2348                         binder_inner_proc_lock(node->proc);
2349                 binder_inc_node_nilocked(node,
2350                                          fp->hdr.type == BINDER_TYPE_BINDER,
2351                                          0, NULL);
2352                 if (node->proc)
2353                         binder_inner_proc_unlock(node->proc);
2354                 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2355                 binder_debug(BINDER_DEBUG_TRANSACTION,
2356                              "        ref %d desc %d -> node %d u%016llx\n",
2357                              src_rdata.debug_id, src_rdata.desc, node->debug_id,
2358                              (u64)node->ptr);
2359                 binder_node_unlock(node);
2360         } else {
2361                 struct binder_ref_data dest_rdata;
2362
2363                 binder_node_unlock(node);
2364                 ret = binder_inc_ref_for_node(target_proc, node,
2365                                 fp->hdr.type == BINDER_TYPE_HANDLE,
2366                                 NULL, &dest_rdata);
2367                 if (ret)
2368                         goto done;
2369
2370                 fp->binder = 0;
2371                 fp->handle = dest_rdata.desc;
2372                 fp->cookie = 0;
2373                 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2374                                                     &dest_rdata);
2375                 binder_debug(BINDER_DEBUG_TRANSACTION,
2376                              "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2377                              src_rdata.debug_id, src_rdata.desc,
2378                              dest_rdata.debug_id, dest_rdata.desc,
2379                              node->debug_id);
2380         }
2381 done:
2382         binder_put_node(node);
2383         return ret;
2384 }
2385
2386 static int binder_translate_fd(int fd,
2387                                struct binder_transaction *t,
2388                                struct binder_thread *thread,
2389                                struct binder_transaction *in_reply_to)
2390 {
2391         struct binder_proc *proc = thread->proc;
2392         struct binder_proc *target_proc = t->to_proc;
2393         int target_fd;
2394         struct file *file;
2395         int ret;
2396         bool target_allows_fd;
2397
2398         if (in_reply_to)
2399                 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2400         else
2401                 target_allows_fd = t->buffer->target_node->accept_fds;
2402         if (!target_allows_fd) {
2403                 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2404                                   proc->pid, thread->pid,
2405                                   in_reply_to ? "reply" : "transaction",
2406                                   fd);
2407                 ret = -EPERM;
2408                 goto err_fd_not_accepted;
2409         }
2410
2411         file = fget(fd);
2412         if (!file) {
2413                 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2414                                   proc->pid, thread->pid, fd);
2415                 ret = -EBADF;
2416                 goto err_fget;
2417         }
2418         ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2419         if (ret < 0) {
2420                 ret = -EPERM;
2421                 goto err_security;
2422         }
2423
2424         target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2425         if (target_fd < 0) {
2426                 ret = -ENOMEM;
2427                 goto err_get_unused_fd;
2428         }
2429         task_fd_install(target_proc, target_fd, file);
2430         trace_binder_transaction_fd(t, fd, target_fd);
2431         binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
2432                      fd, target_fd);
2433
2434         return target_fd;
2435
2436 err_get_unused_fd:
2437 err_security:
2438         fput(file);
2439 err_fget:
2440 err_fd_not_accepted:
2441         return ret;
2442 }
2443
2444 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2445                                      struct binder_buffer_object *parent,
2446                                      struct binder_transaction *t,
2447                                      struct binder_thread *thread,
2448                                      struct binder_transaction *in_reply_to)
2449 {
2450         binder_size_t fdi, fd_buf_size, num_installed_fds;
2451         int target_fd;
2452         uintptr_t parent_buffer;
2453         u32 *fd_array;
2454         struct binder_proc *proc = thread->proc;
2455         struct binder_proc *target_proc = t->to_proc;
2456
2457         fd_buf_size = sizeof(u32) * fda->num_fds;
2458         if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2459                 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2460                                   proc->pid, thread->pid, (u64)fda->num_fds);
2461                 return -EINVAL;
2462         }
2463         if (fd_buf_size > parent->length ||
2464             fda->parent_offset > parent->length - fd_buf_size) {
2465                 /* No space for all file descriptors here. */
2466                 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2467                                   proc->pid, thread->pid, (u64)fda->num_fds);
2468                 return -EINVAL;
2469         }
2470         /*
2471          * Since the parent was already fixed up, convert it
2472          * back to the kernel address space to access it
2473          */
2474         parent_buffer = parent->buffer -
2475                 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
2476         fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2477         if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2478                 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2479                                   proc->pid, thread->pid);
2480                 return -EINVAL;
2481         }
2482         for (fdi = 0; fdi < fda->num_fds; fdi++) {
2483                 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2484                                                 in_reply_to);
2485                 if (target_fd < 0)
2486                         goto err_translate_fd_failed;
2487                 fd_array[fdi] = target_fd;
2488         }
2489         return 0;
2490
2491 err_translate_fd_failed:
2492         /*
2493          * Failed to allocate fd or security error, free fds
2494          * installed so far.
2495          */
2496         num_installed_fds = fdi;
2497         for (fdi = 0; fdi < num_installed_fds; fdi++)
2498                 task_close_fd(target_proc, fd_array[fdi]);
2499         return target_fd;
2500 }
2501
2502 static int binder_fixup_parent(struct binder_transaction *t,
2503                                struct binder_thread *thread,
2504                                struct binder_buffer_object *bp,
2505                                binder_size_t *off_start,
2506                                binder_size_t num_valid,
2507                                struct binder_buffer_object *last_fixup_obj,
2508                                binder_size_t last_fixup_min_off)
2509 {
2510         struct binder_buffer_object *parent;
2511         u8 *parent_buffer;
2512         struct binder_buffer *b = t->buffer;
2513         struct binder_proc *proc = thread->proc;
2514         struct binder_proc *target_proc = t->to_proc;
2515
2516         if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2517                 return 0;
2518
2519         parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2520         if (!parent) {
2521                 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2522                                   proc->pid, thread->pid);
2523                 return -EINVAL;
2524         }
2525
2526         if (!binder_validate_fixup(b, off_start,
2527                                    parent, bp->parent_offset,
2528                                    last_fixup_obj,
2529                                    last_fixup_min_off)) {
2530                 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2531                                   proc->pid, thread->pid);
2532                 return -EINVAL;
2533         }
2534
2535         if (parent->length < sizeof(binder_uintptr_t) ||
2536             bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2537                 /* No space for a pointer here! */
2538                 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2539                                   proc->pid, thread->pid);
2540                 return -EINVAL;
2541         }
2542         parent_buffer = (u8 *)((uintptr_t)parent->buffer -
2543                         binder_alloc_get_user_buffer_offset(
2544                                 &target_proc->alloc));
2545         *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2546
2547         return 0;
2548 }
2549
2550 /**
2551  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2552  * @t:          transaction to send
2553  * @proc:       process to send the transaction to
2554  * @thread:     thread in @proc to send the transaction to (may be NULL)
2555  *
2556  * This function queues a transaction to the specified process. It will try
2557  * to find a thread in the target process to handle the transaction and
2558  * wake it up. If no thread is found, the work is queued to the proc
2559  * waitqueue.
2560  *
2561  * If the @thread parameter is not NULL, the transaction is always queued
2562  * to the waitlist of that specific thread.
2563  *
2564  * Return:      true if the transactions was successfully queued
2565  *              false if the target process or thread is dead
2566  */
2567 static bool binder_proc_transaction(struct binder_transaction *t,
2568                                     struct binder_proc *proc,
2569                                     struct binder_thread *thread)
2570 {
2571         struct list_head *target_list = NULL;
2572         struct binder_node *node = t->buffer->target_node;
2573         bool oneway = !!(t->flags & TF_ONE_WAY);
2574         bool wakeup = true;
2575
2576         BUG_ON(!node);
2577         binder_node_lock(node);
2578         if (oneway) {
2579                 BUG_ON(thread);
2580                 if (node->has_async_transaction) {
2581                         target_list = &node->async_todo;
2582                         wakeup = false;
2583                 } else {
2584                         node->has_async_transaction = 1;
2585                 }
2586         }
2587
2588         binder_inner_proc_lock(proc);
2589
2590         if (proc->is_dead || (thread && thread->is_dead)) {
2591                 binder_inner_proc_unlock(proc);
2592                 binder_node_unlock(node);
2593                 return false;
2594         }
2595
2596         if (!thread && !target_list)
2597                 thread = binder_select_thread_ilocked(proc);
2598
2599         if (thread)
2600                 target_list = &thread->todo;
2601         else if (!target_list)
2602                 target_list = &proc->todo;
2603         else
2604                 BUG_ON(target_list != &node->async_todo);
2605
2606         binder_enqueue_work_ilocked(&t->work, target_list);
2607
2608         if (wakeup)
2609                 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2610
2611         binder_inner_proc_unlock(proc);
2612         binder_node_unlock(node);
2613
2614         return true;
2615 }
2616
2617 /**
2618  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2619  * @node:         struct binder_node for which to get refs
2620  * @proc:         returns @node->proc if valid
2621  * @error:        if no @proc then returns BR_DEAD_REPLY
2622  *
2623  * User-space normally keeps the node alive when creating a transaction
2624  * since it has a reference to the target. The local strong ref keeps it
2625  * alive if the sending process dies before the target process processes
2626  * the transaction. If the source process is malicious or has a reference
2627  * counting bug, relying on the local strong ref can fail.
2628  *
2629  * Since user-space can cause the local strong ref to go away, we also take
2630  * a tmpref on the node to ensure it survives while we are constructing
2631  * the transaction. We also need a tmpref on the proc while we are
2632  * constructing the transaction, so we take that here as well.
2633  *
2634  * Return: The target_node with refs taken or NULL if no @node->proc is NULL.
2635  * Also sets @proc if valid. If the @node->proc is NULL indicating that the
2636  * target proc has died, @error is set to BR_DEAD_REPLY
2637  */
2638 static struct binder_node *binder_get_node_refs_for_txn(
2639                 struct binder_node *node,
2640                 struct binder_proc **procp,
2641                 uint32_t *error)
2642 {
2643         struct binder_node *target_node = NULL;
2644
2645         binder_node_inner_lock(node);
2646         if (node->proc) {
2647                 target_node = node;
2648                 binder_inc_node_nilocked(node, 1, 0, NULL);
2649                 binder_inc_node_tmpref_ilocked(node);
2650                 node->proc->tmp_ref++;
2651                 *procp = node->proc;
2652         } else
2653                 *error = BR_DEAD_REPLY;
2654         binder_node_inner_unlock(node);
2655
2656         return target_node;
2657 }
2658
2659 static void binder_transaction(struct binder_proc *proc,
2660                                struct binder_thread *thread,
2661                                struct binder_transaction_data *tr, int reply,
2662                                binder_size_t extra_buffers_size)
2663 {
2664         int ret;
2665         struct binder_transaction *t;
2666         struct binder_work *tcomplete;
2667         binder_size_t *offp, *off_end, *off_start;
2668         binder_size_t off_min;
2669         u8 *sg_bufp, *sg_buf_end;
2670         struct binder_proc *target_proc = NULL;
2671         struct binder_thread *target_thread = NULL;
2672         struct binder_node *target_node = NULL;
2673         struct binder_transaction *in_reply_to = NULL;
2674         struct binder_transaction_log_entry *e;
2675         uint32_t return_error = 0;
2676         uint32_t return_error_param = 0;
2677         uint32_t return_error_line = 0;
2678         struct binder_buffer_object *last_fixup_obj = NULL;
2679         binder_size_t last_fixup_min_off = 0;
2680         struct binder_context *context = proc->context;
2681         int t_debug_id = atomic_inc_return(&binder_last_id);
2682
2683         e = binder_transaction_log_add(&binder_transaction_log);
2684         e->debug_id = t_debug_id;
2685         e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2686         e->from_proc = proc->pid;
2687         e->from_thread = thread->pid;
2688         e->target_handle = tr->target.handle;
2689         e->data_size = tr->data_size;
2690         e->offsets_size = tr->offsets_size;
2691         e->context_name = proc->context->name;
2692
2693         if (reply) {
2694                 binder_inner_proc_lock(proc);
2695                 in_reply_to = thread->transaction_stack;
2696                 if (in_reply_to == NULL) {
2697                         binder_inner_proc_unlock(proc);
2698                         binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2699                                           proc->pid, thread->pid);
2700                         return_error = BR_FAILED_REPLY;
2701                         return_error_param = -EPROTO;
2702                         return_error_line = __LINE__;
2703                         goto err_empty_call_stack;
2704                 }
2705                 if (in_reply_to->to_thread != thread) {
2706                         spin_lock(&in_reply_to->lock);
2707                         binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2708                                 proc->pid, thread->pid, in_reply_to->debug_id,
2709                                 in_reply_to->to_proc ?
2710                                 in_reply_to->to_proc->pid : 0,
2711                                 in_reply_to->to_thread ?
2712                                 in_reply_to->to_thread->pid : 0);
2713                         spin_unlock(&in_reply_to->lock);
2714                         binder_inner_proc_unlock(proc);
2715                         return_error = BR_FAILED_REPLY;
2716                         return_error_param = -EPROTO;
2717                         return_error_line = __LINE__;
2718                         in_reply_to = NULL;
2719                         goto err_bad_call_stack;
2720                 }
2721                 thread->transaction_stack = in_reply_to->to_parent;
2722                 binder_inner_proc_unlock(proc);
2723                 binder_set_nice(in_reply_to->saved_priority);
2724                 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2725                 if (target_thread == NULL) {
2726                         return_error = BR_DEAD_REPLY;
2727                         return_error_line = __LINE__;
2728                         goto err_dead_binder;
2729                 }
2730                 if (target_thread->transaction_stack != in_reply_to) {
2731                         binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2732                                 proc->pid, thread->pid,
2733                                 target_thread->transaction_stack ?
2734                                 target_thread->transaction_stack->debug_id : 0,
2735                                 in_reply_to->debug_id);
2736                         binder_inner_proc_unlock(target_thread->proc);
2737                         return_error = BR_FAILED_REPLY;
2738                         return_error_param = -EPROTO;
2739                         return_error_line = __LINE__;
2740                         in_reply_to = NULL;
2741                         target_thread = NULL;
2742                         goto err_dead_binder;
2743                 }
2744                 target_proc = target_thread->proc;
2745                 target_proc->tmp_ref++;
2746                 binder_inner_proc_unlock(target_thread->proc);
2747         } else {
2748                 if (tr->target.handle) {
2749                         struct binder_ref *ref;
2750
2751                         /*
2752                          * There must already be a strong ref
2753                          * on this node. If so, do a strong
2754                          * increment on the node to ensure it
2755                          * stays alive until the transaction is
2756                          * done.
2757                          */
2758                         binder_proc_lock(proc);
2759                         ref = binder_get_ref_olocked(proc, tr->target.handle,
2760                                                      true);
2761                         if (ref) {
2762                                 target_node = binder_get_node_refs_for_txn(
2763                                                 ref->node, &target_proc,
2764                                                 &return_error);
2765                         } else {
2766                                 binder_user_error("%d:%d got transaction to invalid handle\n",
2767                                                   proc->pid, thread->pid);
2768                                 return_error = BR_FAILED_REPLY;
2769                         }
2770                         binder_proc_unlock(proc);
2771                 } else {
2772                         mutex_lock(&context->context_mgr_node_lock);
2773                         target_node = context->binder_context_mgr_node;
2774                         if (target_node)
2775                                 target_node = binder_get_node_refs_for_txn(
2776                                                 target_node, &target_proc,
2777                                                 &return_error);
2778                         else
2779                                 return_error = BR_DEAD_REPLY;
2780                         mutex_unlock(&context->context_mgr_node_lock);
2781                         if (target_node && target_proc->pid == proc->pid) {
2782                                 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2783                                                   proc->pid, thread->pid);
2784                                 return_error = BR_FAILED_REPLY;
2785                                 return_error_param = -EINVAL;
2786                                 return_error_line = __LINE__;
2787                                 goto err_invalid_target_handle;
2788                         }
2789                 }
2790                 if (!target_node) {
2791                         /*
2792                          * return_error is set above
2793                          */
2794                         return_error_param = -EINVAL;
2795                         return_error_line = __LINE__;
2796                         goto err_dead_binder;
2797                 }
2798                 e->to_node = target_node->debug_id;
2799                 if (WARN_ON(proc == target_proc)) {
2800                         return_error = BR_FAILED_REPLY;
2801                         return_error_param = -EINVAL;
2802                         return_error_line = __LINE__;
2803                         goto err_invalid_target_handle;
2804                 }
2805                 if (security_binder_transaction(proc->cred,
2806                                                 target_proc->cred) < 0) {
2807                         return_error = BR_FAILED_REPLY;
2808                         return_error_param = -EPERM;
2809                         return_error_line = __LINE__;
2810                         goto err_invalid_target_handle;
2811                 }
2812                 binder_inner_proc_lock(proc);
2813                 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2814                         struct binder_transaction *tmp;
2815
2816                         tmp = thread->transaction_stack;
2817                         if (tmp->to_thread != thread) {
2818                                 spin_lock(&tmp->lock);
2819                                 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2820                                         proc->pid, thread->pid, tmp->debug_id,
2821                                         tmp->to_proc ? tmp->to_proc->pid : 0,
2822                                         tmp->to_thread ?
2823                                         tmp->to_thread->pid : 0);
2824                                 spin_unlock(&tmp->lock);
2825                                 binder_inner_proc_unlock(proc);
2826                                 return_error = BR_FAILED_REPLY;
2827                                 return_error_param = -EPROTO;
2828                                 return_error_line = __LINE__;
2829                                 goto err_bad_call_stack;
2830                         }
2831                         while (tmp) {
2832                                 struct binder_thread *from;
2833
2834                                 spin_lock(&tmp->lock);
2835                                 from = tmp->from;
2836                                 if (from && from->proc == target_proc) {
2837                                         atomic_inc(&from->tmp_ref);
2838                                         target_thread = from;
2839                                         spin_unlock(&tmp->lock);
2840                                         break;
2841                                 }
2842                                 spin_unlock(&tmp->lock);
2843                                 tmp = tmp->from_parent;
2844                         }
2845                 }
2846                 binder_inner_proc_unlock(proc);
2847         }
2848         if (target_thread)
2849                 e->to_thread = target_thread->pid;
2850         e->to_proc = target_proc->pid;
2851
2852         /* TODO: reuse incoming transaction for reply */
2853         t = kzalloc(sizeof(*t), GFP_KERNEL);
2854         if (t == NULL) {
2855                 return_error = BR_FAILED_REPLY;
2856                 return_error_param = -ENOMEM;
2857                 return_error_line = __LINE__;
2858                 goto err_alloc_t_failed;
2859         }
2860         binder_stats_created(BINDER_STAT_TRANSACTION);
2861         spin_lock_init(&t->lock);
2862
2863         tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2864         if (tcomplete == NULL) {
2865                 return_error = BR_FAILED_REPLY;
2866                 return_error_param = -ENOMEM;
2867                 return_error_line = __LINE__;
2868                 goto err_alloc_tcomplete_failed;
2869         }
2870         binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2871
2872         t->debug_id = t_debug_id;
2873
2874         if (reply)
2875                 binder_debug(BINDER_DEBUG_TRANSACTION,
2876                              "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2877                              proc->pid, thread->pid, t->debug_id,
2878                              target_proc->pid, target_thread->pid,
2879                              (u64)tr->data.ptr.buffer,
2880                              (u64)tr->data.ptr.offsets,
2881                              (u64)tr->data_size, (u64)tr->offsets_size,
2882                              (u64)extra_buffers_size);
2883         else
2884                 binder_debug(BINDER_DEBUG_TRANSACTION,
2885                              "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2886                              proc->pid, thread->pid, t->debug_id,
2887                              target_proc->pid, target_node->debug_id,
2888                              (u64)tr->data.ptr.buffer,
2889                              (u64)tr->data.ptr.offsets,
2890                              (u64)tr->data_size, (u64)tr->offsets_size,
2891                              (u64)extra_buffers_size);
2892
2893         if (!reply && !(tr->flags & TF_ONE_WAY))
2894                 t->from = thread;
2895         else
2896                 t->from = NULL;
2897         t->sender_euid = task_euid(proc->tsk);
2898         t->to_proc = target_proc;
2899         t->to_thread = target_thread;
2900         t->code = tr->code;
2901         t->flags = tr->flags;
2902         t->priority = task_nice(current);
2903
2904         trace_binder_transaction(reply, t, target_node);
2905
2906         t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
2907                 tr->offsets_size, extra_buffers_size,
2908                 !reply && (t->flags & TF_ONE_WAY));
2909         if (IS_ERR(t->buffer)) {
2910                 /*
2911                  * -ESRCH indicates VMA cleared. The target is dying.
2912                  */
2913                 return_error_param = PTR_ERR(t->buffer);
2914                 return_error = return_error_param == -ESRCH ?
2915                         BR_DEAD_REPLY : BR_FAILED_REPLY;
2916                 return_error_line = __LINE__;
2917                 t->buffer = NULL;
2918                 goto err_binder_alloc_buf_failed;
2919         }
2920         t->buffer->debug_id = t->debug_id;
2921         t->buffer->transaction = t;
2922         t->buffer->target_node = target_node;
2923         trace_binder_transaction_alloc_buf(t->buffer);
2924         off_start = (binder_size_t *)(t->buffer->data +
2925                                       ALIGN(tr->data_size, sizeof(void *)));
2926         offp = off_start;
2927
2928         if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
2929                            tr->data.ptr.buffer, tr->data_size)) {
2930                 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2931                                 proc->pid, thread->pid);
2932                 return_error = BR_FAILED_REPLY;
2933                 return_error_param = -EFAULT;
2934                 return_error_line = __LINE__;
2935                 goto err_copy_data_failed;
2936         }
2937         if (copy_from_user(offp, (const void __user *)(uintptr_t)
2938                            tr->data.ptr.offsets, tr->offsets_size)) {
2939                 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2940                                 proc->pid, thread->pid);
2941                 return_error = BR_FAILED_REPLY;
2942                 return_error_param = -EFAULT;
2943                 return_error_line = __LINE__;
2944                 goto err_copy_data_failed;
2945         }
2946         if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2947                 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2948                                 proc->pid, thread->pid, (u64)tr->offsets_size);
2949                 return_error = BR_FAILED_REPLY;
2950                 return_error_param = -EINVAL;
2951                 return_error_line = __LINE__;
2952                 goto err_bad_offset;
2953         }
2954         if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2955                 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2956                                   proc->pid, thread->pid,
2957                                   (u64)extra_buffers_size);
2958                 return_error = BR_FAILED_REPLY;
2959                 return_error_param = -EINVAL;
2960                 return_error_line = __LINE__;
2961                 goto err_bad_offset;
2962         }
2963         off_end = (void *)off_start + tr->offsets_size;
2964         sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
2965         sg_buf_end = sg_bufp + extra_buffers_size;
2966         off_min = 0;
2967         for (; offp < off_end; offp++) {
2968                 struct binder_object_header *hdr;
2969                 size_t object_size = binder_validate_object(t->buffer, *offp);
2970
2971                 if (object_size == 0 || *offp < off_min) {
2972                         binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
2973                                           proc->pid, thread->pid, (u64)*offp,
2974                                           (u64)off_min,
2975                                           (u64)t->buffer->data_size);
2976                         return_error = BR_FAILED_REPLY;
2977                         return_error_param = -EINVAL;
2978                         return_error_line = __LINE__;
2979                         goto err_bad_offset;
2980                 }
2981
2982                 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
2983                 off_min = *offp + object_size;
2984                 switch (hdr->type) {
2985                 case BINDER_TYPE_BINDER:
2986                 case BINDER_TYPE_WEAK_BINDER: {
2987                         struct flat_binder_object *fp;
2988
2989                         fp = to_flat_binder_object(hdr);
2990                         ret = binder_translate_binder(fp, t, thread);
2991                         if (ret < 0) {
2992                                 return_error = BR_FAILED_REPLY;
2993                                 return_error_param = ret;
2994                                 return_error_line = __LINE__;
2995                                 goto err_translate_failed;
2996                         }
2997                 } break;
2998                 case BINDER_TYPE_HANDLE:
2999                 case BINDER_TYPE_WEAK_HANDLE: {
3000                         struct flat_binder_object *fp;
3001
3002                         fp = to_flat_binder_object(hdr);
3003                         ret = binder_translate_handle(fp, t, thread);
3004                         if (ret < 0) {
3005                                 return_error = BR_FAILED_REPLY;
3006                                 return_error_param = ret;
3007                                 return_error_line = __LINE__;
3008                                 goto err_translate_failed;
3009                         }
3010                 } break;
3011
3012                 case BINDER_TYPE_FD: {
3013                         struct binder_fd_object *fp = to_binder_fd_object(hdr);
3014                         int target_fd = binder_translate_fd(fp->fd, t, thread,
3015                                                             in_reply_to);
3016
3017                         if (target_fd < 0) {
3018                                 return_error = BR_FAILED_REPLY;
3019                                 return_error_param = target_fd;
3020                                 return_error_line = __LINE__;
3021                                 goto err_translate_failed;
3022                         }
3023                         fp->pad_binder = 0;
3024                         fp->fd = target_fd;
3025                 } break;
3026                 case BINDER_TYPE_FDA: {
3027                         struct binder_fd_array_object *fda =
3028                                 to_binder_fd_array_object(hdr);
3029                         struct binder_buffer_object *parent =
3030                                 binder_validate_ptr(t->buffer, fda->parent,
3031                                                     off_start,
3032                                                     offp - off_start);
3033                         if (!parent) {
3034                                 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3035                                                   proc->pid, thread->pid);
3036                                 return_error = BR_FAILED_REPLY;
3037                                 return_error_param = -EINVAL;
3038                                 return_error_line = __LINE__;
3039                                 goto err_bad_parent;
3040                         }
3041                         if (!binder_validate_fixup(t->buffer, off_start,
3042                                                    parent, fda->parent_offset,
3043                                                    last_fixup_obj,
3044                                                    last_fixup_min_off)) {
3045                                 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3046                                                   proc->pid, thread->pid);
3047                                 return_error = BR_FAILED_REPLY;
3048                                 return_error_param = -EINVAL;
3049                                 return_error_line = __LINE__;
3050                                 goto err_bad_parent;
3051                         }
3052                         ret = binder_translate_fd_array(fda, parent, t, thread,
3053                                                         in_reply_to);
3054                         if (ret < 0) {
3055                                 return_error = BR_FAILED_REPLY;
3056                                 return_error_param = ret;
3057                                 return_error_line = __LINE__;
3058                                 goto err_translate_failed;
3059                         }
3060                         last_fixup_obj = parent;
3061                         last_fixup_min_off =
3062                                 fda->parent_offset + sizeof(u32) * fda->num_fds;
3063                 } break;
3064                 case BINDER_TYPE_PTR: {
3065                         struct binder_buffer_object *bp =
3066                                 to_binder_buffer_object(hdr);
3067                         size_t buf_left = sg_buf_end - sg_bufp;
3068
3069                         if (bp->length > buf_left) {
3070                                 binder_user_error("%d:%d got transaction with too large buffer\n",
3071                                                   proc->pid, thread->pid);
3072                                 return_error = BR_FAILED_REPLY;
3073                                 return_error_param = -EINVAL;
3074                                 return_error_line = __LINE__;
3075                                 goto err_bad_offset;
3076                         }
3077                         if (copy_from_user(sg_bufp,
3078                                            (const void __user *)(uintptr_t)
3079                                            bp->buffer, bp->length)) {
3080                                 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3081                                                   proc->pid, thread->pid);
3082                                 return_error_param = -EFAULT;
3083                                 return_error = BR_FAILED_REPLY;
3084                                 return_error_line = __LINE__;
3085                                 goto err_copy_data_failed;
3086                         }
3087                         /* Fixup buffer pointer to target proc address space */
3088                         bp->buffer = (uintptr_t)sg_bufp +
3089                                 binder_alloc_get_user_buffer_offset(
3090                                                 &target_proc->alloc);
3091                         sg_bufp += ALIGN(bp->length, sizeof(u64));
3092
3093                         ret = binder_fixup_parent(t, thread, bp, off_start,
3094                                                   offp - off_start,
3095                                                   last_fixup_obj,
3096                                                   last_fixup_min_off);
3097                         if (ret < 0) {
3098                                 return_error = BR_FAILED_REPLY;
3099                                 return_error_param = ret;
3100                                 return_error_line = __LINE__;
3101                                 goto err_translate_failed;
3102                         }
3103                         last_fixup_obj = bp;
3104                         last_fixup_min_off = 0;
3105                 } break;
3106                 default:
3107                         binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3108                                 proc->pid, thread->pid, hdr->type);
3109                         return_error = BR_FAILED_REPLY;
3110                         return_error_param = -EINVAL;
3111                         return_error_line = __LINE__;
3112                         goto err_bad_object_type;
3113                 }
3114         }
3115         tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3116         binder_enqueue_work(proc, tcomplete, &thread->todo);
3117         t->work.type = BINDER_WORK_TRANSACTION;
3118
3119         if (reply) {
3120                 binder_inner_proc_lock(target_proc);
3121                 if (target_thread->is_dead) {
3122                         binder_inner_proc_unlock(target_proc);
3123                         goto err_dead_proc_or_thread;
3124                 }
3125                 BUG_ON(t->buffer->async_transaction != 0);
3126                 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3127                 binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
3128                 binder_inner_proc_unlock(target_proc);
3129                 wake_up_interruptible_sync(&target_thread->wait);
3130                 binder_free_transaction(in_reply_to);
3131         } else if (!(t->flags & TF_ONE_WAY)) {
3132                 BUG_ON(t->buffer->async_transaction != 0);
3133                 binder_inner_proc_lock(proc);
3134                 t->need_reply = 1;
3135                 t->from_parent = thread->transaction_stack;
3136                 thread->transaction_stack = t;
3137                 binder_inner_proc_unlock(proc);
3138                 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3139                         binder_inner_proc_lock(proc);
3140                         binder_pop_transaction_ilocked(thread, t);
3141                         binder_inner_proc_unlock(proc);
3142                         goto err_dead_proc_or_thread;
3143                 }
3144         } else {
3145                 BUG_ON(target_node == NULL);
3146                 BUG_ON(t->buffer->async_transaction != 1);
3147                 if (!binder_proc_transaction(t, target_proc, NULL))
3148                         goto err_dead_proc_or_thread;
3149         }
3150         if (target_thread)
3151                 binder_thread_dec_tmpref(target_thread);
3152         binder_proc_dec_tmpref(target_proc);
3153         if (target_node)
3154                 binder_dec_node_tmpref(target_node);
3155         /*
3156          * write barrier to synchronize with initialization
3157          * of log entry
3158          */
3159         smp_wmb();
3160         WRITE_ONCE(e->debug_id_done, t_debug_id);
3161         return;
3162
3163 err_dead_proc_or_thread:
3164         return_error = BR_DEAD_REPLY;
3165         return_error_line = __LINE__;
3166         binder_dequeue_work(proc, tcomplete);
3167 err_translate_failed:
3168 err_bad_object_type:
3169 err_bad_offset:
3170 err_bad_parent:
3171 err_copy_data_failed:
3172         trace_binder_transaction_failed_buffer_release(t->buffer);
3173         binder_transaction_buffer_release(target_proc, t->buffer, offp);
3174         if (target_node)
3175                 binder_dec_node_tmpref(target_node);
3176         target_node = NULL;
3177         t->buffer->transaction = NULL;
3178         binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3179 err_binder_alloc_buf_failed:
3180         kfree(tcomplete);
3181         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3182 err_alloc_tcomplete_failed:
3183         kfree(t);
3184         binder_stats_deleted(BINDER_STAT_TRANSACTION);
3185 err_alloc_t_failed:
3186 err_bad_call_stack:
3187 err_empty_call_stack:
3188 err_dead_binder:
3189 err_invalid_target_handle:
3190         if (target_thread)
3191                 binder_thread_dec_tmpref(target_thread);
3192         if (target_proc)
3193                 binder_proc_dec_tmpref(target_proc);
3194         if (target_node) {
3195                 binder_dec_node(target_node, 1, 0);
3196                 binder_dec_node_tmpref(target_node);
3197         }
3198
3199         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3200                      "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3201                      proc->pid, thread->pid, return_error, return_error_param,
3202                      (u64)tr->data_size, (u64)tr->offsets_size,
3203                      return_error_line);
3204
3205         {
3206                 struct binder_transaction_log_entry *fe;
3207
3208                 e->return_error = return_error;
3209                 e->return_error_param = return_error_param;
3210                 e->return_error_line = return_error_line;
3211                 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3212                 *fe = *e;
3213                 /*
3214                  * write barrier to synchronize with initialization
3215                  * of log entry
3216                  */
3217                 smp_wmb();
3218                 WRITE_ONCE(e->debug_id_done, t_debug_id);
3219                 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3220         }
3221
3222         BUG_ON(thread->return_error.cmd != BR_OK);
3223         if (in_reply_to) {
3224                 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3225                 binder_enqueue_work(thread->proc,
3226                                     &thread->return_error.work,
3227                                     &thread->todo);
3228                 binder_send_failed_reply(in_reply_to, return_error);
3229         } else {
3230                 thread->return_error.cmd = return_error;
3231                 binder_enqueue_work(thread->proc,
3232                                     &thread->return_error.work,
3233                                     &thread->todo);
3234         }
3235 }
3236
3237 static int binder_thread_write(struct binder_proc *proc,
3238                         struct binder_thread *thread,
3239                         binder_uintptr_t binder_buffer, size_t size,
3240                         binder_size_t *consumed)
3241 {
3242         uint32_t cmd;
3243         struct binder_context *context = proc->context;
3244         void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3245         void __user *ptr = buffer + *consumed;
3246         void __user *end = buffer + size;
3247
3248         while (ptr < end && thread->return_error.cmd == BR_OK) {
3249                 int ret;
3250
3251                 if (get_user(cmd, (uint32_t __user *)ptr))
3252                         return -EFAULT;
3253                 ptr += sizeof(uint32_t);
3254                 trace_binder_command(cmd);
3255                 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3256                         atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3257                         atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3258                         atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3259                 }
3260                 switch (cmd) {
3261                 case BC_INCREFS:
3262                 case BC_ACQUIRE:
3263                 case BC_RELEASE:
3264                 case BC_DECREFS: {
3265                         uint32_t target;
3266                         const char *debug_string;
3267                         bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3268                         bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3269                         struct binder_ref_data rdata;
3270
3271                         if (get_user(target, (uint32_t __user *)ptr))
3272                                 return -EFAULT;
3273
3274                         ptr += sizeof(uint32_t);
3275                         ret = -1;
3276                         if (increment && !target) {
3277                                 struct binder_node *ctx_mgr_node;
3278                                 mutex_lock(&context->context_mgr_node_lock);
3279                                 ctx_mgr_node = context->binder_context_mgr_node;
3280                                 if (ctx_mgr_node) {
3281                                         if (ctx_mgr_node->proc == proc) {
3282                                                 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3283                                                                   proc->pid, thread->pid);
3284                                                 mutex_unlock(&context->context_mgr_node_lock);
3285                                                 return -EINVAL;
3286                                         }
3287                                         ret = binder_inc_ref_for_node(
3288                                                         proc, ctx_mgr_node,
3289                                                         strong, NULL, &rdata);
3290                                 }
3291                                 mutex_unlock(&context->context_mgr_node_lock);
3292                         }
3293                         if (ret)
3294                                 ret = binder_update_ref_for_handle(
3295                                                 proc, target, increment, strong,
3296                                                 &rdata);
3297                         if (!ret && rdata.desc != target) {
3298                                 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3299                                         proc->pid, thread->pid,
3300                                         target, rdata.desc);
3301                         }
3302                         switch (cmd) {
3303                         case BC_INCREFS:
3304                                 debug_string = "IncRefs";
3305                                 break;
3306                         case BC_ACQUIRE:
3307                                 debug_string = "Acquire";
3308                                 break;
3309                         case BC_RELEASE:
3310                                 debug_string = "Release";
3311                                 break;
3312                         case BC_DECREFS:
3313                         default:
3314                                 debug_string = "DecRefs";
3315                                 break;
3316                         }
3317                         if (ret) {
3318                                 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3319                                         proc->pid, thread->pid, debug_string,
3320                                         strong, target, ret);
3321                                 break;
3322                         }
3323                         binder_debug(BINDER_DEBUG_USER_REFS,
3324                                      "%d:%d %s ref %d desc %d s %d w %d\n",
3325                                      proc->pid, thread->pid, debug_string,
3326                                      rdata.debug_id, rdata.desc, rdata.strong,
3327                                      rdata.weak);
3328                         break;
3329                 }
3330                 case BC_INCREFS_DONE:
3331                 case BC_ACQUIRE_DONE: {
3332                         binder_uintptr_t node_ptr;
3333                         binder_uintptr_t cookie;
3334                         struct binder_node *node;
3335                         bool free_node;
3336
3337                         if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3338                                 return -EFAULT;
3339                         ptr += sizeof(binder_uintptr_t);
3340                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3341                                 return -EFAULT;
3342                         ptr += sizeof(binder_uintptr_t);
3343                         node = binder_get_node(proc, node_ptr);
3344                         if (node == NULL) {
3345                                 binder_user_error("%d:%d %s u%016llx no match\n",
3346                                         proc->pid, thread->pid,
3347                                         cmd == BC_INCREFS_DONE ?
3348                                         "BC_INCREFS_DONE" :
3349                                         "BC_ACQUIRE_DONE",
3350                                         (u64)node_ptr);
3351                                 break;
3352                         }
3353                         if (cookie != node->cookie) {
3354                                 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3355                                         proc->pid, thread->pid,
3356                                         cmd == BC_INCREFS_DONE ?
3357                                         "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3358                                         (u64)node_ptr, node->debug_id,
3359                                         (u64)cookie, (u64)node->cookie);
3360                                 binder_put_node(node);
3361                                 break;
3362                         }
3363                         binder_node_inner_lock(node);
3364                         if (cmd == BC_ACQUIRE_DONE) {
3365                                 if (node->pending_strong_ref == 0) {
3366                                         binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3367                                                 proc->pid, thread->pid,
3368                                                 node->debug_id);
3369                                         binder_node_inner_unlock(node);
3370                                         binder_put_node(node);
3371                                         break;
3372                                 }
3373                                 node->pending_strong_ref = 0;
3374                         } else {
3375                                 if (node->pending_weak_ref == 0) {
3376                                         binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3377                                                 proc->pid, thread->pid,
3378                                                 node->debug_id);
3379                                         binder_node_inner_unlock(node);
3380                                         binder_put_node(node);
3381                                         break;
3382                                 }
3383                                 node->pending_weak_ref = 0;
3384                         }
3385                         free_node = binder_dec_node_nilocked(node,
3386                                         cmd == BC_ACQUIRE_DONE, 0);
3387                         WARN_ON(free_node);
3388                         binder_debug(BINDER_DEBUG_USER_REFS,
3389                                      "%d:%d %s node %d ls %d lw %d tr %d\n",
3390                                      proc->pid, thread->pid,
3391                                      cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3392                                      node->debug_id, node->local_strong_refs,
3393                                      node->local_weak_refs, node->tmp_refs);
3394                         binder_node_inner_unlock(node);
3395                         binder_put_node(node);
3396                         break;
3397                 }
3398                 case BC_ATTEMPT_ACQUIRE:
3399                         pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3400                         return -EINVAL;
3401                 case BC_ACQUIRE_RESULT:
3402                         pr_err("BC_ACQUIRE_RESULT not supported\n");
3403                         return -EINVAL;
3404
3405                 case BC_FREE_BUFFER: {
3406                         binder_uintptr_t data_ptr;
3407                         struct binder_buffer *buffer;
3408
3409                         if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3410                                 return -EFAULT;
3411                         ptr += sizeof(binder_uintptr_t);
3412
3413                         buffer = binder_alloc_prepare_to_free(&proc->alloc,
3414                                                               data_ptr);
3415                         if (IS_ERR_OR_NULL(buffer)) {
3416                                 if (PTR_ERR(buffer) == -EPERM) {
3417                                         binder_user_error(
3418                                                 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3419                                                 proc->pid, thread->pid,
3420                                                 (u64)data_ptr);
3421                                 } else {
3422                                         binder_user_error(
3423                                                 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3424                                                 proc->pid, thread->pid,
3425                                                 (u64)data_ptr);
3426                                 }
3427                                 break;
3428                         }
3429                         binder_debug(BINDER_DEBUG_FREE_BUFFER,
3430                                      "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3431                                      proc->pid, thread->pid, (u64)data_ptr,
3432                                      buffer->debug_id,
3433                                      buffer->transaction ? "active" : "finished");
3434
3435                         binder_inner_proc_lock(proc);
3436                         if (buffer->transaction) {
3437                                 buffer->transaction->buffer = NULL;
3438                                 buffer->transaction = NULL;
3439                         }
3440                         binder_inner_proc_unlock(proc);
3441                         if (buffer->async_transaction && buffer->target_node) {
3442                                 struct binder_node *buf_node;
3443                                 struct binder_work *w;
3444
3445                                 buf_node = buffer->target_node;
3446                                 binder_node_inner_lock(buf_node);
3447                                 BUG_ON(!buf_node->has_async_transaction);
3448                                 BUG_ON(buf_node->proc != proc);
3449                                 w = binder_dequeue_work_head_ilocked(
3450                                                 &buf_node->async_todo);
3451                                 if (!w) {
3452                                         buf_node->has_async_transaction = 0;
3453                                 } else {
3454                                         binder_enqueue_work_ilocked(
3455                                                         w, &proc->todo);
3456                                         binder_wakeup_proc_ilocked(proc);
3457                                 }
3458                                 binder_node_inner_unlock(buf_node);
3459                         }
3460                         trace_binder_transaction_buffer_release(buffer);
3461                         binder_transaction_buffer_release(proc, buffer, NULL);
3462                         binder_alloc_free_buf(&proc->alloc, buffer);
3463                         break;
3464                 }
3465
3466                 case BC_TRANSACTION_SG:
3467                 case BC_REPLY_SG: {
3468                         struct binder_transaction_data_sg tr;
3469
3470                         if (copy_from_user(&tr, ptr, sizeof(tr)))
3471                                 return -EFAULT;
3472                         ptr += sizeof(tr);
3473                         binder_transaction(proc, thread, &tr.transaction_data,
3474                                            cmd == BC_REPLY_SG, tr.buffers_size);
3475                         break;
3476                 }
3477                 case BC_TRANSACTION:
3478                 case BC_REPLY: {
3479                         struct binder_transaction_data tr;
3480
3481                         if (copy_from_user(&tr, ptr, sizeof(tr)))
3482                                 return -EFAULT;
3483                         ptr += sizeof(tr);
3484                         binder_transaction(proc, thread, &tr,
3485                                            cmd == BC_REPLY, 0);
3486                         break;
3487                 }
3488
3489                 case BC_REGISTER_LOOPER:
3490                         binder_debug(BINDER_DEBUG_THREADS,
3491                                      "%d:%d BC_REGISTER_LOOPER\n",
3492                                      proc->pid, thread->pid);
3493                         binder_inner_proc_lock(proc);
3494                         if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3495                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3496                                 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3497                                         proc->pid, thread->pid);
3498                         } else if (proc->requested_threads == 0) {
3499                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3500                                 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3501                                         proc->pid, thread->pid);
3502                         } else {
3503                                 proc->requested_threads--;
3504                                 proc->requested_threads_started++;
3505                         }
3506                         thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3507                         binder_inner_proc_unlock(proc);
3508                         break;
3509                 case BC_ENTER_LOOPER:
3510                         binder_debug(BINDER_DEBUG_THREADS,
3511                                      "%d:%d BC_ENTER_LOOPER\n",
3512                                      proc->pid, thread->pid);
3513                         if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3514                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3515                                 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3516                                         proc->pid, thread->pid);
3517                         }
3518                         thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3519                         break;
3520                 case BC_EXIT_LOOPER:
3521                         binder_debug(BINDER_DEBUG_THREADS,
3522                                      "%d:%d BC_EXIT_LOOPER\n",
3523                                      proc->pid, thread->pid);
3524                         thread->looper |= BINDER_LOOPER_STATE_EXITED;
3525                         break;
3526
3527                 case BC_REQUEST_DEATH_NOTIFICATION:
3528                 case BC_CLEAR_DEATH_NOTIFICATION: {
3529                         uint32_t target;
3530                         binder_uintptr_t cookie;
3531                         struct binder_ref *ref;
3532                         struct binder_ref_death *death = NULL;
3533
3534                         if (get_user(target, (uint32_t __user *)ptr))
3535                                 return -EFAULT;
3536                         ptr += sizeof(uint32_t);
3537                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3538                                 return -EFAULT;
3539                         ptr += sizeof(binder_uintptr_t);
3540                         if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3541                                 /*
3542                                  * Allocate memory for death notification
3543                                  * before taking lock
3544                                  */
3545                                 death = kzalloc(sizeof(*death), GFP_KERNEL);
3546                                 if (death == NULL) {
3547                                         WARN_ON(thread->return_error.cmd !=
3548                                                 BR_OK);
3549                                         thread->return_error.cmd = BR_ERROR;
3550                                         binder_enqueue_work(
3551                                                 thread->proc,
3552                                                 &thread->return_error.work,
3553                                                 &thread->todo);
3554                                         binder_debug(
3555                                                 BINDER_DEBUG_FAILED_TRANSACTION,
3556                                                 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3557                                                 proc->pid, thread->pid);
3558                                         break;
3559                                 }
3560                         }
3561                         binder_proc_lock(proc);
3562                         ref = binder_get_ref_olocked(proc, target, false);
3563                         if (ref == NULL) {
3564                                 binder_user_error("%d:%d %s invalid ref %d\n",
3565                                         proc->pid, thread->pid,
3566                                         cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3567                                         "BC_REQUEST_DEATH_NOTIFICATION" :
3568                                         "BC_CLEAR_DEATH_NOTIFICATION",
3569                                         target);
3570                                 binder_proc_unlock(proc);
3571                                 kfree(death);
3572                                 break;
3573                         }
3574
3575                         binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3576                                      "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3577                                      proc->pid, thread->pid,
3578                                      cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3579                                      "BC_REQUEST_DEATH_NOTIFICATION" :
3580                                      "BC_CLEAR_DEATH_NOTIFICATION",
3581                                      (u64)cookie, ref->data.debug_id,
3582                                      ref->data.desc, ref->data.strong,
3583                                      ref->data.weak, ref->node->debug_id);
3584
3585                         binder_node_lock(ref->node);
3586                         if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3587                                 if (ref->death) {
3588                                         binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3589                                                 proc->pid, thread->pid);
3590                                         binder_node_unlock(ref->node);
3591                                         binder_proc_unlock(proc);
3592                                         kfree(death);
3593                                         break;
3594                                 }
3595                                 binder_stats_created(BINDER_STAT_DEATH);
3596                                 INIT_LIST_HEAD(&death->work.entry);
3597                                 death->cookie = cookie;
3598                                 ref->death = death;
3599                                 if (ref->node->proc == NULL) {
3600                                         ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3601
3602                                         binder_inner_proc_lock(proc);
3603                                         binder_enqueue_work_ilocked(
3604                                                 &ref->death->work, &proc->todo);
3605                                         binder_wakeup_proc_ilocked(proc);
3606                                         binder_inner_proc_unlock(proc);
3607                                 }
3608                         } else {
3609                                 if (ref->death == NULL) {
3610                                         binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3611                                                 proc->pid, thread->pid);
3612                                         binder_node_unlock(ref->node);
3613                                         binder_proc_unlock(proc);
3614                                         break;
3615                                 }
3616                                 death = ref->death;
3617                                 if (death->cookie != cookie) {
3618                                         binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3619                                                 proc->pid, thread->pid,
3620                                                 (u64)death->cookie,
3621                                                 (u64)cookie);
3622                                         binder_node_unlock(ref->node);
3623                                         binder_proc_unlock(proc);
3624                                         break;
3625                                 }
3626                                 ref->death = NULL;
3627                                 binder_inner_proc_lock(proc);
3628                                 if (list_empty(&death->work.entry)) {
3629                                         death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3630                                         if (thread->looper &
3631                                             (BINDER_LOOPER_STATE_REGISTERED |
3632                                              BINDER_LOOPER_STATE_ENTERED))
3633                                                 binder_enqueue_work_ilocked(
3634                                                                 &death->work,
3635                                                                 &thread->todo);
3636                                         else {
3637                                                 binder_enqueue_work_ilocked(
3638                                                                 &death->work,
3639                                                                 &proc->todo);
3640                                                 binder_wakeup_proc_ilocked(
3641                                                                 proc);
3642                                         }
3643                                 } else {
3644                                         BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3645                                         death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3646                                 }
3647                                 binder_inner_proc_unlock(proc);
3648                         }
3649                         binder_node_unlock(ref->node);
3650                         binder_proc_unlock(proc);
3651                 } break;
3652                 case BC_DEAD_BINDER_DONE: {
3653                         struct binder_work *w;
3654                         binder_uintptr_t cookie;
3655                         struct binder_ref_death *death = NULL;
3656
3657                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3658                                 return -EFAULT;
3659
3660                         ptr += sizeof(cookie);
3661                         binder_inner_proc_lock(proc);
3662                         list_for_each_entry(w, &proc->delivered_death,
3663                                             entry) {
3664                                 struct binder_ref_death *tmp_death =
3665                                         container_of(w,
3666                                                      struct binder_ref_death,
3667                                                      work);
3668
3669                                 if (tmp_death->cookie == cookie) {
3670                                         death = tmp_death;
3671                                         break;
3672                                 }
3673                         }
3674                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
3675                                      "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3676                                      proc->pid, thread->pid, (u64)cookie,
3677                                      death);
3678                         if (death == NULL) {
3679                                 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3680                                         proc->pid, thread->pid, (u64)cookie);
3681                                 binder_inner_proc_unlock(proc);
3682                                 break;
3683                         }
3684                         binder_dequeue_work_ilocked(&death->work);
3685                         if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3686                                 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3687                                 if (thread->looper &
3688                                         (BINDER_LOOPER_STATE_REGISTERED |
3689                                          BINDER_LOOPER_STATE_ENTERED))
3690                                         binder_enqueue_work_ilocked(
3691                                                 &death->work, &thread->todo);
3692                                 else {
3693                                         binder_enqueue_work_ilocked(
3694                                                         &death->work,
3695                                                         &proc->todo);
3696                                         binder_wakeup_proc_ilocked(proc);
3697                                 }
3698                         }
3699                         binder_inner_proc_unlock(proc);
3700                 } break;
3701
3702                 default:
3703                         pr_err("%d:%d unknown command %d\n",
3704                                proc->pid, thread->pid, cmd);
3705                         return -EINVAL;
3706                 }
3707                 *consumed = ptr - buffer;
3708         }
3709         return 0;
3710 }
3711
3712 static void binder_stat_br(struct binder_proc *proc,
3713                            struct binder_thread *thread, uint32_t cmd)
3714 {
3715         trace_binder_return(cmd);
3716         if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3717                 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3718                 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3719                 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3720         }
3721 }
3722
3723 static int binder_put_node_cmd(struct binder_proc *proc,
3724                                struct binder_thread *thread,
3725                                void __user **ptrp,
3726                                binder_uintptr_t node_ptr,
3727                                binder_uintptr_t node_cookie,
3728                                int node_debug_id,
3729                                uint32_t cmd, const char *cmd_name)
3730 {
3731         void __user *ptr = *ptrp;
3732
3733         if (put_user(cmd, (uint32_t __user *)ptr))
3734                 return -EFAULT;
3735         ptr += sizeof(uint32_t);
3736
3737         if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3738                 return -EFAULT;
3739         ptr += sizeof(binder_uintptr_t);
3740
3741         if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3742                 return -EFAULT;
3743         ptr += sizeof(binder_uintptr_t);
3744
3745         binder_stat_br(proc, thread, cmd);
3746         binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3747                      proc->pid, thread->pid, cmd_name, node_debug_id,
3748                      (u64)node_ptr, (u64)node_cookie);
3749
3750         *ptrp = ptr;
3751         return 0;
3752 }
3753
3754 static int binder_wait_for_work(struct binder_thread *thread,
3755                                 bool do_proc_work)
3756 {
3757         DEFINE_WAIT(wait);
3758         struct binder_proc *proc = thread->proc;
3759         int ret = 0;
3760
3761         freezer_do_not_count();
3762         binder_inner_proc_lock(proc);
3763         for (;;) {
3764                 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3765                 if (binder_has_work_ilocked(thread, do_proc_work))
3766                         break;
3767                 if (do_proc_work)
3768                         list_add(&thread->waiting_thread_node,
3769                                  &proc->waiting_threads);
3770                 binder_inner_proc_unlock(proc);
3771                 schedule();
3772                 binder_inner_proc_lock(proc);
3773                 list_del_init(&thread->waiting_thread_node);
3774                 if (signal_pending(current)) {
3775                         ret = -ERESTARTSYS;
3776                         break;
3777                 }
3778         }
3779         finish_wait(&thread->wait, &wait);
3780         binder_inner_proc_unlock(proc);
3781         freezer_count();
3782
3783         return ret;
3784 }
3785
3786 static int binder_thread_read(struct binder_proc *proc,
3787                               struct binder_thread *thread,
3788                               binder_uintptr_t binder_buffer, size_t size,
3789                               binder_size_t *consumed, int non_block)
3790 {
3791         void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3792         void __user *ptr = buffer + *consumed;
3793         void __user *end = buffer + size;
3794
3795         int ret = 0;
3796         int wait_for_proc_work;
3797
3798         if (*consumed == 0) {
3799                 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3800                         return -EFAULT;
3801                 ptr += sizeof(uint32_t);
3802         }
3803
3804 retry:
3805         binder_inner_proc_lock(proc);
3806         wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
3807         binder_inner_proc_unlock(proc);
3808
3809         thread->looper |= BINDER_LOOPER_STATE_WAITING;
3810
3811         trace_binder_wait_for_work(wait_for_proc_work,
3812                                    !!thread->transaction_stack,
3813                                    !binder_worklist_empty(proc, &thread->todo));
3814         if (wait_for_proc_work) {
3815                 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3816                                         BINDER_LOOPER_STATE_ENTERED))) {
3817                         binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3818                                 proc->pid, thread->pid, thread->looper);
3819                         wait_event_interruptible(binder_user_error_wait,
3820                                                  binder_stop_on_user_error < 2);
3821                 }
3822                 binder_set_nice(proc->default_priority);
3823         }
3824
3825         if (non_block) {
3826                 if (!binder_has_work(thread, wait_for_proc_work))
3827                         ret = -EAGAIN;
3828         } else {
3829                 ret = binder_wait_for_work(thread, wait_for_proc_work);
3830         }
3831
3832         thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3833
3834         if (ret)
3835                 return ret;
3836
3837         while (1) {
3838                 uint32_t cmd;
3839                 struct binder_transaction_data tr;
3840                 struct binder_work *w = NULL;
3841                 struct list_head *list = NULL;
3842                 struct binder_transaction *t = NULL;
3843                 struct binder_thread *t_from;
3844
3845                 binder_inner_proc_lock(proc);
3846                 if (!binder_worklist_empty_ilocked(&thread->todo))
3847                         list = &thread->todo;
3848                 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3849                            wait_for_proc_work)
3850                         list = &proc->todo;
3851                 else {
3852                         binder_inner_proc_unlock(proc);
3853
3854                         /* no data added */
3855                         if (ptr - buffer == 4 && !thread->looper_need_return)
3856                                 goto retry;
3857                         break;
3858                 }
3859
3860                 if (end - ptr < sizeof(tr) + 4) {
3861                         binder_inner_proc_unlock(proc);
3862                         break;
3863                 }
3864                 w = binder_dequeue_work_head_ilocked(list);
3865
3866                 switch (w->type) {
3867                 case BINDER_WORK_TRANSACTION: {
3868                         binder_inner_proc_unlock(proc);
3869                         t = container_of(w, struct binder_transaction, work);
3870                 } break;
3871                 case BINDER_WORK_RETURN_ERROR: {
3872                         struct binder_error *e = container_of(
3873                                         w, struct binder_error, work);
3874
3875                         WARN_ON(e->cmd == BR_OK);
3876                         binder_inner_proc_unlock(proc);
3877                         if (put_user(e->cmd, (uint32_t __user *)ptr))
3878                                 return -EFAULT;
3879                         e->cmd = BR_OK;
3880                         ptr += sizeof(uint32_t);
3881
3882                         binder_stat_br(proc, thread, e->cmd);
3883                 } break;
3884                 case BINDER_WORK_TRANSACTION_COMPLETE: {
3885                         binder_inner_proc_unlock(proc);
3886                         cmd = BR_TRANSACTION_COMPLETE;
3887                         kfree(w);
3888                         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3889                         if (put_user(cmd, (uint32_t __user *)ptr))
3890                                 return -EFAULT;
3891                         ptr += sizeof(uint32_t);
3892
3893                         binder_stat_br(proc, thread, cmd);
3894                         binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
3895                                      "%d:%d BR_TRANSACTION_COMPLETE\n",
3896                                      proc->pid, thread->pid);
3897                 } break;
3898                 case BINDER_WORK_NODE: {
3899                         struct binder_node *node = container_of(w, struct binder_node, work);
3900                         int strong, weak;
3901                         binder_uintptr_t node_ptr = node->ptr;
3902                         binder_uintptr_t node_cookie = node->cookie;
3903                         int node_debug_id = node->debug_id;
3904                         int has_weak_ref;
3905                         int has_strong_ref;
3906                         void __user *orig_ptr = ptr;
3907
3908                         BUG_ON(proc != node->proc);
3909                         strong = node->internal_strong_refs ||
3910                                         node->local_strong_refs;
3911                         weak = !hlist_empty(&node->refs) ||
3912                                         node->local_weak_refs ||
3913                                         node->tmp_refs || strong;
3914                         has_strong_ref = node->has_strong_ref;
3915                         has_weak_ref = node->has_weak_ref;
3916
3917                         if (weak && !has_weak_ref) {
3918                                 node->has_weak_ref = 1;
3919                                 node->pending_weak_ref = 1;
3920                                 node->local_weak_refs++;
3921                         }
3922                         if (strong && !has_strong_ref) {
3923                                 node->has_strong_ref = 1;
3924                                 node->pending_strong_ref = 1;
3925                                 node->local_strong_refs++;
3926                         }
3927                         if (!strong && has_strong_ref)
3928                                 node->has_strong_ref = 0;
3929                         if (!weak && has_weak_ref)
3930                                 node->has_weak_ref = 0;
3931                         if (!weak && !strong) {
3932                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3933                                              "%d:%d node %d u%016llx c%016llx deleted\n",
3934                                              proc->pid, thread->pid,
3935                                              node_debug_id,
3936                                              (u64)node_ptr,
3937                                              (u64)node_cookie);
3938                                 rb_erase(&node->rb_node, &proc->nodes);
3939                                 binder_inner_proc_unlock(proc);
3940                                 binder_node_lock(node);
3941                                 /*
3942                                  * Acquire the node lock before freeing the
3943                                  * node to serialize with other threads that
3944                                  * may have been holding the node lock while
3945                                  * decrementing this node (avoids race where
3946                                  * this thread frees while the other thread
3947                                  * is unlocking the node after the final
3948                                  * decrement)
3949                                  */
3950                                 binder_node_unlock(node);
3951                                 binder_free_node(node);
3952                         } else
3953                                 binder_inner_proc_unlock(proc);
3954
3955                         if (weak && !has_weak_ref)
3956                                 ret = binder_put_node_cmd(
3957                                                 proc, thread, &ptr, node_ptr,
3958                                                 node_cookie, node_debug_id,
3959                                                 BR_INCREFS, "BR_INCREFS");
3960                         if (!ret && strong && !has_strong_ref)
3961                                 ret = binder_put_node_cmd(
3962                                                 proc, thread, &ptr, node_ptr,
3963                                                 node_cookie, node_debug_id,
3964                                                 BR_ACQUIRE, "BR_ACQUIRE");
3965                         if (!ret && !strong && has_strong_ref)
3966                                 ret = binder_put_node_cmd(
3967                                                 proc, thread, &ptr, node_ptr,
3968                                                 node_cookie, node_debug_id,
3969                                                 BR_RELEASE, "BR_RELEASE");
3970                         if (!ret && !weak && has_weak_ref)
3971                                 ret = binder_put_node_cmd(
3972                                                 proc, thread, &ptr, node_ptr,
3973                                                 node_cookie, node_debug_id,
3974                                                 BR_DECREFS, "BR_DECREFS");
3975                         if (orig_ptr == ptr)
3976                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3977                                              "%d:%d node %d u%016llx c%016llx state unchanged\n",
3978                                              proc->pid, thread->pid,
3979                                              node_debug_id,
3980                                              (u64)node_ptr,
3981                                              (u64)node_cookie);
3982                         if (ret)
3983                                 return ret;
3984                 } break;
3985                 case BINDER_WORK_DEAD_BINDER:
3986                 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3987                 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3988                         struct binder_ref_death *death;
3989                         uint32_t cmd;
3990                         binder_uintptr_t cookie;
3991
3992                         death = container_of(w, struct binder_ref_death, work);
3993                         if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
3994                                 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
3995                         else
3996                                 cmd = BR_DEAD_BINDER;
3997                         cookie = death->cookie;
3998
3999                         binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4000                                      "%d:%d %s %016llx\n",
4001                                       proc->pid, thread->pid,
4002                                       cmd == BR_DEAD_BINDER ?
4003                                       "BR_DEAD_BINDER" :
4004                                       "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4005                                       (u64)cookie);
4006                         if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4007                                 binder_inner_proc_unlock(proc);
4008                                 kfree(death);
4009                                 binder_stats_deleted(BINDER_STAT_DEATH);
4010                         } else {
4011                                 binder_enqueue_work_ilocked(
4012                                                 w, &proc->delivered_death);
4013                                 binder_inner_proc_unlock(proc);
4014                         }
4015                         if (put_user(cmd, (uint32_t __user *)ptr))
4016                                 return -EFAULT;
4017                         ptr += sizeof(uint32_t);
4018                         if (put_user(cookie,
4019                                      (binder_uintptr_t __user *)ptr))
4020                                 return -EFAULT;
4021                         ptr += sizeof(binder_uintptr_t);
4022                         binder_stat_br(proc, thread, cmd);
4023                         if (cmd == BR_DEAD_BINDER)
4024                                 goto done; /* DEAD_BINDER notifications can cause transactions */
4025                 } break;
4026                 }
4027
4028                 if (!t)
4029                         continue;
4030
4031                 BUG_ON(t->buffer == NULL);
4032                 if (t->buffer->target_node) {
4033                         struct binder_node *target_node = t->buffer->target_node;
4034
4035                         tr.target.ptr = target_node->ptr;
4036                         tr.cookie =  target_node->cookie;
4037                         t->saved_priority = task_nice(current);
4038                         if (t->priority < target_node->min_priority &&
4039                             !(t->flags & TF_ONE_WAY))
4040                                 binder_set_nice(t->priority);
4041                         else if (!(t->flags & TF_ONE_WAY) ||
4042                                  t->saved_priority > target_node->min_priority)
4043                                 binder_set_nice(target_node->min_priority);
4044                         cmd = BR_TRANSACTION;
4045                 } else {
4046                         tr.target.ptr = 0;
4047                         tr.cookie = 0;
4048                         cmd = BR_REPLY;
4049                 }
4050                 tr.code = t->code;
4051                 tr.flags = t->flags;
4052                 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4053
4054                 t_from = binder_get_txn_from(t);
4055                 if (t_from) {
4056                         struct task_struct *sender = t_from->proc->tsk;
4057
4058                         tr.sender_pid = task_tgid_nr_ns(sender,
4059                                                         task_active_pid_ns(current));
4060                 } else {
4061                         tr.sender_pid = 0;
4062                 }
4063
4064                 tr.data_size = t->buffer->data_size;
4065                 tr.offsets_size = t->buffer->offsets_size;
4066                 tr.data.ptr.buffer = (binder_uintptr_t)
4067                         ((uintptr_t)t->buffer->data +
4068                         binder_alloc_get_user_buffer_offset(&proc->alloc));
4069                 tr.data.ptr.offsets = tr.data.ptr.buffer +
4070                                         ALIGN(t->buffer->data_size,
4071                                             sizeof(void *));
4072
4073                 if (put_user(cmd, (uint32_t __user *)ptr)) {
4074                         if (t_from)
4075                                 binder_thread_dec_tmpref(t_from);
4076
4077                         binder_cleanup_transaction(t, "put_user failed",
4078                                                    BR_FAILED_REPLY);
4079
4080                         return -EFAULT;
4081                 }
4082                 ptr += sizeof(uint32_t);
4083                 if (copy_to_user(ptr, &tr, sizeof(tr))) {
4084                         if (t_from)
4085                                 binder_thread_dec_tmpref(t_from);
4086
4087                         binder_cleanup_transaction(t, "copy_to_user failed",
4088                                                    BR_FAILED_REPLY);
4089
4090                         return -EFAULT;
4091                 }
4092                 ptr += sizeof(tr);
4093
4094                 trace_binder_transaction_received(t);
4095                 binder_stat_br(proc, thread, cmd);
4096                 binder_debug(BINDER_DEBUG_TRANSACTION,
4097                              "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4098                              proc->pid, thread->pid,
4099                              (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4100                              "BR_REPLY",
4101                              t->debug_id, t_from ? t_from->proc->pid : 0,
4102                              t_from ? t_from->pid : 0, cmd,
4103                              t->buffer->data_size, t->buffer->offsets_size,
4104                              (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
4105
4106                 if (t_from)
4107                         binder_thread_dec_tmpref(t_from);
4108                 t->buffer->allow_user_free = 1;
4109                 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
4110                         binder_inner_proc_lock(thread->proc);
4111                         t->to_parent = thread->transaction_stack;
4112                         t->to_thread = thread;
4113                         thread->transaction_stack = t;
4114                         binder_inner_proc_unlock(thread->proc);
4115                 } else {
4116                         binder_free_transaction(t);
4117                 }
4118                 break;
4119         }
4120
4121 done:
4122
4123         *consumed = ptr - buffer;
4124         binder_inner_proc_lock(proc);
4125         if (proc->requested_threads == 0 &&
4126             list_empty(&thread->proc->waiting_threads) &&
4127             proc->requested_threads_started < proc->max_threads &&
4128             (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4129              BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
4130              /*spawn a new thread if we leave this out */) {
4131                 proc->requested_threads++;
4132                 binder_inner_proc_unlock(proc);
4133                 binder_debug(BINDER_DEBUG_THREADS,
4134                              "%d:%d BR_SPAWN_LOOPER\n",
4135                              proc->pid, thread->pid);
4136                 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4137                         return -EFAULT;
4138                 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4139         } else
4140                 binder_inner_proc_unlock(proc);
4141         return 0;
4142 }
4143
4144 static void binder_release_work(struct binder_proc *proc,
4145                                 struct list_head *list)
4146 {
4147         struct binder_work *w;
4148         enum binder_work_type wtype;
4149
4150         while (1) {
4151                 binder_inner_proc_lock(proc);
4152                 w = binder_dequeue_work_head_ilocked(list);
4153                 wtype = w ? w->type : 0;
4154                 binder_inner_proc_unlock(proc);
4155                 if (!w)
4156                         return;
4157
4158                 switch (wtype) {
4159                 case BINDER_WORK_TRANSACTION: {
4160                         struct binder_transaction *t;
4161
4162                         t = container_of(w, struct binder_transaction, work);
4163
4164                         binder_cleanup_transaction(t, "process died.",
4165                                                    BR_DEAD_REPLY);
4166                 } break;
4167                 case BINDER_WORK_RETURN_ERROR: {
4168                         struct binder_error *e = container_of(
4169                                         w, struct binder_error, work);
4170
4171                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4172                                 "undelivered TRANSACTION_ERROR: %u\n",
4173                                 e->cmd);
4174                 } break;
4175                 case BINDER_WORK_TRANSACTION_COMPLETE: {
4176                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4177                                 "undelivered TRANSACTION_COMPLETE\n");
4178                         kfree(w);
4179                         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4180                 } break;
4181                 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4182                 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4183                         struct binder_ref_death *death;
4184
4185                         death = container_of(w, struct binder_ref_death, work);
4186                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4187                                 "undelivered death notification, %016llx\n",
4188                                 (u64)death->cookie);
4189                         kfree(death);
4190                         binder_stats_deleted(BINDER_STAT_DEATH);
4191                 } break;
4192                 case BINDER_WORK_NODE:
4193                         break;
4194                 default:
4195                         pr_err("unexpected work type, %d, not freed\n",
4196                                wtype);
4197                         break;
4198                 }
4199         }
4200
4201 }
4202
4203 static struct binder_thread *binder_get_thread_ilocked(
4204                 struct binder_proc *proc, struct binder_thread *new_thread)
4205 {
4206         struct binder_thread *thread = NULL;
4207         struct rb_node *parent = NULL;
4208         struct rb_node **p = &proc->threads.rb_node;
4209
4210         while (*p) {
4211                 parent = *p;
4212                 thread = rb_entry(parent, struct binder_thread, rb_node);
4213
4214                 if (current->pid < thread->pid)
4215                         p = &(*p)->rb_left;
4216                 else if (current->pid > thread->pid)
4217                         p = &(*p)->rb_right;
4218                 else
4219                         return thread;
4220         }
4221         if (!new_thread)
4222                 return NULL;
4223         thread = new_thread;
4224         binder_stats_created(BINDER_STAT_THREAD);
4225         thread->proc = proc;
4226         thread->pid = current->pid;
4227         atomic_set(&thread->tmp_ref, 0);
4228         init_waitqueue_head(&thread->wait);
4229         INIT_LIST_HEAD(&thread->todo);
4230         rb_link_node(&thread->rb_node, parent, p);
4231         rb_insert_color(&thread->rb_node, &proc->threads);
4232         thread->looper_need_return = true;
4233         thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4234         thread->return_error.cmd = BR_OK;
4235         thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4236         thread->reply_error.cmd = BR_OK;
4237         INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4238         return thread;
4239 }
4240
4241 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4242 {
4243         struct binder_thread *thread;
4244         struct binder_thread *new_thread;
4245
4246         binder_inner_proc_lock(proc);
4247         thread = binder_get_thread_ilocked(proc, NULL);
4248         binder_inner_proc_unlock(proc);
4249         if (!thread) {
4250                 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4251                 if (new_thread == NULL)
4252                         return NULL;
4253                 binder_inner_proc_lock(proc);
4254                 thread = binder_get_thread_ilocked(proc, new_thread);
4255                 binder_inner_proc_unlock(proc);
4256                 if (thread != new_thread)
4257                         kfree(new_thread);
4258         }
4259         return thread;
4260 }
4261
4262 static void binder_free_proc(struct binder_proc *proc)
4263 {
4264         BUG_ON(!list_empty(&proc->todo));
4265         BUG_ON(!list_empty(&proc->delivered_death));
4266         binder_alloc_deferred_release(&proc->alloc);
4267         put_task_struct(proc->tsk);
4268         put_cred(proc->cred);
4269         binder_stats_deleted(BINDER_STAT_PROC);
4270         kfree(proc);
4271 }
4272
4273 static void binder_free_thread(struct binder_thread *thread)
4274 {
4275         BUG_ON(!list_empty(&thread->todo));
4276         binder_stats_deleted(BINDER_STAT_THREAD);
4277         binder_proc_dec_tmpref(thread->proc);
4278         kfree(thread);
4279 }
4280
4281 static int binder_thread_release(struct binder_proc *proc,
4282                                  struct binder_thread *thread)
4283 {
4284         struct binder_transaction *t;
4285         struct binder_transaction *send_reply = NULL;
4286         int active_transactions = 0;
4287         struct binder_transaction *last_t = NULL;
4288
4289         binder_inner_proc_lock(thread->proc);
4290         /*
4291          * take a ref on the proc so it survives
4292          * after we remove this thread from proc->threads.
4293          * The corresponding dec is when we actually
4294          * free the thread in binder_free_thread()
4295          */
4296         proc->tmp_ref++;
4297         /*
4298          * take a ref on this thread to ensure it
4299          * survives while we are releasing it
4300          */
4301         atomic_inc(&thread->tmp_ref);
4302         rb_erase(&thread->rb_node, &proc->threads);
4303         t = thread->transaction_stack;
4304         if (t) {
4305                 spin_lock(&t->lock);
4306                 if (t->to_thread == thread)
4307                         send_reply = t;
4308         }
4309         thread->is_dead = true;
4310
4311         while (t) {
4312                 last_t = t;
4313                 active_transactions++;
4314                 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4315                              "release %d:%d transaction %d %s, still active\n",
4316                               proc->pid, thread->pid,
4317                              t->debug_id,
4318                              (t->to_thread == thread) ? "in" : "out");
4319
4320                 if (t->to_thread == thread) {
4321                         t->to_proc = NULL;
4322                         t->to_thread = NULL;
4323                         if (t->buffer) {
4324                                 t->buffer->transaction = NULL;
4325                                 t->buffer = NULL;
4326                         }
4327                         t = t->to_parent;
4328                 } else if (t->from == thread) {
4329                         t->from = NULL;
4330                         t = t->from_parent;
4331                 } else
4332                         BUG();
4333                 spin_unlock(&last_t->lock);
4334                 if (t)
4335                         spin_lock(&t->lock);
4336         }
4337
4338         /*
4339          * If this thread used poll, make sure we remove the waitqueue from any
4340          * poll data structures holding it.
4341          */
4342         if (thread->looper & BINDER_LOOPER_STATE_POLL)
4343                 wake_up_pollfree(&thread->wait);
4344
4345         binder_inner_proc_unlock(thread->proc);
4346
4347         /*
4348          * This is needed to avoid races between wake_up_pollfree() above and
4349          * someone else removing the last entry from the queue for other reasons
4350          * (e.g. ep_remove_wait_queue() being called due to an epoll file
4351          * descriptor being closed).  Such other users hold an RCU read lock, so
4352          * we can be sure they're done after we call synchronize_rcu().
4353          */
4354         if (thread->looper & BINDER_LOOPER_STATE_POLL)
4355                 synchronize_rcu();
4356
4357         if (send_reply)
4358                 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4359         binder_release_work(proc, &thread->todo);
4360         binder_thread_dec_tmpref(thread);
4361         return active_transactions;
4362 }
4363
4364 static unsigned int binder_poll(struct file *filp,
4365                                 struct poll_table_struct *wait)
4366 {
4367         struct binder_proc *proc = filp->private_data;
4368         struct binder_thread *thread = NULL;
4369         bool wait_for_proc_work;
4370
4371         thread = binder_get_thread(proc);
4372         if (!thread)
4373                 return POLLERR;
4374
4375         binder_inner_proc_lock(thread->proc);
4376         thread->looper |= BINDER_LOOPER_STATE_POLL;
4377         wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4378
4379         binder_inner_proc_unlock(thread->proc);
4380
4381         poll_wait(filp, &thread->wait, wait);
4382
4383         if (binder_has_work(thread, wait_for_proc_work))
4384                 return POLLIN;
4385
4386         return 0;
4387 }
4388
4389 static int binder_ioctl_write_read(struct file *filp,
4390                                 unsigned int cmd, unsigned long arg,
4391                                 struct binder_thread *thread)
4392 {
4393         int ret = 0;
4394         struct binder_proc *proc = filp->private_data;
4395         unsigned int size = _IOC_SIZE(cmd);
4396         void __user *ubuf = (void __user *)arg;
4397         struct binder_write_read bwr;
4398
4399         if (size != sizeof(struct binder_write_read)) {
4400                 ret = -EINVAL;
4401                 goto out;
4402         }
4403         if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4404                 ret = -EFAULT;
4405                 goto out;
4406         }
4407         binder_debug(BINDER_DEBUG_READ_WRITE,
4408                      "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4409                      proc->pid, thread->pid,
4410                      (u64)bwr.write_size, (u64)bwr.write_buffer,
4411                      (u64)bwr.read_size, (u64)bwr.read_buffer);
4412
4413         if (bwr.write_size > 0) {
4414                 ret = binder_thread_write(proc, thread,
4415                                           bwr.write_buffer,
4416                                           bwr.write_size,
4417                                           &bwr.write_consumed);
4418                 trace_binder_write_done(ret);
4419                 if (ret < 0) {
4420                         bwr.read_consumed = 0;
4421                         if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4422                                 ret = -EFAULT;
4423                         goto out;
4424                 }
4425         }
4426         if (bwr.read_size > 0) {
4427                 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4428                                          bwr.read_size,
4429                                          &bwr.read_consumed,
4430                                          filp->f_flags & O_NONBLOCK);
4431                 trace_binder_read_done(ret);
4432                 binder_inner_proc_lock(proc);
4433                 if (!binder_worklist_empty_ilocked(&proc->todo))
4434                         binder_wakeup_proc_ilocked(proc);
4435                 binder_inner_proc_unlock(proc);
4436                 if (ret < 0) {
4437                         if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4438                                 ret = -EFAULT;
4439                         goto out;
4440                 }
4441         }
4442         binder_debug(BINDER_DEBUG_READ_WRITE,
4443                      "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4444                      proc->pid, thread->pid,
4445                      (u64)bwr.write_consumed, (u64)bwr.write_size,
4446                      (u64)bwr.read_consumed, (u64)bwr.read_size);
4447         if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4448                 ret = -EFAULT;
4449                 goto out;
4450         }
4451 out:
4452         return ret;
4453 }
4454
4455 static int binder_ioctl_set_ctx_mgr(struct file *filp)
4456 {
4457         int ret = 0;
4458         struct binder_proc *proc = filp->private_data;
4459         struct binder_context *context = proc->context;
4460         struct binder_node *new_node;
4461         kuid_t curr_euid = current_euid();
4462
4463         mutex_lock(&context->context_mgr_node_lock);
4464         if (context->binder_context_mgr_node) {
4465                 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4466                 ret = -EBUSY;
4467                 goto out;
4468         }
4469         ret = security_binder_set_context_mgr(proc->cred);
4470         if (ret < 0)
4471                 goto out;
4472         if (uid_valid(context->binder_context_mgr_uid)) {
4473                 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4474                         pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4475                                from_kuid(&init_user_ns, curr_euid),
4476                                from_kuid(&init_user_ns,
4477                                          context->binder_context_mgr_uid));
4478                         ret = -EPERM;
4479                         goto out;
4480                 }
4481         } else {
4482                 context->binder_context_mgr_uid = curr_euid;
4483         }
4484         new_node = binder_new_node(proc, NULL);
4485         if (!new_node) {
4486                 ret = -ENOMEM;
4487                 goto out;
4488         }
4489         binder_node_lock(new_node);
4490         new_node->local_weak_refs++;
4491         new_node->local_strong_refs++;
4492         new_node->has_strong_ref = 1;
4493         new_node->has_weak_ref = 1;
4494         context->binder_context_mgr_node = new_node;
4495         binder_node_unlock(new_node);
4496         binder_put_node(new_node);
4497 out:
4498         mutex_unlock(&context->context_mgr_node_lock);
4499         return ret;
4500 }
4501
4502 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4503                                 struct binder_node_debug_info *info)
4504 {
4505         struct rb_node *n;
4506         binder_uintptr_t ptr = info->ptr;
4507
4508         memset(info, 0, sizeof(*info));
4509
4510         binder_inner_proc_lock(proc);
4511         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4512                 struct binder_node *node = rb_entry(n, struct binder_node,
4513                                                     rb_node);
4514                 if (node->ptr > ptr) {
4515                         info->ptr = node->ptr;
4516                         info->cookie = node->cookie;
4517                         info->has_strong_ref = node->has_strong_ref;
4518                         info->has_weak_ref = node->has_weak_ref;
4519                         break;
4520                 }
4521         }
4522         binder_inner_proc_unlock(proc);
4523
4524         return 0;
4525 }
4526
4527 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4528 {
4529         int ret;
4530         struct binder_proc *proc = filp->private_data;
4531         struct binder_thread *thread;
4532         unsigned int size = _IOC_SIZE(cmd);
4533         void __user *ubuf = (void __user *)arg;
4534
4535         /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4536                         proc->pid, current->pid, cmd, arg);*/
4537
4538         binder_selftest_alloc(&proc->alloc);
4539
4540         trace_binder_ioctl(cmd, arg);
4541
4542         ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4543         if (ret)
4544                 goto err_unlocked;
4545
4546         thread = binder_get_thread(proc);
4547         if (thread == NULL) {
4548                 ret = -ENOMEM;
4549                 goto err;
4550         }
4551
4552         switch (cmd) {
4553         case BINDER_WRITE_READ:
4554                 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4555                 if (ret)
4556                         goto err;
4557                 break;
4558         case BINDER_SET_MAX_THREADS: {
4559                 int max_threads;
4560
4561                 if (copy_from_user(&max_threads, ubuf,
4562                                    sizeof(max_threads))) {
4563                         ret = -EINVAL;
4564                         goto err;
4565                 }
4566                 binder_inner_proc_lock(proc);
4567                 proc->max_threads = max_threads;
4568                 binder_inner_proc_unlock(proc);
4569                 break;
4570         }
4571         case BINDER_SET_CONTEXT_MGR:
4572                 ret = binder_ioctl_set_ctx_mgr(filp);
4573                 if (ret)
4574                         goto err;
4575                 break;
4576         case BINDER_THREAD_EXIT:
4577                 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4578                              proc->pid, thread->pid);
4579                 binder_thread_release(proc, thread);
4580                 thread = NULL;
4581                 break;
4582         case BINDER_VERSION: {
4583                 struct binder_version __user *ver = ubuf;
4584
4585                 if (size != sizeof(struct binder_version)) {
4586                         ret = -EINVAL;
4587                         goto err;
4588                 }
4589                 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4590                              &ver->protocol_version)) {
4591                         ret = -EINVAL;
4592                         goto err;
4593                 }
4594                 break;
4595         }
4596         case BINDER_GET_NODE_DEBUG_INFO: {
4597                 struct binder_node_debug_info info;
4598
4599                 if (copy_from_user(&info, ubuf, sizeof(info))) {
4600                         ret = -EFAULT;
4601                         goto err;
4602                 }
4603
4604                 ret = binder_ioctl_get_node_debug_info(proc, &info);
4605                 if (ret < 0)
4606                         goto err;
4607
4608                 if (copy_to_user(ubuf, &info, sizeof(info))) {
4609                         ret = -EFAULT;
4610                         goto err;
4611                 }
4612                 break;
4613         }
4614         default:
4615                 ret = -EINVAL;
4616                 goto err;
4617         }
4618         ret = 0;
4619 err:
4620         if (thread)
4621                 thread->looper_need_return = false;
4622         wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4623         if (ret && ret != -ERESTARTSYS)
4624                 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
4625 err_unlocked:
4626         trace_binder_ioctl_done(ret);
4627         return ret;
4628 }
4629
4630 static void binder_vma_open(struct vm_area_struct *vma)
4631 {
4632         struct binder_proc *proc = vma->vm_private_data;
4633
4634         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4635                      "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4636                      proc->pid, vma->vm_start, vma->vm_end,
4637                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4638                      (unsigned long)pgprot_val(vma->vm_page_prot));
4639 }
4640
4641 static void binder_vma_close(struct vm_area_struct *vma)
4642 {
4643         struct binder_proc *proc = vma->vm_private_data;
4644
4645         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4646                      "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4647                      proc->pid, vma->vm_start, vma->vm_end,
4648                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4649                      (unsigned long)pgprot_val(vma->vm_page_prot));
4650         binder_alloc_vma_close(&proc->alloc);
4651         binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
4652 }
4653
4654 static int binder_vm_fault(struct vm_fault *vmf)
4655 {
4656         return VM_FAULT_SIGBUS;
4657 }
4658
4659 static const struct vm_operations_struct binder_vm_ops = {
4660         .open = binder_vma_open,
4661         .close = binder_vma_close,
4662         .fault = binder_vm_fault,
4663 };
4664
4665 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4666 {
4667         int ret;
4668         struct binder_proc *proc = filp->private_data;
4669         const char *failure_string;
4670
4671         if (proc->tsk != current->group_leader)
4672                 return -EINVAL;
4673
4674         if ((vma->vm_end - vma->vm_start) > SZ_4M)
4675                 vma->vm_end = vma->vm_start + SZ_4M;
4676
4677         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4678                      "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4679                      __func__, proc->pid, vma->vm_start, vma->vm_end,
4680                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4681                      (unsigned long)pgprot_val(vma->vm_page_prot));
4682
4683         if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4684                 ret = -EPERM;
4685                 failure_string = "bad vm_flags";
4686                 goto err_bad_arg;
4687         }
4688         vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
4689         vma->vm_ops = &binder_vm_ops;
4690         vma->vm_private_data = proc;
4691
4692         ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4693         if (ret)
4694                 return ret;
4695         mutex_lock(&proc->files_lock);
4696         proc->files = get_files_struct(current);
4697         mutex_unlock(&proc->files_lock);
4698         return 0;
4699
4700 err_bad_arg:
4701         pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
4702                proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4703         return ret;
4704 }
4705
4706 static int binder_open(struct inode *nodp, struct file *filp)
4707 {
4708         struct binder_proc *proc;
4709         struct binder_device *binder_dev;
4710
4711         binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
4712                      current->group_leader->pid, current->pid);
4713
4714         proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4715         if (proc == NULL)
4716                 return -ENOMEM;
4717         spin_lock_init(&proc->inner_lock);
4718         spin_lock_init(&proc->outer_lock);
4719         get_task_struct(current->group_leader);
4720         proc->tsk = current->group_leader;
4721         mutex_init(&proc->files_lock);
4722         proc->cred = get_cred(filp->f_cred);
4723         INIT_LIST_HEAD(&proc->todo);
4724         proc->default_priority = task_nice(current);
4725         binder_dev = container_of(filp->private_data, struct binder_device,
4726                                   miscdev);
4727         proc->context = &binder_dev->context;
4728         binder_alloc_init(&proc->alloc);
4729
4730         binder_stats_created(BINDER_STAT_PROC);
4731         proc->pid = current->group_leader->pid;
4732         INIT_LIST_HEAD(&proc->delivered_death);
4733         INIT_LIST_HEAD(&proc->waiting_threads);
4734         filp->private_data = proc;
4735
4736         mutex_lock(&binder_procs_lock);
4737         hlist_add_head(&proc->proc_node, &binder_procs);
4738         mutex_unlock(&binder_procs_lock);
4739
4740         if (binder_debugfs_dir_entry_proc) {
4741                 char strbuf[11];
4742
4743                 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
4744                 /*
4745                  * proc debug entries are shared between contexts, so
4746                  * this will fail if the process tries to open the driver
4747                  * again with a different context. The priting code will
4748                  * anyway print all contexts that a given PID has, so this
4749                  * is not a problem.
4750                  */
4751                 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
4752                         binder_debugfs_dir_entry_proc,
4753                         (void *)(unsigned long)proc->pid,
4754                         &binder_proc_fops);
4755         }
4756
4757         return 0;
4758 }
4759
4760 static int binder_flush(struct file *filp, fl_owner_t id)
4761 {
4762         struct binder_proc *proc = filp->private_data;
4763
4764         binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
4765
4766         return 0;
4767 }
4768
4769 static void binder_deferred_flush(struct binder_proc *proc)
4770 {
4771         struct rb_node *n;
4772         int wake_count = 0;
4773
4774         binder_inner_proc_lock(proc);
4775         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
4776                 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
4777
4778                 thread->looper_need_return = true;
4779                 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
4780                         wake_up_interruptible(&thread->wait);
4781                         wake_count++;
4782                 }
4783         }
4784         binder_inner_proc_unlock(proc);
4785
4786         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4787                      "binder_flush: %d woke %d threads\n", proc->pid,
4788                      wake_count);
4789 }
4790
4791 static int binder_release(struct inode *nodp, struct file *filp)
4792 {
4793         struct binder_proc *proc = filp->private_data;
4794
4795         debugfs_remove(proc->debugfs_entry);
4796         binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
4797
4798         return 0;
4799 }
4800
4801 static int binder_node_release(struct binder_node *node, int refs)
4802 {
4803         struct binder_ref *ref;
4804         int death = 0;
4805         struct binder_proc *proc = node->proc;
4806
4807         binder_release_work(proc, &node->async_todo);
4808
4809         binder_node_lock(node);
4810         binder_inner_proc_lock(proc);
4811         binder_dequeue_work_ilocked(&node->work);
4812         /*
4813          * The caller must have taken a temporary ref on the node,
4814          */
4815         BUG_ON(!node->tmp_refs);
4816         if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
4817                 binder_inner_proc_unlock(proc);
4818                 binder_node_unlock(node);
4819                 binder_free_node(node);
4820
4821                 return refs;
4822         }
4823
4824         node->proc = NULL;
4825         node->local_strong_refs = 0;
4826         node->local_weak_refs = 0;
4827         binder_inner_proc_unlock(proc);
4828
4829         spin_lock(&binder_dead_nodes_lock);
4830         hlist_add_head(&node->dead_node, &binder_dead_nodes);
4831         spin_unlock(&binder_dead_nodes_lock);
4832
4833         hlist_for_each_entry(ref, &node->refs, node_entry) {
4834                 refs++;
4835                 /*
4836                  * Need the node lock to synchronize
4837                  * with new notification requests and the
4838                  * inner lock to synchronize with queued
4839                  * death notifications.
4840                  */
4841                 binder_inner_proc_lock(ref->proc);
4842                 if (!ref->death) {
4843                         binder_inner_proc_unlock(ref->proc);
4844                         continue;
4845                 }
4846
4847                 death++;
4848
4849                 BUG_ON(!list_empty(&ref->death->work.entry));
4850                 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4851                 binder_enqueue_work_ilocked(&ref->death->work,
4852                                             &ref->proc->todo);
4853                 binder_wakeup_proc_ilocked(ref->proc);
4854                 binder_inner_proc_unlock(ref->proc);
4855         }
4856
4857         binder_debug(BINDER_DEBUG_DEAD_BINDER,
4858                      "node %d now dead, refs %d, death %d\n",
4859                      node->debug_id, refs, death);
4860         binder_node_unlock(node);
4861         binder_put_node(node);
4862
4863         return refs;
4864 }
4865
4866 static void binder_deferred_release(struct binder_proc *proc)
4867 {
4868         struct binder_context *context = proc->context;
4869         struct rb_node *n;
4870         int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
4871
4872         BUG_ON(proc->files);
4873
4874         mutex_lock(&binder_procs_lock);
4875         hlist_del(&proc->proc_node);
4876         mutex_unlock(&binder_procs_lock);
4877
4878         mutex_lock(&context->context_mgr_node_lock);
4879         if (context->binder_context_mgr_node &&
4880             context->binder_context_mgr_node->proc == proc) {
4881                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4882                              "%s: %d context_mgr_node gone\n",
4883                              __func__, proc->pid);
4884                 context->binder_context_mgr_node = NULL;
4885         }
4886         mutex_unlock(&context->context_mgr_node_lock);
4887         binder_inner_proc_lock(proc);
4888         /*
4889          * Make sure proc stays alive after we
4890          * remove all the threads
4891          */
4892         proc->tmp_ref++;
4893
4894         proc->is_dead = true;
4895         threads = 0;
4896         active_transactions = 0;
4897         while ((n = rb_first(&proc->threads))) {
4898                 struct binder_thread *thread;
4899
4900                 thread = rb_entry(n, struct binder_thread, rb_node);
4901                 binder_inner_proc_unlock(proc);
4902                 threads++;
4903                 active_transactions += binder_thread_release(proc, thread);
4904                 binder_inner_proc_lock(proc);
4905         }
4906
4907         nodes = 0;
4908         incoming_refs = 0;
4909         while ((n = rb_first(&proc->nodes))) {
4910                 struct binder_node *node;
4911
4912                 node = rb_entry(n, struct binder_node, rb_node);
4913                 nodes++;
4914                 /*
4915                  * take a temporary ref on the node before
4916                  * calling binder_node_release() which will either
4917                  * kfree() the node or call binder_put_node()
4918                  */
4919                 binder_inc_node_tmpref_ilocked(node);
4920                 rb_erase(&node->rb_node, &proc->nodes);
4921                 binder_inner_proc_unlock(proc);
4922                 incoming_refs = binder_node_release(node, incoming_refs);
4923                 binder_inner_proc_lock(proc);
4924         }
4925         binder_inner_proc_unlock(proc);
4926
4927         outgoing_refs = 0;
4928         binder_proc_lock(proc);
4929         while ((n = rb_first(&proc->refs_by_desc))) {
4930                 struct binder_ref *ref;
4931
4932                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
4933                 outgoing_refs++;
4934                 binder_cleanup_ref_olocked(ref);
4935                 binder_proc_unlock(proc);
4936                 binder_free_ref(ref);
4937                 binder_proc_lock(proc);
4938         }
4939         binder_proc_unlock(proc);
4940
4941         binder_release_work(proc, &proc->todo);
4942         binder_release_work(proc, &proc->delivered_death);
4943
4944         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4945                      "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
4946                      __func__, proc->pid, threads, nodes, incoming_refs,
4947                      outgoing_refs, active_transactions);
4948
4949         binder_proc_dec_tmpref(proc);
4950 }
4951
4952 static void binder_deferred_func(struct work_struct *work)
4953 {
4954         struct binder_proc *proc;
4955         struct files_struct *files;
4956
4957         int defer;
4958
4959         do {
4960                 mutex_lock(&binder_deferred_lock);
4961                 if (!hlist_empty(&binder_deferred_list)) {
4962                         proc = hlist_entry(binder_deferred_list.first,
4963                                         struct binder_proc, deferred_work_node);
4964                         hlist_del_init(&proc->deferred_work_node);
4965                         defer = proc->deferred_work;
4966                         proc->deferred_work = 0;
4967                 } else {
4968                         proc = NULL;
4969                         defer = 0;
4970                 }
4971                 mutex_unlock(&binder_deferred_lock);
4972
4973                 files = NULL;
4974                 if (defer & BINDER_DEFERRED_PUT_FILES) {
4975                         mutex_lock(&proc->files_lock);
4976                         files = proc->files;
4977                         if (files)
4978                                 proc->files = NULL;
4979                         mutex_unlock(&proc->files_lock);
4980                 }
4981
4982                 if (defer & BINDER_DEFERRED_FLUSH)
4983                         binder_deferred_flush(proc);
4984
4985                 if (defer & BINDER_DEFERRED_RELEASE)
4986                         binder_deferred_release(proc); /* frees proc */
4987
4988                 if (files)
4989                         put_files_struct(files);
4990         } while (proc);
4991 }
4992 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
4993
4994 static void
4995 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
4996 {
4997         mutex_lock(&binder_deferred_lock);
4998         proc->deferred_work |= defer;
4999         if (hlist_unhashed(&proc->deferred_work_node)) {
5000                 hlist_add_head(&proc->deferred_work_node,
5001                                 &binder_deferred_list);
5002                 schedule_work(&binder_deferred_work);
5003         }
5004         mutex_unlock(&binder_deferred_lock);
5005 }
5006
5007 static void print_binder_transaction_ilocked(struct seq_file *m,
5008                                              struct binder_proc *proc,
5009                                              const char *prefix,
5010                                              struct binder_transaction *t)
5011 {
5012         struct binder_proc *to_proc;
5013         struct binder_buffer *buffer = t->buffer;
5014
5015         spin_lock(&t->lock);
5016         to_proc = t->to_proc;
5017         seq_printf(m,
5018                    "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5019                    prefix, t->debug_id, t,
5020                    t->from ? t->from->proc->pid : 0,
5021                    t->from ? t->from->pid : 0,
5022                    to_proc ? to_proc->pid : 0,
5023                    t->to_thread ? t->to_thread->pid : 0,
5024                    t->code, t->flags, t->priority, t->need_reply);
5025         spin_unlock(&t->lock);
5026
5027         if (proc != to_proc) {
5028                 /*
5029                  * Can only safely deref buffer if we are holding the
5030                  * correct proc inner lock for this node
5031                  */
5032                 seq_puts(m, "\n");
5033                 return;
5034         }
5035
5036         if (buffer == NULL) {
5037                 seq_puts(m, " buffer free\n");
5038                 return;
5039         }
5040         if (buffer->target_node)
5041                 seq_printf(m, " node %d", buffer->target_node->debug_id);
5042         seq_printf(m, " size %zd:%zd data %pK\n",
5043                    buffer->data_size, buffer->offsets_size,
5044                    buffer->data);
5045 }
5046
5047 static void print_binder_work_ilocked(struct seq_file *m,
5048                                      struct binder_proc *proc,
5049                                      const char *prefix,
5050                                      const char *transaction_prefix,
5051                                      struct binder_work *w)
5052 {
5053         struct binder_node *node;
5054         struct binder_transaction *t;
5055
5056         switch (w->type) {
5057         case BINDER_WORK_TRANSACTION:
5058                 t = container_of(w, struct binder_transaction, work);
5059                 print_binder_transaction_ilocked(
5060                                 m, proc, transaction_prefix, t);
5061                 break;
5062         case BINDER_WORK_RETURN_ERROR: {
5063                 struct binder_error *e = container_of(
5064                                 w, struct binder_error, work);
5065
5066                 seq_printf(m, "%stransaction error: %u\n",
5067                            prefix, e->cmd);
5068         } break;
5069         case BINDER_WORK_TRANSACTION_COMPLETE:
5070                 seq_printf(m, "%stransaction complete\n", prefix);
5071                 break;
5072         case BINDER_WORK_NODE:
5073                 node = container_of(w, struct binder_node, work);
5074                 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5075                            prefix, node->debug_id,
5076                            (u64)node->ptr, (u64)node->cookie);
5077                 break;
5078         case BINDER_WORK_DEAD_BINDER:
5079                 seq_printf(m, "%shas dead binder\n", prefix);
5080                 break;
5081         case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5082                 seq_printf(m, "%shas cleared dead binder\n", prefix);
5083                 break;
5084         case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5085                 seq_printf(m, "%shas cleared death notification\n", prefix);
5086                 break;
5087         default:
5088                 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5089                 break;
5090         }
5091 }
5092
5093 static void print_binder_thread_ilocked(struct seq_file *m,
5094                                         struct binder_thread *thread,
5095                                         int print_always)
5096 {
5097         struct binder_transaction *t;
5098         struct binder_work *w;
5099         size_t start_pos = m->count;
5100         size_t header_pos;
5101
5102         seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
5103                         thread->pid, thread->looper,
5104                         thread->looper_need_return,
5105                         atomic_read(&thread->tmp_ref));
5106         header_pos = m->count;
5107         t = thread->transaction_stack;
5108         while (t) {
5109                 if (t->from == thread) {
5110                         print_binder_transaction_ilocked(m, thread->proc,
5111                                         "    outgoing transaction", t);
5112                         t = t->from_parent;
5113                 } else if (t->to_thread == thread) {
5114                         print_binder_transaction_ilocked(m, thread->proc,
5115                                                  "    incoming transaction", t);
5116                         t = t->to_parent;
5117                 } else {
5118                         print_binder_transaction_ilocked(m, thread->proc,
5119                                         "    bad transaction", t);
5120                         t = NULL;
5121                 }
5122         }
5123         list_for_each_entry(w, &thread->todo, entry) {
5124                 print_binder_work_ilocked(m, thread->proc, "    ",
5125                                           "    pending transaction", w);
5126         }
5127         if (!print_always && m->count == header_pos)
5128                 m->count = start_pos;
5129 }
5130
5131 static void print_binder_node_nilocked(struct seq_file *m,
5132                                        struct binder_node *node)
5133 {
5134         struct binder_ref *ref;
5135         struct binder_work *w;
5136         int count;
5137
5138         count = 0;
5139         hlist_for_each_entry(ref, &node->refs, node_entry)
5140                 count++;
5141
5142         seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5143                    node->debug_id, (u64)node->ptr, (u64)node->cookie,
5144                    node->has_strong_ref, node->has_weak_ref,
5145                    node->local_strong_refs, node->local_weak_refs,
5146                    node->internal_strong_refs, count, node->tmp_refs);
5147         if (count) {
5148                 seq_puts(m, " proc");
5149                 hlist_for_each_entry(ref, &node->refs, node_entry)
5150                         seq_printf(m, " %d", ref->proc->pid);
5151         }
5152         seq_puts(m, "\n");
5153         if (node->proc) {
5154                 list_for_each_entry(w, &node->async_todo, entry)
5155                         print_binder_work_ilocked(m, node->proc, "    ",
5156                                           "    pending async transaction", w);
5157         }
5158 }
5159
5160 static void print_binder_ref_olocked(struct seq_file *m,
5161                                      struct binder_ref *ref)
5162 {
5163         binder_node_lock(ref->node);
5164         seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
5165                    ref->data.debug_id, ref->data.desc,
5166                    ref->node->proc ? "" : "dead ",
5167                    ref->node->debug_id, ref->data.strong,
5168                    ref->data.weak, ref->death);
5169         binder_node_unlock(ref->node);
5170 }
5171
5172 static void print_binder_proc(struct seq_file *m,
5173                               struct binder_proc *proc, int print_all)
5174 {
5175         struct binder_work *w;
5176         struct rb_node *n;
5177         size_t start_pos = m->count;
5178         size_t header_pos;
5179         struct binder_node *last_node = NULL;
5180
5181         seq_printf(m, "proc %d\n", proc->pid);
5182         seq_printf(m, "context %s\n", proc->context->name);
5183         header_pos = m->count;
5184
5185         binder_inner_proc_lock(proc);
5186         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5187                 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5188                                                 rb_node), print_all);
5189
5190         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5191                 struct binder_node *node = rb_entry(n, struct binder_node,
5192                                                     rb_node);
5193                 /*
5194                  * take a temporary reference on the node so it
5195                  * survives and isn't removed from the tree
5196                  * while we print it.
5197                  */
5198                 binder_inc_node_tmpref_ilocked(node);
5199                 /* Need to drop inner lock to take node lock */
5200                 binder_inner_proc_unlock(proc);
5201                 if (last_node)
5202                         binder_put_node(last_node);
5203                 binder_node_inner_lock(node);
5204                 print_binder_node_nilocked(m, node);
5205                 binder_node_inner_unlock(node);
5206                 last_node = node;
5207                 binder_inner_proc_lock(proc);
5208         }
5209         binder_inner_proc_unlock(proc);
5210         if (last_node)
5211                 binder_put_node(last_node);
5212
5213         if (print_all) {
5214                 binder_proc_lock(proc);
5215                 for (n = rb_first(&proc->refs_by_desc);
5216                      n != NULL;
5217                      n = rb_next(n))
5218                         print_binder_ref_olocked(m, rb_entry(n,
5219                                                             struct binder_ref,
5220                                                             rb_node_desc));
5221                 binder_proc_unlock(proc);
5222         }
5223         binder_alloc_print_allocated(m, &proc->alloc);
5224         binder_inner_proc_lock(proc);
5225         list_for_each_entry(w, &proc->todo, entry)
5226                 print_binder_work_ilocked(m, proc, "  ",
5227                                           "  pending transaction", w);
5228         list_for_each_entry(w, &proc->delivered_death, entry) {
5229                 seq_puts(m, "  has delivered dead binder\n");
5230                 break;
5231         }
5232         binder_inner_proc_unlock(proc);
5233         if (!print_all && m->count == header_pos)
5234                 m->count = start_pos;
5235 }
5236
5237 static const char * const binder_return_strings[] = {
5238         "BR_ERROR",
5239         "BR_OK",
5240         "BR_TRANSACTION",
5241         "BR_REPLY",
5242         "BR_ACQUIRE_RESULT",
5243         "BR_DEAD_REPLY",
5244         "BR_TRANSACTION_COMPLETE",
5245         "BR_INCREFS",
5246         "BR_ACQUIRE",
5247         "BR_RELEASE",
5248         "BR_DECREFS",
5249         "BR_ATTEMPT_ACQUIRE",
5250         "BR_NOOP",
5251         "BR_SPAWN_LOOPER",
5252         "BR_FINISHED",
5253         "BR_DEAD_BINDER",
5254         "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5255         "BR_FAILED_REPLY"
5256 };
5257
5258 static const char * const binder_command_strings[] = {
5259         "BC_TRANSACTION",
5260         "BC_REPLY",
5261         "BC_ACQUIRE_RESULT",
5262         "BC_FREE_BUFFER",
5263         "BC_INCREFS",
5264         "BC_ACQUIRE",
5265         "BC_RELEASE",
5266         "BC_DECREFS",
5267         "BC_INCREFS_DONE",
5268         "BC_ACQUIRE_DONE",
5269         "BC_ATTEMPT_ACQUIRE",
5270         "BC_REGISTER_LOOPER",
5271         "BC_ENTER_LOOPER",
5272         "BC_EXIT_LOOPER",
5273         "BC_REQUEST_DEATH_NOTIFICATION",
5274         "BC_CLEAR_DEATH_NOTIFICATION",
5275         "BC_DEAD_BINDER_DONE",
5276         "BC_TRANSACTION_SG",
5277         "BC_REPLY_SG",
5278 };
5279
5280 static const char * const binder_objstat_strings[] = {
5281         "proc",
5282         "thread",
5283         "node",
5284         "ref",
5285         "death",
5286         "transaction",
5287         "transaction_complete"
5288 };
5289
5290 static void print_binder_stats(struct seq_file *m, const char *prefix,
5291                                struct binder_stats *stats)
5292 {
5293         int i;
5294
5295         BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5296                      ARRAY_SIZE(binder_command_strings));
5297         for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5298                 int temp = atomic_read(&stats->bc[i]);
5299
5300                 if (temp)
5301                         seq_printf(m, "%s%s: %d\n", prefix,
5302                                    binder_command_strings[i], temp);
5303         }
5304
5305         BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5306                      ARRAY_SIZE(binder_return_strings));
5307         for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5308                 int temp = atomic_read(&stats->br[i]);
5309
5310                 if (temp)
5311                         seq_printf(m, "%s%s: %d\n", prefix,
5312                                    binder_return_strings[i], temp);
5313         }
5314
5315         BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5316                      ARRAY_SIZE(binder_objstat_strings));
5317         BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5318                      ARRAY_SIZE(stats->obj_deleted));
5319         for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5320                 int created = atomic_read(&stats->obj_created[i]);
5321                 int deleted = atomic_read(&stats->obj_deleted[i]);
5322
5323                 if (created || deleted)
5324                         seq_printf(m, "%s%s: active %d total %d\n",
5325                                 prefix,
5326                                 binder_objstat_strings[i],
5327                                 created - deleted,
5328                                 created);
5329         }
5330 }
5331
5332 static void print_binder_proc_stats(struct seq_file *m,
5333                                     struct binder_proc *proc)
5334 {
5335         struct binder_work *w;
5336         struct binder_thread *thread;
5337         struct rb_node *n;
5338         int count, strong, weak, ready_threads;
5339         size_t free_async_space =
5340                 binder_alloc_get_free_async_space(&proc->alloc);
5341
5342         seq_printf(m, "proc %d\n", proc->pid);
5343         seq_printf(m, "context %s\n", proc->context->name);
5344         count = 0;
5345         ready_threads = 0;
5346         binder_inner_proc_lock(proc);
5347         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5348                 count++;
5349
5350         list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5351                 ready_threads++;
5352
5353         seq_printf(m, "  threads: %d\n", count);
5354         seq_printf(m, "  requested threads: %d+%d/%d\n"
5355                         "  ready threads %d\n"
5356                         "  free async space %zd\n", proc->requested_threads,
5357                         proc->requested_threads_started, proc->max_threads,
5358                         ready_threads,
5359                         free_async_space);
5360         count = 0;
5361         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5362                 count++;
5363         binder_inner_proc_unlock(proc);
5364         seq_printf(m, "  nodes: %d\n", count);
5365         count = 0;
5366         strong = 0;
5367         weak = 0;
5368         binder_proc_lock(proc);
5369         for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5370                 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5371                                                   rb_node_desc);
5372                 count++;
5373                 strong += ref->data.strong;
5374                 weak += ref->data.weak;
5375         }
5376         binder_proc_unlock(proc);
5377         seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
5378
5379         count = binder_alloc_get_allocated_count(&proc->alloc);
5380         seq_printf(m, "  buffers: %d\n", count);
5381
5382         binder_alloc_print_pages(m, &proc->alloc);
5383
5384         count = 0;
5385         binder_inner_proc_lock(proc);
5386         list_for_each_entry(w, &proc->todo, entry) {
5387                 if (w->type == BINDER_WORK_TRANSACTION)
5388                         count++;
5389         }
5390         binder_inner_proc_unlock(proc);
5391         seq_printf(m, "  pending transactions: %d\n", count);
5392
5393         print_binder_stats(m, "  ", &proc->stats);
5394 }
5395
5396
5397 static int binder_state_show(struct seq_file *m, void *unused)
5398 {
5399         struct binder_proc *proc;
5400         struct binder_node *node;
5401         struct binder_node *last_node = NULL;
5402
5403         seq_puts(m, "binder state:\n");
5404
5405         spin_lock(&binder_dead_nodes_lock);
5406         if (!hlist_empty(&binder_dead_nodes))
5407                 seq_puts(m, "dead nodes:\n");
5408         hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5409                 /*
5410                  * take a temporary reference on the node so it
5411                  * survives and isn't removed from the list
5412                  * while we print it.
5413                  */
5414                 node->tmp_refs++;
5415                 spin_unlock(&binder_dead_nodes_lock);
5416                 if (last_node)
5417                         binder_put_node(last_node);
5418                 binder_node_lock(node);
5419                 print_binder_node_nilocked(m, node);
5420                 binder_node_unlock(node);
5421                 last_node = node;
5422                 spin_lock(&binder_dead_nodes_lock);
5423         }
5424         spin_unlock(&binder_dead_nodes_lock);
5425         if (last_node)
5426                 binder_put_node(last_node);
5427
5428         mutex_lock(&binder_procs_lock);
5429         hlist_for_each_entry(proc, &binder_procs, proc_node)
5430                 print_binder_proc(m, proc, 1);
5431         mutex_unlock(&binder_procs_lock);
5432
5433         return 0;
5434 }
5435
5436 static int binder_stats_show(struct seq_file *m, void *unused)
5437 {
5438         struct binder_proc *proc;
5439
5440         seq_puts(m, "binder stats:\n");
5441
5442         print_binder_stats(m, "", &binder_stats);
5443
5444         mutex_lock(&binder_procs_lock);
5445         hlist_for_each_entry(proc, &binder_procs, proc_node)
5446                 print_binder_proc_stats(m, proc);
5447         mutex_unlock(&binder_procs_lock);
5448
5449         return 0;
5450 }
5451
5452 static int binder_transactions_show(struct seq_file *m, void *unused)
5453 {
5454         struct binder_proc *proc;
5455
5456         seq_puts(m, "binder transactions:\n");
5457         mutex_lock(&binder_procs_lock);
5458         hlist_for_each_entry(proc, &binder_procs, proc_node)
5459                 print_binder_proc(m, proc, 0);
5460         mutex_unlock(&binder_procs_lock);
5461
5462         return 0;
5463 }
5464
5465 static int binder_proc_show(struct seq_file *m, void *unused)
5466 {
5467         struct binder_proc *itr;
5468         int pid = (unsigned long)m->private;
5469
5470         mutex_lock(&binder_procs_lock);
5471         hlist_for_each_entry(itr, &binder_procs, proc_node) {
5472                 if (itr->pid == pid) {
5473                         seq_puts(m, "binder proc state:\n");
5474                         print_binder_proc(m, itr, 1);
5475                 }
5476         }
5477         mutex_unlock(&binder_procs_lock);
5478
5479         return 0;
5480 }
5481
5482 static void print_binder_transaction_log_entry(struct seq_file *m,
5483                                         struct binder_transaction_log_entry *e)
5484 {
5485         int debug_id = READ_ONCE(e->debug_id_done);
5486         /*
5487          * read barrier to guarantee debug_id_done read before
5488          * we print the log values
5489          */
5490         smp_rmb();
5491         seq_printf(m,
5492                    "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5493                    e->debug_id, (e->call_type == 2) ? "reply" :
5494                    ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5495                    e->from_thread, e->to_proc, e->to_thread, e->context_name,
5496                    e->to_node, e->target_handle, e->data_size, e->offsets_size,
5497                    e->return_error, e->return_error_param,
5498                    e->return_error_line);
5499         /*
5500          * read-barrier to guarantee read of debug_id_done after
5501          * done printing the fields of the entry
5502          */
5503         smp_rmb();
5504         seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5505                         "\n" : " (incomplete)\n");
5506 }
5507
5508 static int binder_transaction_log_show(struct seq_file *m, void *unused)
5509 {
5510         struct binder_transaction_log *log = m->private;
5511         unsigned int log_cur = atomic_read(&log->cur);
5512         unsigned int count;
5513         unsigned int cur;
5514         int i;
5515
5516         count = log_cur + 1;
5517         cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5518                 0 : count % ARRAY_SIZE(log->entry);
5519         if (count > ARRAY_SIZE(log->entry) || log->full)
5520                 count = ARRAY_SIZE(log->entry);
5521         for (i = 0; i < count; i++) {
5522                 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5523
5524                 print_binder_transaction_log_entry(m, &log->entry[index]);
5525         }
5526         return 0;
5527 }
5528
5529 static const struct file_operations binder_fops = {
5530         .owner = THIS_MODULE,
5531         .poll = binder_poll,
5532         .unlocked_ioctl = binder_ioctl,
5533         .compat_ioctl = binder_ioctl,
5534         .mmap = binder_mmap,
5535         .open = binder_open,
5536         .flush = binder_flush,
5537         .release = binder_release,
5538 };
5539
5540 BINDER_DEBUG_ENTRY(state);
5541 BINDER_DEBUG_ENTRY(stats);
5542 BINDER_DEBUG_ENTRY(transactions);
5543 BINDER_DEBUG_ENTRY(transaction_log);
5544
5545 static int __init init_binder_device(const char *name)
5546 {
5547         int ret;
5548         struct binder_device *binder_device;
5549
5550         binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5551         if (!binder_device)
5552                 return -ENOMEM;
5553
5554         binder_device->miscdev.fops = &binder_fops;
5555         binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5556         binder_device->miscdev.name = name;
5557
5558         binder_device->context.binder_context_mgr_uid = INVALID_UID;
5559         binder_device->context.name = name;
5560         mutex_init(&binder_device->context.context_mgr_node_lock);
5561
5562         ret = misc_register(&binder_device->miscdev);
5563         if (ret < 0) {
5564                 kfree(binder_device);
5565                 return ret;
5566         }
5567
5568         hlist_add_head(&binder_device->hlist, &binder_devices);
5569
5570         return ret;
5571 }
5572
5573 static int __init binder_init(void)
5574 {
5575         int ret;
5576         char *device_name, *device_names, *device_tmp;
5577         struct binder_device *device;
5578         struct hlist_node *tmp;
5579
5580         binder_alloc_shrinker_init();
5581
5582         atomic_set(&binder_transaction_log.cur, ~0U);
5583         atomic_set(&binder_transaction_log_failed.cur, ~0U);
5584
5585         binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5586         if (binder_debugfs_dir_entry_root)
5587                 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5588                                                  binder_debugfs_dir_entry_root);
5589
5590         if (binder_debugfs_dir_entry_root) {
5591                 debugfs_create_file("state",
5592                                     S_IRUGO,
5593                                     binder_debugfs_dir_entry_root,
5594                                     NULL,
5595                                     &binder_state_fops);
5596                 debugfs_create_file("stats",
5597                                     S_IRUGO,
5598                                     binder_debugfs_dir_entry_root,
5599                                     NULL,
5600                                     &binder_stats_fops);
5601                 debugfs_create_file("transactions",
5602                                     S_IRUGO,
5603                                     binder_debugfs_dir_entry_root,
5604                                     NULL,
5605                                     &binder_transactions_fops);
5606                 debugfs_create_file("transaction_log",
5607                                     S_IRUGO,
5608                                     binder_debugfs_dir_entry_root,
5609                                     &binder_transaction_log,
5610                                     &binder_transaction_log_fops);
5611                 debugfs_create_file("failed_transaction_log",
5612                                     S_IRUGO,
5613                                     binder_debugfs_dir_entry_root,
5614                                     &binder_transaction_log_failed,
5615                                     &binder_transaction_log_fops);
5616         }
5617
5618         /*
5619          * Copy the module_parameter string, because we don't want to
5620          * tokenize it in-place.
5621          */
5622         device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
5623         if (!device_names) {
5624                 ret = -ENOMEM;
5625                 goto err_alloc_device_names_failed;
5626         }
5627         strcpy(device_names, binder_devices_param);
5628
5629         device_tmp = device_names;
5630         while ((device_name = strsep(&device_tmp, ","))) {
5631                 ret = init_binder_device(device_name);
5632                 if (ret)
5633                         goto err_init_binder_device_failed;
5634         }
5635
5636         return ret;
5637
5638 err_init_binder_device_failed:
5639         hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
5640                 misc_deregister(&device->miscdev);
5641                 hlist_del(&device->hlist);
5642                 kfree(device);
5643         }
5644
5645         kfree(device_names);
5646
5647 err_alloc_device_names_failed:
5648         debugfs_remove_recursive(binder_debugfs_dir_entry_root);
5649
5650         return ret;
5651 }
5652
5653 device_initcall(binder_init);
5654
5655 #define CREATE_TRACE_POINTS
5656 #include "binder_trace.h"
5657
5658 MODULE_LICENSE("GPL v2");