binder: protect proc->threads with inner_lock

proc->threads will need to be accessed with higher
locks of other processes held, so use proc->inner_lock
to protect it. proc->tmp_ref now needs to be protected
by proc->inner_lock.

Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 7bd7b0e639 (parent da0fa9e4e8)
Author: Todd Kjos
Date: 2017-06-29 12:02:05 -07:00
Committer: Greg Kroah-Hartman

@@ -469,6 +469,7 @@ enum binder_deferred_state {
  * struct binder_proc - binder process bookkeeping
  * @proc_node:            element for binder_procs list
  * @threads:              rbtree of binder_threads in this proc
+ *                        (protected by @inner_lock)
  * @nodes:                rbtree of binder nodes associated with
  *                        this proc ordered by node->ptr
  *                        (protected by @inner_lock)
@@ -486,6 +487,7 @@ enum binder_deferred_state {
  *                        (protected by binder_deferred_lock)
  * @is_dead:              process is dead and awaiting free
  *                        when outstanding transactions are cleaned up
+ *                        (protected by @inner_lock)
  * @todo:                 list of work for this process
  *                        (protected by @inner_lock)
  * @wait:                 wait queue head to wait for proc work
@@ -501,6 +503,7 @@ enum binder_deferred_state {
  * @requested_threads_started: number binder threads started
  * @ready_threads:        number of threads waiting for proc work
  * @tmp_ref:              temporary reference to indicate proc is in use
+ *                        (protected by @inner_lock)
  * @default_priority:     default scheduler priority
  *                        (invariant after initialized)
  * @debugfs_entry:        debugfs node
@@ -556,6 +559,7 @@ enum {
  * @proc:                 binder process for this thread
  *                        (invariant after initialization)
  * @rb_node:              element for proc->threads rbtree
+ *                        (protected by @proc->inner_lock)
  * @pid:                  PID for this thread
  *                        (invariant after initialization)
  * @looper:               bitmap of looping state
@@ -576,6 +580,7 @@ enum {
  *                        always be acquired)
  * @is_dead:              thread is dead and awaiting free
  *                        when outstanding transactions are cleaned up
+ *                        (protected by @proc->inner_lock)
  *
  * Bookkeeping structure for binder threads.
  */
@@ -1667,15 +1672,15 @@ static void binder_thread_dec_tmpref(struct binder_thread *thread)
 	/*
 	 * atomic is used to protect the counter value while
 	 * it cannot reach zero or thread->is_dead is false
-	 *
-	 * TODO: future patch adds locking to ensure that the
-	 * check of tmp_ref and is_dead is done with a lock held
 	 */
+	binder_inner_proc_lock(thread->proc);
 	atomic_dec(&thread->tmp_ref);
 	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
+		binder_inner_proc_unlock(thread->proc);
 		binder_free_thread(thread);
 		return;
 	}
+	binder_inner_proc_unlock(thread->proc);
 }
 
 /**
@@ -1692,12 +1697,15 @@ static void binder_thread_dec_tmpref(struct binder_thread *thread)
  */
 static void binder_proc_dec_tmpref(struct binder_proc *proc)
 {
+	binder_inner_proc_lock(proc);
 	proc->tmp_ref--;
 	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
 			!proc->tmp_ref) {
+		binder_inner_proc_unlock(proc);
 		binder_free_proc(proc);
 		return;
 	}
+	binder_inner_proc_unlock(proc);
 }
 
 /**
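The two hunks above share one pattern: the temporary-reference decrement and the is_dead check become a single critical section under the inner lock, and the lock is dropped only on the path that frees the object. Below is a minimal userspace sketch of that pattern, with a pthread mutex standing in for the kernel spinlock; the struct and function names are illustrative analogs, not the driver's own symbols.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
	pthread_mutex_t inner_lock;
	int tmp_ref;	/* temporary references (protected by inner_lock) */
	bool is_dead;	/* set during release (protected by inner_lock) */
};

static void obj_dec_tmpref(struct obj *o)
{
	pthread_mutex_lock(&o->inner_lock);
	o->tmp_ref--;
	if (o->is_dead && o->tmp_ref == 0) {
		/* Last reference to a dead object: unlock first,
		 * because the lock lives inside the memory that is
		 * about to be freed. */
		pthread_mutex_unlock(&o->inner_lock);
		pthread_mutex_destroy(&o->inner_lock);
		free(o);
		return;
	}
	pthread_mutex_unlock(&o->inner_lock);
}

Unlocking before the free is safe because tmp_ref reaching zero with is_dead set means no other thread still holds a reference through which it could take the lock again.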
@@ -2480,7 +2488,9 @@ static void binder_transaction(struct binder_proc *proc,
 			return_error_line = __LINE__;
 			goto err_dead_binder;
 		}
+		binder_inner_proc_lock(target_proc);
 		target_proc->tmp_ref++;
+		binder_inner_proc_unlock(target_proc);
 		binder_node_unlock(target_node);
 		if (security_binder_transaction(proc->tsk,
 						target_proc->tsk) < 0) {
@@ -3854,7 +3864,8 @@ static void binder_release_work(struct binder_proc *proc,
 	}
 }
 
-static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+static struct binder_thread *binder_get_thread_ilocked(
+		struct binder_proc *proc, struct binder_thread *new_thread)
 {
 	struct binder_thread *thread = NULL;
 	struct rb_node *parent = NULL;
@@ -3869,25 +3880,45 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
 		else if (current->pid > thread->pid)
 			p = &(*p)->rb_right;
 		else
-			break;
+			return thread;
 	}
-	if (*p == NULL) {
-		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
-		if (thread == NULL)
-			return NULL;
-		binder_stats_created(BINDER_STAT_THREAD);
-		thread->proc = proc;
-		thread->pid = current->pid;
-		atomic_set(&thread->tmp_ref, 0);
-		init_waitqueue_head(&thread->wait);
-		INIT_LIST_HEAD(&thread->todo);
-		rb_link_node(&thread->rb_node, parent, p);
-		rb_insert_color(&thread->rb_node, &proc->threads);
-		thread->looper_need_return = true;
-		thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
-		thread->return_error.cmd = BR_OK;
-		thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
-		thread->reply_error.cmd = BR_OK;
-	}
+	if (!new_thread)
+		return NULL;
+	thread = new_thread;
+	binder_stats_created(BINDER_STAT_THREAD);
+	thread->proc = proc;
+	thread->pid = current->pid;
+	atomic_set(&thread->tmp_ref, 0);
+	init_waitqueue_head(&thread->wait);
+	INIT_LIST_HEAD(&thread->todo);
+	rb_link_node(&thread->rb_node, parent, p);
+	rb_insert_color(&thread->rb_node, &proc->threads);
+	thread->looper_need_return = true;
+	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
+	thread->return_error.cmd = BR_OK;
+	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
+	thread->reply_error.cmd = BR_OK;
+	return thread;
+}
+
+static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+{
+	struct binder_thread *thread;
+	struct binder_thread *new_thread;
+
+	binder_inner_proc_lock(proc);
+	thread = binder_get_thread_ilocked(proc, NULL);
+	binder_inner_proc_unlock(proc);
+	if (!thread) {
+		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+		if (new_thread == NULL)
+			return NULL;
+		binder_inner_proc_lock(proc);
+		thread = binder_get_thread_ilocked(proc, new_thread);
+		binder_inner_proc_unlock(proc);
+		if (thread != new_thread)
+			kfree(new_thread);
+	}
 	return thread;
 }
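The binder_get_thread() split above is the classic "allocate outside the lock" idiom: look up under inner_lock; on a miss, drop the lock, allocate with GFP_KERNEL (which may sleep and so cannot run under a spinlock), then retake the lock and let the _ilocked helper re-run the lookup so a racing inserter wins cleanly, with the loser freeing its unused allocation. A compact userspace sketch of the same race handling, using a mutex-guarded singly linked list in place of the rbtree (all names hypothetical):

#include <pthread.h>
#include <stdlib.h>

struct tentry {
	int pid;
	struct tentry *next;
};

static pthread_mutex_t tlist_lock = PTHREAD_MUTEX_INITIALIZER;
static struct tentry *tlist;

/* Caller must hold tlist_lock. Returns the entry for @pid; on a miss,
 * inserts and returns @new_entry if one was supplied, else NULL. */
static struct tentry *get_entry_locked(int pid, struct tentry *new_entry)
{
	struct tentry *e;

	for (e = tlist; e; e = e->next)
		if (e->pid == pid)
			return e;
	if (!new_entry)
		return NULL;
	new_entry->pid = pid;
	new_entry->next = tlist;
	tlist = new_entry;
	return new_entry;
}

static struct tentry *get_entry(int pid)
{
	struct tentry *e, *new_entry;

	pthread_mutex_lock(&tlist_lock);
	e = get_entry_locked(pid, NULL);
	pthread_mutex_unlock(&tlist_lock);
	if (!e) {
		/* Allocation may block, so it happens unlocked. */
		new_entry = calloc(1, sizeof(*new_entry));
		if (!new_entry)
			return NULL;
		pthread_mutex_lock(&tlist_lock);
		e = get_entry_locked(pid, new_entry);
		pthread_mutex_unlock(&tlist_lock);
		if (e != new_entry)
			free(new_entry);	/* lost the race */
	}
	return e;
}

The _ilocked suffix follows the convention this series uses throughout the driver: such functions document that the caller must already hold inner_lock on entry.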
@@ -3918,6 +3949,7 @@ static int binder_thread_release(struct binder_proc *proc,
 	int active_transactions = 0;
 	struct binder_transaction *last_t = NULL;
 
+	binder_inner_proc_lock(thread->proc);
 	/*
 	 * take a ref on the proc so it survives
 	 * after we remove this thread from proc->threads.
@@ -3965,6 +3997,7 @@ static int binder_thread_release(struct binder_proc *proc,
 		if (t)
 			spin_lock(&t->lock);
 	}
+	binder_inner_proc_unlock(thread->proc);
 
 	if (send_reply)
 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
@@ -4338,6 +4371,7 @@ static void binder_deferred_flush(struct binder_proc *proc)
 	struct rb_node *n;
 	int wake_count = 0;
 
+	binder_inner_proc_lock(proc);
 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
 		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
 
@@ -4347,6 +4381,7 @@ static void binder_deferred_flush(struct binder_proc *proc)
 			wake_count++;
 		}
 	}
+	binder_inner_proc_unlock(proc);
 	wake_up_interruptible_all(&proc->wait);
 
 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
@@ -4445,6 +4480,7 @@ static void binder_deferred_release(struct binder_proc *proc)
 		context->binder_context_mgr_node = NULL;
 	}
 	mutex_unlock(&context->context_mgr_node_lock);
+	binder_inner_proc_lock(proc);
 	/*
 	 * Make sure proc stays alive after we
 	 * remove all the threads
@@ -4458,13 +4494,14 @@ static void binder_deferred_release(struct binder_proc *proc)
 		struct binder_thread *thread;
 
 		thread = rb_entry(n, struct binder_thread, rb_node);
+		binder_inner_proc_unlock(proc);
 		threads++;
 		active_transactions += binder_thread_release(proc, thread);
+		binder_inner_proc_lock(proc);
 	}
 
 	nodes = 0;
 	incoming_refs = 0;
-	binder_inner_proc_lock(proc);
 	while ((n = rb_first(&proc->nodes))) {
 		struct binder_node *node;
 
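binder_deferred_release() above shows the teardown counterpart: the threads rbtree is drained one element at a time, with inner_lock dropped around binder_thread_release() (which can block) and retaken before the next rb_first(). Restarting from the root on every pass is what makes the unlocked window safe, since no iterator state survives it. A hedged sketch of the loop shape (illustrative names; a linked list whose head is detached under the lock stands in for the rbtree, so the unlocked release never touches shared state):

#include <pthread.h>
#include <stdlib.h>

struct titem {
	struct titem *next;
};

struct owner {
	pthread_mutex_t inner_lock;
	struct titem *items;	/* stand-in for proc->threads */
};

static void release_item(struct titem *it)
{
	free(it);	/* stands in for blocking cleanup work */
}

static void release_all(struct owner *o)
{
	struct titem *it;

	pthread_mutex_lock(&o->inner_lock);
	while ((it = o->items) != NULL) {	/* restart each pass */
		o->items = it->next;		/* detach under the lock */
		pthread_mutex_unlock(&o->inner_lock);
		release_item(it);		/* blocking work, unlocked */
		pthread_mutex_lock(&o->inner_lock);
	}
	pthread_mutex_unlock(&o->inner_lock);
}

In the driver itself the entry is not detached first: binder_thread_release() removes it from proc->threads, and the temporary proc reference taken just before the loop keeps the proc alive across the unlocked window.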
@@ -4872,10 +4909,13 @@ static void print_binder_proc_stats(struct seq_file *m,
 	struct binder_work *w;
 	struct rb_node *n;
 	int count, strong, weak;
+	size_t free_async_space =
+		binder_alloc_get_free_async_space(&proc->alloc);
 
 	seq_printf(m, "proc %d\n", proc->pid);
 	seq_printf(m, "context %s\n", proc->context->name);
 	count = 0;
+	binder_inner_proc_lock(proc);
 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
 		count++;
 	seq_printf(m, "  threads: %d\n", count);
@@ -4884,9 +4924,8 @@ static void print_binder_proc_stats(struct seq_file *m,
 			"  free async space %zd\n", proc->requested_threads,
 			proc->requested_threads_started, proc->max_threads,
 			proc->ready_threads,
-			binder_alloc_get_free_async_space(&proc->alloc));
+			free_async_space);
 	count = 0;
-	binder_inner_proc_lock(proc);
 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
 		count++;
 	binder_inner_proc_unlock(proc);
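One detail in the last two hunks is worth calling out: free_async_space is now computed before inner_lock is taken, presumably because binder_alloc_get_free_async_space() acquires a sleeping lock (the binder_alloc mutex), and a sleeping lock must never be taken while holding a spinlock. A minimal sketch of that hoisting pattern (pthread analog, hypothetical names):

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER; /* sleeping-lock analog */
static pthread_mutex_t inner_lock = PTHREAD_MUTEX_INITIALIZER; /* spinlock analog */
static size_t async_space;
static int thread_count;

static size_t get_free_async_space(void)
{
	size_t n;

	pthread_mutex_lock(&alloc_lock);	/* may block */
	n = async_space;
	pthread_mutex_unlock(&alloc_lock);
	return n;
}

static void print_stats(void)
{
	/* Hoist the potentially blocking read out of the inner_lock
	 * section, mirroring how the patch precomputes free_async_space. */
	size_t free_async_space = get_free_async_space();
	int count;

	pthread_mutex_lock(&inner_lock);
	count = thread_count;	/* stands in for the rbtree walks */
	pthread_mutex_unlock(&inner_lock);

	printf("threads: %d, free async space %zu\n", count, free_async_space);
}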