kernel_optimize_test/kernel/task_work.c
Al Viro a2d4c71d15 deal with task_work callbacks adding more work
It doesn't matter on the normal return-to-userland path (we'll recheck the
NOTIFY_RESUME flag anyway), but in the exit_task_work() case we'll
need it as soon as we get callbacks capable of triggering more
task_work_add().

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
2012-07-22 23:57:57 +04:00

#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/tracehook.h>

int
task_work_add(struct task_struct *task, struct callback_head *twork, bool notify)
{
	struct callback_head *last, *first;
	unsigned long flags;

	/*
	 * Not inserting the new work if the task has already passed
	 * exit_task_work() is the responsibility of callers.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	/*
	 * ->task_works is a circular singly-linked list: it points at the
	 * newest entry, whose ->next points back at the oldest.  Append
	 * @twork after the current last entry, or make it a list of one.
	 */
	last = task->task_works;
	first = last ? last->next : twork;
	twork->next = first;
	if (last)
		last->next = twork;
	task->task_works = twork;
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* test_and_set_bit() implies mb(), see tracehook_notify_resume(). */
	if (notify)
		set_notify_resume(task);
	return 0;
}
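
/*
 * A minimal usage sketch (assumed caller-side code, not part of this
 * file).  Callers typically embed the callback_head in their own
 * structure, initialize it with init_task_work() from
 * <linux/task_work.h>, and use container_of() to recover their context
 * in the callback; "my_work" and "my_func" below are hypothetical names.
 *
 *	struct my_work {
 *		struct callback_head twork;
 *		int payload;
 *	};
 *
 *	static void my_func(struct callback_head *head)
 *	{
 *		struct my_work *w = container_of(head, struct my_work, twork);
 *		... consume w->payload, then free w ...
 *	}
 *
 *	init_task_work(&w->twork, my_func);
 *	task_work_add(task, &w->twork, true);
 *
 * Passing notify == true makes set_notify_resume() flag the task, so it
 * runs the work on its way back to userland.
 */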
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	unsigned long flags;
	struct callback_head *last, *res = NULL;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	last = task->task_works;
	if (last) {
		/* Walk the circular list, starting at the oldest entry. */
		struct callback_head *q = last, *p = q->next;

		while (1) {
			if (p->func == func) {
				/* Unlink @p; fix ->task_works if @p was the newest entry. */
				q->next = p->next;
				if (p == last)
					task->task_works = q == p ? NULL : q;
				res = p;
				break;
			}
			if (p == last)
				break;
			q = p;
			p = q->next;
		}
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	return res;
}
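
/*
 * Cancellation sketch (hypothetical caller-side code, reusing the
 * my_work/my_func names from the sketch above): the unlinked entry is
 * handed back to the caller, who owns it again and may free or requeue it.
 *
 *	struct callback_head *head = task_work_cancel(task, my_func);
 *	if (head)
 *		kfree(container_of(head, struct my_work, twork));
 */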
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *p, *q;

	/*
	 * Loop until ->task_works stays empty: callbacks may queue more
	 * work via task_work_add() while we run the batch we just took.
	 */
	while (1) {
		raw_spin_lock_irq(&task->pi_lock);
		p = task->task_works;
		task->task_works = NULL;
		raw_spin_unlock_irq(&task->pi_lock);

		if (unlikely(!p))
			return;

		q = p->next; /* head */
		p->next = NULL; /* cut it */
		while (q) {
			p = q->next;
			q->func(q);
			q = p;
		}
	}
}
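
The outer while (1) in task_work_run() is what the commit message is about: once a batch has been detached and NULL-terminated, a callback that calls task_work_add() on current puts its work on a fresh ->task_works list that the current pass can no longer see, so the runner must go around again until the list stays empty. That matters on the exit_task_work() path, where there is no later return-to-userland recheck of NOTIFY_RESUME. A minimal sketch of such a self-rearming callback (the name is hypothetical, not from the kernel tree):

static void rearm_once(struct callback_head *head)
{
	static bool rearmed;

	/*
	 * Queue ourselves again while task_work_run() is draining the
	 * detached batch; the new entry lands on ->task_works and is
	 * picked up by the next pass of the outer loop.  No notify is
	 * needed, since we are already running the works.
	 */
	if (!rearmed) {
		rearmed = true;
		task_work_add(current, head, false);
	}
}

Re-adding @head from inside its own callback is safe here because the runner saves q->next before calling q->func(q) and never touches the entry again afterwards.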