kernel_optimize_test/kernel/itimer.c

/*
 * linux/kernel/itimer.c
 *
 * Copyright (C) 1992 Darren Senn
 */

/* These are all the functions necessary to implement itimers */

#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/posix-timers.h>

#include <asm/uaccess.h>

static unsigned long it_real_value(struct signal_struct *sig)
{
	unsigned long val = 0;

	if (timer_pending(&sig->real_timer)) {
		val = sig->real_timer.expires - jiffies;

		/* look out for negative/zero itimer.. */
		if ((long) val <= 0)
			val = 1;
	}
	return val;
}
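
/*
 * Read the current value of the given interval timer into *value:
 * the time remaining until the next expiry goes in it_value and the
 * reload period in it_interval.
 */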
int do_getitimer(int which, struct itimerval *value)
{
	struct task_struct *tsk = current;
	unsigned long interval, val;
	cputime_t cinterval, cval;

	switch (which) {
	case ITIMER_REAL:
		spin_lock_irq(&tsk->sighand->siglock);
		interval = tsk->signal->it_real_incr;
		val = it_real_value(tsk->signal);
		spin_unlock_irq(&tsk->sighand->siglock);
		jiffies_to_timeval(val, &value->it_value);
		jiffies_to_timeval(interval, &value->it_interval);
		break;
	case ITIMER_VIRTUAL:
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		cval = tsk->signal->it_virt_expires;
		cinterval = tsk->signal->it_virt_incr;
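		/*
		 * it_virt_expires is an absolute amount of user CPU
		 * time for the whole thread group, so work out how much
		 * the group has consumed so far: signal->utime holds
		 * the user time accumulated by already-exited threads,
		 * and the loop below adds that of every live thread.
		 */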
		if (!cputime_eq(cval, cputime_zero)) {
			struct task_struct *t = tsk;
			cputime_t utime = tsk->signal->utime;
			do {
				utime = cputime_add(utime, t->utime);
				t = next_thread(t);
			} while (t != tsk);
			if (cputime_le(cval, utime)) { /* about to fire */
				cval = jiffies_to_cputime(1);
			} else {
				cval = cputime_sub(cval, utime);
			}
		}
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
		cputime_to_timeval(cval, &value->it_value);
		cputime_to_timeval(cinterval, &value->it_interval);
		break;
	case ITIMER_PROF:
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		cval = tsk->signal->it_prof_expires;
		cinterval = tsk->signal->it_prof_incr;
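		/*
		 * Same idea as ITIMER_VIRTUAL above, except profiling
		 * time is user plus system time, so utime and stime
		 * are summed for the group.
		 */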
		if (!cputime_eq(cval, cputime_zero)) {
			struct task_struct *t = tsk;
			cputime_t ptime = cputime_add(tsk->signal->utime,
						      tsk->signal->stime);
			do {
				ptime = cputime_add(ptime,
						    cputime_add(t->utime,
								t->stime));
				t = next_thread(t);
			} while (t != tsk);
			if (cputime_le(cval, ptime)) { /* about to fire */
				cval = jiffies_to_cputime(1);
			} else {
				cval = cputime_sub(cval, ptime);
			}
		}
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
		cputime_to_timeval(cval, &value->it_value);
		cputime_to_timeval(cinterval, &value->it_interval);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

asmlinkage long sys_getitimer(int which, struct itimerval __user *value)
{
	int error = -EFAULT;
	struct itimerval get_buffer;

	if (value) {
		error = do_getitimer(which, &get_buffer);
		if (!error &&
		    copy_to_user(value, &get_buffer, sizeof(get_buffer)))
			error = -EFAULT;
	}
	return error;
}

/*
 * Called with P->sighand->siglock held and P->signal->real_timer inactive.
 * If interval is nonzero, arm the timer for interval ticks from now.
 */
static inline void it_real_arm(struct task_struct *p, unsigned long interval)
{
	p->signal->it_real_value = interval; /* XXX unnecessary field?? */
	if (interval == 0)
		return;
	if (interval > (unsigned long) LONG_MAX)
		interval = LONG_MAX;
	/*
	 * [PATCH] setitimer timer expires too early (merged 2005-05-06)
	 *
	 * It seems that the code responsible for this is in
	 * kernel/itimer.c:126:
	 *
	 *	p->signal->real_timer.expires = jiffies + interval;
	 *	add_timer(&p->signal->real_timer);
	 *
	 * If you request an interval of, let's say, 900 usecs, the
	 * interval given by timeval_to_jiffies will be 1. If you request
	 * this when we are half-way between two timer ticks, the
	 * interval will only give 400 usecs.
	 *
	 * If we want to guarantee that we never give intervals less
	 * than requested, the simple solution is to change that to:
	 *
	 *	p->signal->real_timer.expires = jiffies + interval + 1;
	 *
	 * This however produces pathological cases: an idle system
	 * requesting 1 ms timeouts will systematically get 2 ms
	 * timeouts, whereas currently it simply gets a few usecs less
	 * than 1 ms. The complex (and more computationally expensive)
	 * solution would be to check the gettimeofday time and compute
	 * the correct number of jiffies, so that if we request a
	 * 300 usecs timer 200 usecs inside the timer tick we can wait
	 * just one tick, but not if we are 800 usecs inside the tick.
	 * That would also mean locking out preemption during these
	 * computations to avoid races.
	 *
	 * Signed-off-by: Paulo Marques <pmarques@grupopie.com>
	 * Signed-off-by: Andrew Morton <akpm@osdl.org>
	 * Signed-off-by: Linus Torvalds <torvalds@osdl.org>
	 */
/* the "+ 1" below makes sure that the timer doesn't go off before
* the interval requested. This could happen if
* time requested % (usecs per jiffy) is more than the usecs left
* in the current jiffy */
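	/*
	 * Worked example (assuming HZ=1000, i.e. 1000 usecs per jiffy;
	 * the numbers are illustrative only): a request of 900 usecs
	 * rounds up to interval = 1 jiffy. If the request arrives
	 * 600 usecs into the current jiffy, "jiffies + 1" would expire
	 * after only 400 usecs; "jiffies + 1 + 1" expires after
	 * 1400 usecs, which may be late but is never early.
	 */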
	p->signal->real_timer.expires = jiffies + interval + 1;
	add_timer(&p->signal->real_timer);
}
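
/*
 * Timer callback for the ITIMER_REAL timer: signal the whole thread
 * group, then re-arm the timer if a nonzero interval was set.
 */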
void it_real_fn(unsigned long __data)
{
	struct task_struct * p = (struct task_struct *) __data;

	send_group_sig_info(SIGALRM, SEND_SIG_PRIV, p);

	/*
	 * Now restart the timer if necessary. We don't need any locking
	 * here because do_setitimer makes sure we have finished running
	 * before it touches anything.
	 */
	it_real_arm(p, p->signal->it_real_incr);
}

int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
{
	struct task_struct *tsk = current;
	unsigned long val, interval;
	cputime_t cval, cinterval, nval, ninterval;

	switch (which) {
	case ITIMER_REAL:
		spin_lock_irq(&tsk->sighand->siglock);
		interval = tsk->signal->it_real_incr;
		val = it_real_value(tsk->signal);
		if (val)
			del_timer_sync(&tsk->signal->real_timer);
		tsk->signal->it_real_incr =
			timeval_to_jiffies(&value->it_interval);
		it_real_arm(tsk, timeval_to_jiffies(&value->it_value));
		spin_unlock_irq(&tsk->sighand->siglock);
		if (ovalue) {
			jiffies_to_timeval(val, &ovalue->it_value);
			jiffies_to_timeval(interval,
					   &ovalue->it_interval);
		}
		break;
	case ITIMER_VIRTUAL:
		nval = timeval_to_cputime(&value->it_value);
		ninterval = timeval_to_cputime(&value->it_interval);
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		cval = tsk->signal->it_virt_expires;
		cinterval = tsk->signal->it_virt_incr;
		if (!cputime_eq(cval, cputime_zero) ||
		    !cputime_eq(nval, cputime_zero)) {
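			/*
			 * Round a nonzero new value up by one tick; as
			 * with the "+ 1" in it_real_arm, this keeps a
			 * coarse-grained cputime timer from expiring
			 * before the requested amount of CPU time has
			 * actually been consumed.
			 */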
			if (cputime_gt(nval, cputime_zero))
				nval = cputime_add(nval,
						   jiffies_to_cputime(1));
			set_process_cpu_timer(tsk, CPUCLOCK_VIRT,
					      &nval, &cval);
		}
		tsk->signal->it_virt_expires = nval;
		tsk->signal->it_virt_incr = ninterval;
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
		if (ovalue) {
			cputime_to_timeval(cval, &ovalue->it_value);
			cputime_to_timeval(cinterval, &ovalue->it_interval);
		}
		break;
	case ITIMER_PROF:
		nval = timeval_to_cputime(&value->it_value);
		ninterval = timeval_to_cputime(&value->it_interval);
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		cval = tsk->signal->it_prof_expires;
		cinterval = tsk->signal->it_prof_incr;
		if (!cputime_eq(cval, cputime_zero) ||
		    !cputime_eq(nval, cputime_zero)) {
			if (cputime_gt(nval, cputime_zero))
				nval = cputime_add(nval,
						   jiffies_to_cputime(1));
			set_process_cpu_timer(tsk, CPUCLOCK_PROF,
					      &nval, &cval);
		}
		tsk->signal->it_prof_expires = nval;
		tsk->signal->it_prof_incr = ninterval;
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
		if (ovalue) {
			cputime_to_timeval(cval, &ovalue->it_value);
			cputime_to_timeval(cinterval, &ovalue->it_interval);
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
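
/*
 * Note that a NULL value pointer is treated like an all-zero
 * itimerval (see the memset below), i.e. it disarms the timer rather
 * than failing with -EFAULT.
 */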
asmlinkage long sys_setitimer(int which,
			      struct itimerval __user *value,
			      struct itimerval __user *ovalue)
{
	struct itimerval set_buffer, get_buffer;
	int error;

	if (value) {
		if (copy_from_user(&set_buffer, value, sizeof(set_buffer)))
			return -EFAULT;
	} else
		memset((char *) &set_buffer, 0, sizeof(set_buffer));

	error = do_setitimer(which, &set_buffer, ovalue ? &get_buffer : NULL);
	if (error || !ovalue)
		return error;

	if (copy_to_user(ovalue, &get_buffer, sizeof(get_buffer)))
		return -EFAULT;
	return 0;
}