forked from luck/tmp_suning_uos_patched
sched_clock: only update deltas with local reads.
Reading the CPU clock should try to stay accurate within the CPU. Reading the CPU clock from another CPU and updating the deltas can cause unneeded jumps when reading from the local CPU. This patch changes the code to update the last read TSC only when it is read from the local CPU. Signed-off-by: Steven Rostedt <srostedt@redhat.com> Cc: Steven Rostedt <srostedt@redhat.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: john stultz <johnstul@us.ibm.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
2b8a0cf489
commit
c0c87734f1
@@ -124,7 +124,7 @@ static int check_max(struct sched_clock_data *scd)
|
||||
* - filter out backward motion
|
||||
* - use jiffies to generate a min,max window to clip the raw values
|
||||
*/
|
||||
static void __update_sched_clock(struct sched_clock_data *scd, u64 now)
|
||||
static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *time)
|
||||
{
|
||||
unsigned long now_jiffies = jiffies;
|
||||
long delta_jiffies = now_jiffies - scd->tick_jiffies;
|
||||
@@ -162,8 +162,12 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now)
|
||||
if (unlikely(clock < min_clock))
|
||||
clock = min_clock;
|
||||
|
||||
scd->prev_raw = now;
|
||||
scd->clock = clock;
|
||||
if (time)
|
||||
*time = clock;
|
||||
else {
|
||||
scd->prev_raw = now;
|
||||
scd->clock = clock;
|
||||
}
|
||||
}
|
||||
|
||||
static void lock_double_clock(struct sched_clock_data *data1,
|
||||
@@ -207,15 +211,18 @@ u64 sched_clock_cpu(int cpu)
|
||||
now -= scd->tick_gtod;
|
||||
|
||||
__raw_spin_unlock(&my_scd->lock);
|
||||
|
||||
__update_sched_clock(scd, now, &clock);
|
||||
|
||||
__raw_spin_unlock(&scd->lock);
|
||||
|
||||
} else {
|
||||
__raw_spin_lock(&scd->lock);
|
||||
__update_sched_clock(scd, now, NULL);
|
||||
clock = scd->clock;
|
||||
__raw_spin_unlock(&scd->lock);
|
||||
}
|
||||
|
||||
__update_sched_clock(scd, now);
|
||||
clock = scd->clock;
|
||||
|
||||
__raw_spin_unlock(&scd->lock);
|
||||
|
||||
return clock;
|
||||
}
|
||||
|
||||
@@ -234,7 +241,7 @@ void sched_clock_tick(void)
|
||||
now_gtod = ktime_to_ns(ktime_get());
|
||||
|
||||
__raw_spin_lock(&scd->lock);
|
||||
__update_sched_clock(scd, now);
|
||||
__update_sched_clock(scd, now, NULL);
|
||||
/*
|
||||
* update tick_gtod after __update_sched_clock() because that will
|
||||
* already observe 1 new jiffy; adding a new tick_gtod to that would
|
||||
|
Loading…
Reference in New Issue
Block a user