forked from luck/tmp_suning_uos_patched
6c7811c628
The SPDX identifier defines the license of the files already. No need for the boilerplates.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Kees Cook <keescook@chromium.org>
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: John Stultz <john.stultz@linaro.org>
Acked-by: Corey Minyard <cminyard@mvista.com>
Acked-by: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Kate Stewart <kstewart@linuxfoundation.org>
Cc: Philippe Ombredanne <pombredanne@nexb.com>
Cc: Peter Anvin <hpa@zytor.com>
Cc: Russell King <rmk+kernel@armlinux.org.uk>
Cc: Richard Cochran <richardcochran@gmail.com>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Cc: David Riley <davidriley@chromium.org>
Cc: Colin Cross <ccross@android.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: https://lkml.kernel.org/r/20181031182253.132458951@linutronix.de
100 lines
2.5 KiB
C
// SPDX-License-Identifier: GPL-2.0+
/*
 * Based on clocksource code. See commit 74d23cc704d1
 */
#include <linux/export.h>
#include <linux/timecounter.h>

void timecounter_init(struct timecounter *tc,
		      const struct cyclecounter *cc,
		      u64 start_tstamp)
{
	tc->cc = cc;
	tc->cycle_last = cc->read(cc);
	tc->nsec = start_tstamp;
	tc->mask = (1ULL << cc->shift) - 1;
	tc->frac = 0;
}
EXPORT_SYMBOL_GPL(timecounter_init);
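
/*
 * Illustrative sketch (not part of this file): a driver that owns a
 * free-running hardware counter typically embeds a struct cyclecounter
 * plus a struct timecounter and wires them up roughly as below. The
 * names my_priv, my_read_cycles, MY_CYCLE_REG, my_mult and my_shift
 * are hypothetical.
 *
 *	static u64 my_read_cycles(const struct cyclecounter *cc)
 *	{
 *		struct my_priv *priv = container_of(cc, struct my_priv, cc);
 *
 *		return readq(priv->regs + MY_CYCLE_REG);
 *	}
 *
 *	priv->cc.read  = my_read_cycles;
 *	priv->cc.mask  = CLOCKSOURCE_MASK(48);
 *	priv->cc.mult  = my_mult;
 *	priv->cc.shift = my_shift;
 *
 *	timecounter_init(&priv->tc, &priv->cc,
 *			 ktime_to_ns(ktime_get_real()));
 */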

/**
 * timecounter_read_delta - get nanoseconds since last call of this function
 * @tc:		Pointer to time counter
 *
 * When the underlying cycle counter runs over, this will be handled
 * correctly as long as it does not run over more than once between
 * calls.
 *
 * The first call to this function for a new time counter initializes
 * the time tracking and returns an undefined result.
 */
static u64 timecounter_read_delta(struct timecounter *tc)
{
	u64 cycle_now, cycle_delta;
	u64 ns_offset;

	/* read cycle counter: */
	cycle_now = tc->cc->read(tc->cc);

	/* calculate the delta since the last timecounter_read_delta(): */
	cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;

	/* convert to nanoseconds: */
	ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta,
					tc->mask, &tc->frac);

	/* update time stamp of timecounter_read_delta() call: */
	tc->cycle_last = cycle_now;

	return ns_offset;
}

u64 timecounter_read(struct timecounter *tc)
{
	u64 nsec;

	/* increment time by nanoseconds since last call */
	nsec = timecounter_read_delta(tc);
	nsec += tc->nsec;
	tc->nsec = nsec;

	return nsec;
}
EXPORT_SYMBOL_GPL(timecounter_read);
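
/*
 * Illustrative sketch: once initialized, a driver calls
 * timecounter_read() whenever it needs the elapsed time in nanoseconds,
 * e.g. from a periodic worker that also keeps the counter from wrapping
 * unnoticed (priv is the hypothetical driver state from the sketch
 * above):
 *
 *	u64 ns = timecounter_read(&priv->tc);
 *
 * The call must happen often enough that the underlying cycle counter
 * wraps at most once between reads; otherwise timecounter_read_delta()
 * above cannot recover the lost wraps via the mask arithmetic.
 */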

/*
 * This is like cyclecounter_cyc2ns(), but it is used for computing a
 * time previous to the time stored in the cycle counter.
 */
static u64 cc_cyc2ns_backwards(const struct cyclecounter *cc,
			       u64 cycles, u64 mask, u64 frac)
{
	u64 ns = (u64) cycles;

	ns = ((ns * cc->mult) - frac) >> cc->shift;

	return ns;
}

u64 timecounter_cyc2time(struct timecounter *tc,
			 u64 cycle_tstamp)
{
	u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
	u64 nsec = tc->nsec, frac = tc->frac;

	/*
	 * Instead of always treating cycle_tstamp as more recent
	 * than tc->cycle_last, detect when it is too far in the
	 * future and treat it as old time stamp instead.
	 */
	if (delta > tc->cc->mask / 2) {
		delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
		nsec -= cc_cyc2ns_backwards(tc->cc, delta, tc->mask, frac);
	} else {
		nsec += cyclecounter_cyc2ns(tc->cc, delta, tc->mask, &frac);
	}

	return nsec;
}
EXPORT_SYMBOL_GPL(timecounter_cyc2time);
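
/*
 * Illustrative sketch: a raw cycle value latched by hardware, e.g. an RX
 * timestamp captured by a NIC, can be converted into the same nanosecond
 * time base with timecounter_cyc2time(). The value may be slightly older
 * than tc->cycle_last; the mask/2 check above then interpolates backwards
 * instead of forwards (my_read_rx_tstamp is hypothetical):
 *
 *	u64 hw_cycles = my_read_rx_tstamp(priv);
 *	u64 ns = timecounter_cyc2time(&priv->tc, hw_cycles);
 *
 *	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
 */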