forked from luck/tmp_suning_uos_patched
da15cfdae0
After talking with some application writers who want very fast, but not fine-grained timestamps, I decided to try to implement new clock_ids to clock_gettime(): CLOCK_REALTIME_COARSE and CLOCK_MONOTONIC_COARSE which returns the time at the last tick. This is very fast as we don't have to access any hardware (which can be very painful if you're using something like the acpi_pm clocksource), and we can even use the vdso clock_gettime() method to avoid the syscall. The only trade-off is that you only get low-res tick-grained time resolution. This isn't a new idea; I know Ingo has a patch in the -rt tree that made the vsyscall gettimeofday() return coarse-grained time when the vsyscall64 sysctl was set to 2. However, this affects all applications on a system. With this method, applications can choose the proper speed/granularity trade-off for themselves. Signed-off-by: John Stultz <johnstul@us.ibm.com> Cc: Andi Kleen <andi@firstfloor.org> Cc: nikolag@ca.ibm.com Cc: Darren Hart <dvhltc@us.ibm.com> Cc: arjan@infradead.org Cc: jonathan@jonmasters.org LKML-Reference: <1250734414.6897.5.camel@localhost.localdomain> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
31 lines
674 B
C
31 lines
674 B
C
#ifndef _ASM_X86_VGTOD_H
#define _ASM_X86_VGTOD_H

#include <asm/vsyscall.h>
#include <linux/clocksource.h>

/*
 * Snapshot of the kernel's timekeeping state consumed by the vsyscall/vDSO
 * gettimeofday()/clock_gettime() fast paths.  The kernel writes it under
 * 'lock'; userspace readers retry via the seqlock protocol to get a
 * consistent view without entering the kernel.
 */
struct vsyscall_gtod_data {
	seqlock_t	lock;		/* seqlock guarding all fields below */

	/* open coded 'struct timespec' */
	time_t		wall_time_sec;	/* CLOCK_REALTIME seconds at last update */
	u32		wall_time_nsec;	/* CLOCK_REALTIME nanoseconds at last update */

	int		sysctl_enabled;	/* vsyscall64 sysctl gate for the fast path */
	struct timezone	sys_tz;		/* system timezone for gettimeofday() */
	struct {			/* extract of a clocksource struct */
		cycle_t	(*vread)(void);	/* userspace-callable cycle read, NULL if unsupported */
		cycle_t	cycle_last;	/* clocksource cycles at last update */
		cycle_t	mask;		/* valid-bits mask for the cycle counter */
		u32	mult;		/* cycle -> nanosecond multiplier */
		u32	shift;		/* cycle -> nanosecond shift */
	} clock;
	struct timespec	wall_to_monotonic;	/* offset from realtime to monotonic */
	struct timespec	wall_time_coarse;	/* tick-granular time for *_COARSE clock ids */
};

/*
 * Same object under two names: __vsyscall_gtod_data is placed in the
 * dedicated vsyscall data section (see __section_vsyscall_gtod_data) so it
 * is reachable from the userspace-visible vsyscall page; vsyscall_gtod_data
 * is the ordinary kernel-address view used by the timekeeping update code.
 */
extern struct vsyscall_gtod_data __vsyscall_gtod_data
	__section_vsyscall_gtod_data;
extern struct vsyscall_gtod_data vsyscall_gtod_data;

#endif /* _ASM_X86_VGTOD_H */