Stop ia64 being the last holdout using GENERIC_TIME_VSYSCALL_OLD
so John Stultz can drop that code.

-----BEGIN PGP SIGNATURE-----
iQIxBAABCAAbBQJaCdb4FBx0b255Lmx1Y2tAaW50ZWwuY29tAAoJEKurIx+X31iB
DVoP/16nHYiLg/TUBJidPnyij7x9JFK2A+m7oQy4syyay7GTKklxhXB0Zz2PYqqw
61eb6F1e4MzNyHjjKTnKxxGgTWvYfeQfdVTAWU1m+oDPVwey3X3LoTgdiIeRv0BN
ZzEuBkdRCKoRYnujrYdG34mnwVh0ut0x934k41R4EOX0I/gsmX1T1j0aeaX5K4OA
zGib/OqMPhnJGSv5WguSw4KzzZECGUm6v5CrT63bsyvTvBIDCW49a8iC7wLJ+mEN
4IlBDaMks1r6M0fIb2Ckit0Aa/irLvvXVFxWycA6oZdyJ/BhuTF5Zw+Cel7AAoBT
e7xZSX1rGyYrtTupSMhNrMdp3BT3hkQLlowRBTCjYbBDTM60IlFotioY5O18ljvi
YEZYP7oDWC43Ck1abV9+HT5aWaaQcQTELyPFLNIR600+zswvQ3q5XtSQWq5lxwF7
iR6fc0nAvHxZbiDkY927Rm/BGxw/oWdoB0uHko36hkoU6kdHYOiTyIMGoEw5ZuTL
4xii9Z0qMTm5SJgAxDAJeH8MZVow1f5V7J2dh800i5HeLOxaelzC8PsepmhIXw43
SSEm56lg1zDZrXKCZGPOuL04AJvJntCtlB3H4nWIuc9HFlQ5Z1g6pNHZFlz0NPF0
D5hcQf+nukZ1mD97GaERwzd9ao4N9NWhArSgvDv6AztgEE2f
=PDUR
-----END PGP SIGNATURE-----

Merge tag 'please-pull-gettime_vsyscall_update' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux

Pull ia64 update from Tony Luck:
 "Stop ia64 being the last holdout using GENERIC_TIME_VSYSCALL_OLD so
  that John Stultz can drop that code"

* tag 'please-pull-gettime_vsyscall_update' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux:
  ia64: Update fsyscall gettime to use modern vsyscall_update
commit f08d8bcc12
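The change below replaces ia64's update_vsyscall_old() with a modern update_vsyscall() that reads the timekeeper directly and keeps nanoseconds pre-shifted by the clocksource shift ("shifted nanoseconds", snsec). That lets the fsyscall fast path add the scaled cycle delta first and do a single right shift at the end. The following user-space sketch only illustrates that arithmetic; the helper names and the sample mult/shift values are invented for the example and are not kernel symbols.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* like timespec, but the nanosecond field is kept as (nsec << shift) */
struct time_sn_spec {
        uint64_t sec;
        uint64_t snsec;
};

/*
 * Reader-side arithmetic mirrored from the fsys.S fast path: scale the
 * cycle delta by mult, add the pre-shifted nanoseconds, shift down once,
 * then carry whole seconds into the seconds field.
 */
static void sn_read(const struct time_sn_spec *t, uint64_t cycle_delta,
                    uint32_t mult, uint32_t shift,
                    uint64_t *sec, uint64_t *nsec)
{
        uint64_t ns = (t->snsec + cycle_delta * mult) >> shift;
        uint64_t s = t->sec;

        while (ns >= NSEC_PER_SEC) {    /* normalize into sec/nsec */
                ns -= NSEC_PER_SEC;
                s++;
        }
        *sec = s;
        *nsec = ns;
}

int main(void)
{
        /* sample values only; real mult/shift come from the clocksource */
        uint32_t mult = 2560, shift = 8;        /* ~10 ns per cycle */
        struct time_sn_spec wall = { .sec = 1000, .snsec = 900000000ULL << 8 };
        uint64_t sec, nsec;

        sn_read(&wall, 25000000, mult, shift, &sec, &nsec);    /* 25M cycles */
        printf("%llu.%09llu\n", (unsigned long long)sec,
               (unsigned long long)nsec);      /* prints 1001.150000000 */
        return 0;
}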
arch/ia64/Kconfig

@@ -47,7 +47,7 @@ config IA64
 	select ARCH_TASK_STRUCT_ALLOCATOR
 	select ARCH_THREAD_STACK_ALLOCATOR
 	select ARCH_CLOCKSOURCE_DATA
-	select GENERIC_TIME_VSYSCALL_OLD
+	select GENERIC_TIME_VSYSCALL
 	select SYSCTL_ARCH_UNALIGN_NO_WARN
 	select HAVE_MOD_ARCH_SPECIFIC
 	select MODULES_USE_ELF_RELA
arch/ia64/kernel/asm-offsets.c

@@ -212,6 +212,8 @@ void foo(void)
 	BLANK();
 	DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET,
 	       offsetof (struct timespec, tv_nsec));
+	DEFINE(IA64_TIME_SN_SPEC_SNSEC_OFFSET,
+	       offsetof (struct time_sn_spec, snsec));
 
 	DEFINE(CLONE_SETTLS_BIT, 19);
 #if CLONE_SETTLS != (1<<19)
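IA64_TIME_SN_SPEC_SNSEC_OFFSET is emitted by asm-offsets.c so the assembly in fsys.S can address the snsec field without hard-coding the structure layout. As a rough plain-C illustration (not the kernel's DEFINE machinery), the offset for a struct with two u64 members works out to 8:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct time_sn_spec {
        uint64_t sec;
        uint64_t snsec;
};

int main(void)
{
        /* the value the generated IA64_TIME_SN_SPEC_SNSEC_OFFSET would carry */
        printf("snsec offset = %zu\n", offsetof(struct time_sn_spec, snsec));
        return 0;
}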
arch/ia64/kernel/fsys.S

@@ -236,9 +236,9 @@ ENTRY(fsys_gettimeofday)
 	MOV_FROM_ITC(p8, p6, r2, r10)	// CPU_TIMER. 36 clocks latency!!!
 (p9)	ld8 r2 = [r30]		// MMIO_TIMER. Could also have latency issues..
 (p13)	ld8 r25 = [r19]		// get itc_lastcycle value
-	ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET	// tv_sec
+	ld8 r9 = [r22],IA64_TIME_SN_SPEC_SNSEC_OFFSET	// sec
 	;;
-	ld8 r8 = [r22],-IA64_TIMESPEC_TV_NSEC_OFFSET	// tv_nsec
+	ld8 r8 = [r22],-IA64_TIME_SN_SPEC_SNSEC_OFFSET	// snsec
 (p13)	sub r3 = r25,r2		// Diff needed before comparison (thanks davidm)
 	;;
 (p13)	cmp.gt.unc p6,p7 = r3,r0	// check if it is less than last. p6,p7 cleared
@@ -266,9 +266,9 @@ EX(.fail_efault, probe.w.fault r31, 3)
 	mf
 	;;
 	ld4 r10 = [r20]			// gtod_lock.sequence
-	shr.u r2 = r2,r23		// shift by factor
-	;;
 	add r8 = r8,r2			// Add xtime.nsecs
+	;;
+	shr.u r8 = r8,r23		// shift by factor
 	cmp4.ne p7,p0 = r28,r10
 (p7)	br.cond.dpnt.few .time_redo	// sequence number changed, redo
 	// End critical section.
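The .time_redo branch above is the assembly form of a sequence-counter read loop: sample gtod_lock.sequence, read the clock data, compute the time, then re-check the sequence and redo everything if an update raced in. A rough C rendering of that control flow follows; the struct layout, the read_cycles() stand-in and the field names are invented for illustration, and normalization of the result is omitted.

#include <stdint.h>
#include <stdio.h>

/* illustrative copy of the fsyscall gtod data guarded by a sequence counter */
struct gtod {
        volatile uint32_t seq;          /* even = stable, odd = update in progress */
        uint64_t sec;
        uint64_t snsec;
        uint32_t mult, shift;
        uint64_t cycle_last;
};

static uint64_t read_cycles(void) { return 123456; }   /* stand-in for ITC/MMIO timer */

/* Same shape as the fsys_gettimeofday critical section: retry on seq change. */
static void gtod_read(const struct gtod *g, uint64_t *sec, uint64_t *nsec)
{
        uint32_t seq;
        uint64_t delta, ns;

        do {
                seq = g->seq;                   /* initial sample (r28 in fsys.S) */
                delta = read_cycles() - g->cycle_last;
                ns = (g->snsec + delta * g->mult) >> g->shift;
                *sec = g->sec;
        } while (seq != g->seq || (seq & 1));   /* sequence changed: redo */

        *nsec = ns;
}

int main(void)
{
        struct gtod g = { .seq = 2, .sec = 42, .snsec = 0,
                          .mult = 1 << 8, .shift = 8, .cycle_last = 0 };
        uint64_t sec, nsec;

        gtod_read(&g, &sec, &nsec);
        printf("%llu.%09llu\n", (unsigned long long)sec,
               (unsigned long long)nsec);       /* prints 42.000123456 */
        return 0;
}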
arch/ia64/kernel/fsyscall_gtod_data.h

@@ -6,10 +6,16 @@
  * fsyscall gettimeofday data
  */
 
+/* like timespec, but includes "shifted nanoseconds" */
+struct time_sn_spec {
+	u64	sec;
+	u64	snsec;
+};
+
 struct fsyscall_gtod_data_t {
 	seqcount_t	seq;
-	struct timespec	wall_time;
-	struct timespec monotonic_time;
+	struct time_sn_spec wall_time;
+	struct time_sn_spec monotonic_time;
 	u64		clk_mask;
 	u32		clk_mult;
 	u32		clk_shift;
arch/ia64/kernel/time.c

@@ -430,30 +430,32 @@ void update_vsyscall_tz(void)
 {
 }
 
-void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
-			 struct clocksource *c, u32 mult, u64 cycle_last)
+void update_vsyscall(struct timekeeper *tk)
 {
 	write_seqcount_begin(&fsyscall_gtod_data.seq);
 
-	/* copy fsyscall clock data */
-	fsyscall_gtod_data.clk_mask = c->mask;
-	fsyscall_gtod_data.clk_mult = mult;
-	fsyscall_gtod_data.clk_shift = c->shift;
-	fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
-	fsyscall_gtod_data.clk_cycle_last = cycle_last;
+	/* copy vsyscall data */
+	fsyscall_gtod_data.clk_mask = tk->tkr_mono.mask;
+	fsyscall_gtod_data.clk_mult = tk->tkr_mono.mult;
+	fsyscall_gtod_data.clk_shift = tk->tkr_mono.shift;
+	fsyscall_gtod_data.clk_fsys_mmio = tk->tkr_mono.clock->archdata.fsys_mmio;
+	fsyscall_gtod_data.clk_cycle_last = tk->tkr_mono.cycle_last;
 
 	/* copy kernel time structures */
-	fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
-	fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
-	fsyscall_gtod_data.monotonic_time.tv_sec = wtm->tv_sec
-						+ wall->tv_sec;
-	fsyscall_gtod_data.monotonic_time.tv_nsec = wtm->tv_nsec
-						+ wall->tv_nsec;
+	fsyscall_gtod_data.wall_time.sec = tk->xtime_sec;
+	fsyscall_gtod_data.wall_time.snsec = tk->tkr_mono.xtime_nsec;
+	fsyscall_gtod_data.monotonic_time.sec = tk->xtime_sec
+						+ tk->wall_to_monotonic.tv_sec;
+	fsyscall_gtod_data.monotonic_time.snsec = tk->tkr_mono.xtime_nsec
+						+ ((u64)tk->wall_to_monotonic.tv_nsec
+							<< tk->tkr_mono.shift);
 
 	/* normalize */
-	while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
-		fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
-		fsyscall_gtod_data.monotonic_time.tv_sec++;
+	while (fsyscall_gtod_data.monotonic_time.snsec >=
+			(((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
+		fsyscall_gtod_data.monotonic_time.snsec -=
+			((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
+		fsyscall_gtod_data.monotonic_time.sec++;
 	}
 
 	write_seqcount_end(&fsyscall_gtod_data.seq);
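Because monotonic_time.snsec is xtime_nsec plus wall_to_monotonic.tv_nsec shifted up, it can hold more than one second's worth of shifted nanoseconds; the while loop above carries the excess into .sec so the reader's single shift stays cheap. A small standalone sketch of that normalization, using an invented shift value:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
        uint32_t shift = 10;            /* sample clocksource shift */
        uint64_t sec = 500;
        /* 1.7 seconds worth of shifted nanoseconds, as the sum above can produce */
        uint64_t snsec = (NSEC_PER_SEC + 700000000ULL) << shift;

        /* same normalization as update_vsyscall(): keep snsec < (1s << shift) */
        while (snsec >= (NSEC_PER_SEC << shift)) {
                snsec -= NSEC_PER_SEC << shift;
                sec++;
        }

        printf("sec=%llu snsec=%llu (= %llu ns)\n",
               (unsigned long long)sec, (unsigned long long)snsec,
               (unsigned long long)(snsec >> shift));   /* sec=501, 700000000 ns */
        return 0;
}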