x86/asm/tsc: Replace rdtscll() with native_read_tsc()
Now that the ->read_tsc() paravirt hook is gone, rdtscll() is just a
wrapper around native_read_tsc(). Unwrap it.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Huang Rui <ray.huang@amd.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm ML <kvm@vger.kernel.org>
Link: http://lkml.kernel.org/r/d2449ae62c1b1fb90195bcfb19ef4a35883a04dc.1434501121.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent: 9261e050b6
commit: 87be28aaf1
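For context, the change is mechanical: every rdtscll(x) caller becomes
x = native_read_tsc(), since the macro only hid that assignment. Below is a
minimal standalone C sketch of the pattern; the native_read_tsc() stub exists
only so the sketch compiles outside the kernel, where the real function is an
inline RDTSC wrapper.

	#include <stdint.h>

	/* Stand-in for the kernel's native_read_tsc(); stubbed here only
	 * for standalone compilation. */
	static inline uint64_t native_read_tsc(void)
	{
		uint32_t lo = 0, hi = 0;
	#if defined(__x86_64__) || defined(__i386__)
		__asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
	#endif
		return ((uint64_t)hi << 32) | lo;
	}

	/* Before: a statement-style macro that assigns into its argument. */
	#define rdtscll(val) ((val) = native_read_tsc())

	uint64_t read_tsc_old(void)
	{
		uint64_t tsc;
		rdtscll(tsc);		/* the macro hides the assignment */
		return tsc;
	}

	/* After: the expression form, which also lets callers such as
	 * get_cycles() return the value without a temporary. */
	uint64_t read_tsc_new(void)
	{
		return native_read_tsc();
	}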
@@ -82,7 +82,7 @@ static unsigned long get_random_long(void)
 
 	if (has_cpuflag(X86_FEATURE_TSC)) {
 		debug_putstr(" RDTSC");
-		rdtscll(raw);
+		raw = native_read_tsc();
 
 		random ^= raw;
 		use_i8254 = false;
@@ -192,9 +192,6 @@ do { \
 #define rdtscl(low)						\
 	((low) = (u32)native_read_tsc())
 
-#define rdtscll(val)						\
-	((val) = native_read_tsc())
-
 #define rdtscp(low, high, aux)					\
 do {								\
 	unsigned long long _val = native_read_tscp(&(aux));	\
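Note that the neighboring rdtscl() macro, which this hunk keeps, deliberately
truncates the counter to its low 32 bits via the (u32) cast. A quick
illustrative snippet (the sample value is arbitrary):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Arbitrary 64-bit counter value with bits set above bit 31. */
		uint64_t tsc = 0x123456789abcdef0ULL;

		/* What rdtscl() does: keep only the low 32 bits. */
		uint32_t low = (uint32_t)tsc;

		/* Prints: full: 0x123456789abcdef0  low 32 bits: 0x9abcdef0 */
		printf("full: %#llx  low 32 bits: %#x\n",
		       (unsigned long long)tsc, low);
		return 0;
	}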
@@ -21,15 +21,12 @@ extern void disable_TSC(void);
 
 static inline cycles_t get_cycles(void)
 {
-	unsigned long long ret = 0;
-
 #ifndef CONFIG_X86_TSC
 	if (!cpu_has_tsc)
 		return 0;
 #endif
-	rdtscll(ret);
 
-	return ret;
+	return native_read_tsc();
 }
 
 extern void tsc_init(void);
@@ -263,7 +263,7 @@ static int apbt_clocksource_register(void)
 
 	/* Verify whether apbt counter works */
 	t1 = dw_apb_clocksource_read(clocksource_apbt);
-	rdtscll(start);
+	start = native_read_tsc();
 
 	/*
 	 * We don't know the TSC frequency yet, but waiting for
@@ -273,7 +273,7 @@ static int apbt_clocksource_register(void)
 	 */
 	do {
 		rep_nop();
-		rdtscll(now);
+		now = native_read_tsc();
 	} while ((now - start) < 200000UL);
 
 	/* APBT is the only always on clocksource, it has to work! */
@@ -457,7 +457,7 @@ static int lapic_next_deadline(unsigned long delta,
 {
 	u64 tsc;
 
-	rdtscll(tsc);
+	tsc = native_read_tsc();
 	wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
 	return 0;
 }
@@ -592,7 +592,7 @@ static void __init lapic_cal_handler(struct clock_event_device *dev)
 	unsigned long pm = acpi_pm_read_early();
 
 	if (cpu_has_tsc)
-		rdtscll(tsc);
+		tsc = native_read_tsc();
 
 	switch (lapic_cal_loops++) {
 	case 0:
@@ -1209,7 +1209,7 @@ void setup_local_APIC(void)
 	long long max_loops = cpu_khz ? cpu_khz : 1000000;
 
 	if (cpu_has_tsc)
-		rdtscll(tsc);
+		tsc = native_read_tsc();
 
 	if (disable_apic) {
 		disable_ioapic_support();
@@ -1293,7 +1293,7 @@ void setup_local_APIC(void)
 	}
 	if (queued) {
 		if (cpu_has_tsc && cpu_khz) {
-			rdtscll(ntsc);
+			ntsc = native_read_tsc();
 			max_loops = (cpu_khz << 10) - (ntsc - tsc);
 		} else
 			max_loops--;
@@ -125,7 +125,7 @@ void mce_setup(struct mce *m)
 {
 	memset(m, 0, sizeof(struct mce));
 	m->cpu = m->extcpu = smp_processor_id();
-	rdtscll(m->tsc);
+	m->tsc = native_read_tsc();
 	/* We hope get_seconds stays lockless */
 	m->time = get_seconds();
 	m->cpuvendor = boot_cpu_data.x86_vendor;
@@ -1784,7 +1784,7 @@ static void collect_tscs(void *data)
 {
 	unsigned long *cpu_tsc = (unsigned long *)data;
 
-	rdtscll(cpu_tsc[smp_processor_id()]);
+	cpu_tsc[smp_processor_id()] = native_read_tsc();
 }
 
 static int mce_apei_read_done;
@@ -110,7 +110,7 @@ static void init_espfix_random(void)
 	 */
 	if (!arch_get_random_long(&rand)) {
 		/* The constant is an arbitrary large prime */
-		rdtscll(rand);
+		rand = native_read_tsc();
 		rand *= 0xc345c6b72fd16123UL;
 	}
 
@@ -735,7 +735,7 @@ static int hpet_clocksource_register(void)
 
 	/* Verify whether hpet counter works */
 	t1 = hpet_readl(HPET_COUNTER);
-	rdtscll(start);
+	start = native_read_tsc();
 
 	/*
 	 * We don't know the TSC frequency yet, but waiting for
@@ -745,7 +745,7 @@ static int hpet_clocksource_register(void)
 	 */
 	do {
 		rep_nop();
-		rdtscll(now);
+		now = native_read_tsc();
 	} while ((now - start) < 200000UL);
 
 	if (t1 == hpet_readl(HPET_COUNTER)) {
@@ -15,7 +15,7 @@ u64 notrace trace_clock_x86_tsc(void)
 	u64 ret;
 
 	rdtsc_barrier();
-	rdtscll(ret);
+	ret = native_read_tsc();
 
 	return ret;
 }
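The trace-clock hunk above pairs the read with rdtsc_barrier(), which keeps
the TSC read from being speculated ahead of earlier loads (the kernel patches
in LFENCE or MFENCE depending on the CPU vendor). A minimal standalone sketch
of that ordering idiom, using LFENCE directly:

	#include <stdint.h>

	/* Ordered TSC read: the LFENCE ahead of RDTSC mirrors what
	 * rdtsc_barrier() + rdtscll() provided in trace_clock_x86_tsc(). */
	static inline uint64_t ordered_read_tsc(void)
	{
		uint32_t lo = 0, hi = 0;
	#if defined(__x86_64__) || defined(__i386__)
		__asm__ __volatile__("lfence\n\trdtsc"
				     : "=a"(lo), "=d"(hi) :: "memory");
	#endif
		return ((uint64_t)hi << 32) | lo;
	}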
@@ -248,7 +248,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 
 	data = cyc2ns_write_begin(cpu);
 
-	rdtscll(tsc_now);
+	tsc_now = native_read_tsc();
 	ns_now = cycles_2_ns(tsc_now);
 
 	/*
@@ -290,7 +290,7 @@ u64 native_sched_clock(void)
 	}
 
 	/* read the Time Stamp Counter: */
-	rdtscll(tsc_now);
+	tsc_now = native_read_tsc();
 
 	/* return the value in ns */
 	return cycles_2_ns(tsc_now);
@@ -2236,7 +2236,7 @@ static u64 guest_read_tsc(void)
 {
 	u64 host_tsc, tsc_offset;
 
-	rdtscll(host_tsc);
+	host_tsc = native_read_tsc();
 	tsc_offset = vmcs_read64(TSC_OFFSET);
 	return host_tsc + tsc_offset;
 }
@@ -100,7 +100,7 @@ void use_tsc_delay(void)
 int read_current_timer(unsigned long *timer_val)
 {
 	if (delay_fn == delay_tsc) {
-		rdtscll(*timer_val);
+		*timer_val = native_read_tsc();
 		return 0;
 	}
 	return -1;
@@ -340,7 +340,7 @@ static bool powerclamp_adjust_controls(unsigned int target_ratio,
 
 	/* check result for the last window */
 	msr_now = pkg_state_counter();
-	rdtscll(tsc_now);
+	tsc_now = native_read_tsc();
 
 	/* calculate pkg cstate vs tsc ratio */
 	if (!msr_last || !tsc_last)
@@ -482,7 +482,7 @@ static void poll_pkg_cstate(struct work_struct *dummy)
 	u64 val64;
 
 	msr_now = pkg_state_counter();
-	rdtscll(tsc_now);
+	tsc_now = native_read_tsc();
 	jiffies_now = jiffies;
 
 	/* calculate pkg cstate vs tsc ratio */
@@ -81,11 +81,11 @@ static int __init cpufreq_test_tsc(void)
 
 	printk(KERN_DEBUG "start--> \n");
 	then = read_pmtmr();
-	rdtscll(then_tsc);
+	then_tsc = native_read_tsc();
 	for (i=0;i<20;i++) {
 		mdelay(100);
 		now = read_pmtmr();
-		rdtscll(now_tsc);
+		now_tsc = native_read_tsc();
 		diff = (now - then) & 0xFFFFFF;
 		diff_tsc = now_tsc - then_tsc;
 		printk(KERN_DEBUG "t1: %08u t2: %08u diff_pmtmr: %08u diff_tsc: %016llu\n", then, now, diff, diff_tsc);