diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index c15e3309b397..b2467980f4cd 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -105,12 +105,12 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
 	return err;
 }
 
-extern unsigned long long native_read_tsc(void);
+extern unsigned long long rdtsc(void);
 
 extern int rdmsr_safe_regs(u32 regs[8]);
 extern int wrmsr_safe_regs(u32 regs[8]);
 
-static __always_inline unsigned long long __native_read_tsc(void)
+static __always_inline unsigned long long __rdtsc(void)
 {
 	DECLARE_ARGS(val, low, high);
 
@@ -180,7 +180,7 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 }
 
 #define rdtscl(low)						\
-	((low) = (u32)__native_read_tsc())
+	((low) = (u32)__rdtsc())
 
 #define rdpmc(counter, low, high)			\
 do {							\
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index fd6079aebfce..5fefc867ab73 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -192,7 +192,7 @@ static inline cycles_t get_cycles(void)
 		return 0;
 #endif
 
-	return native_read_tsc();
+	return rdtsc();
 }
 
 static inline unsigned long long paravirt_sched_clock(void)
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 109a9dd5d454..572fb45c2b72 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -60,7 +60,7 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 static __always_inline
 u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
 {
-	u64 delta = __native_read_tsc() - src->tsc_timestamp;
+	u64 delta = __rdtsc() - src->tsc_timestamp;
 	return pvclock_scale_delta(delta, src->tsc_to_system_mul,
 				   src->tsc_shift);
 }
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 6a998598f172..a6e532ee0005 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -70,7 +70,7 @@ static __always_inline void boot_init_stack_canary(void)
 	 * on during the bootup the random pool has true entropy too.
 	 */
 	get_random_bytes(&canary, sizeof(canary));
-	tsc = __native_read_tsc();
+	tsc = __rdtsc();
 	canary += tsc + (tsc << 32UL);
 
 	current->stack_canary = canary;
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 75ea56b2442d..8ccfad9729f7 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -26,7 +26,7 @@ static inline cycles_t get_cycles(void)
 		return 0;
 #endif
 
-	return native_read_tsc();
+	return rdtsc();
 }
 
 static __always_inline cycles_t vget_cycles(void)
@@ -39,7 +39,7 @@ static __always_inline cycles_t vget_cycles(void)
 	if (!cpu_has_tsc)
 		return 0;
 #endif
-	return (cycles_t)__native_read_tsc();
+	return (cycles_t)__rdtsc();
 }
 
 extern void tsc_init(void);
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index c4094f3f3e33..21e666fcc67e 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -277,7 +277,7 @@ static int apbt_clocksource_register(void)
 
 	/* Verify whether apbt counter works */
 	t1 = dw_apb_clocksource_read(clocksource_apbt);
-	start = native_read_tsc();
+	start = rdtsc();
 
 	/*
 	 * We don't know the TSC frequency yet, but waiting for
@@ -287,7 +287,7 @@ static int apbt_clocksource_register(void)
 	 */
 	do {
 		rep_nop();
-		now = native_read_tsc();
+		now = rdtsc();
 	} while ((now - start) < 200000UL);
 
 	/* APBT is the only always on clocksource, it has to work! */
@@ -404,13 +404,13 @@ unsigned long apbt_quick_calibrate(void)
 	old = dw_apb_clocksource_read(clocksource_apbt);
 	old += loop;
 
-	t1 = __native_read_tsc();
+	t1 = __rdtsc();
 
 	do {
 		new = dw_apb_clocksource_read(clocksource_apbt);
 	} while (new < old);
 
-	t2 = __native_read_tsc();
+	t2 = __rdtsc();
 
 	shift = 5;
 	if (unlikely(loop >> shift == 0)) {
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 4aa858246873..ffbe6b2034ae 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -471,7 +471,7 @@ static int lapic_next_deadline(unsigned long delta,
 {
 	u64 tsc;
 
-	tsc = native_read_tsc();
+	tsc = rdtsc();
 	wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
 	return 0;
 }
@@ -606,7 +606,7 @@ static void __init lapic_cal_handler(struct clock_event_device *dev)
 	unsigned long pm = acpi_pm_read_early();
 
 	if (cpu_has_tsc)
-		tsc = native_read_tsc();
+		tsc = rdtsc();
 
 	switch (lapic_cal_loops++) {
 	case 0:
@@ -1266,7 +1266,7 @@ void setup_local_APIC(void)
 	long long max_loops = cpu_khz ? cpu_khz : 1000000;
 
 	if (cpu_has_tsc)
-		tsc = native_read_tsc();
+		tsc = rdtsc();
 
 	if (disable_apic) {
 		disable_ioapic_support();
@@ -1361,7 +1361,7 @@ void setup_local_APIC(void)
 		}
 		if (queued) {
 			if (cpu_has_tsc && cpu_khz) {
-				ntsc = native_read_tsc();
+				ntsc = rdtsc();
 				max_loops = (cpu_khz << 10) - (ntsc - tsc);
 			} else
 				max_loops--;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 7104bc2ae8dc..799decf853d0 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -109,7 +109,7 @@ void mce_setup(struct mce *m)
 {
 	memset(m, 0, sizeof(struct mce));
 	m->cpu = m->extcpu = smp_processor_id();
-	m->tsc = native_read_tsc();
+	m->tsc = rdtsc();
 	/* We hope get_seconds stays lockless */
 	m->time = get_seconds();
 	m->cpuvendor = boot_cpu_data.x86_vendor;
@@ -1730,7 +1730,7 @@ static void collect_tscs(void *data)
 {
 	unsigned long *cpu_tsc = (unsigned long *)data;
 
-	cpu_tsc[smp_processor_id()] = native_read_tsc();
+	cpu_tsc[smp_processor_id()] = rdtsc();
 }
 
 static int mce_apei_read_done;
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 716fb9642286..756b970f1d77 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -110,7 +110,7 @@ static void init_espfix_random(void)
 	 */
 	if (!arch_get_random_long(&rand)) {
 		/* The constant is an arbitrary large prime */
-		rand = native_read_tsc();
+		rand = rdtsc();
 		rand *= 0xc345c6b72fd16123UL;
 	}
 
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index deb78a7d425e..56777a40ffb0 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -767,7 +767,7 @@ static int hpet_clocksource_register(void)
 
 	/* Verify whether hpet counter works */
 	t1 = hpet_readl(HPET_COUNTER);
-	start = native_read_tsc();
+	start = rdtsc();
 
 	/*
 	 * We don't know the TSC frequency yet, but waiting for
@@ -777,7 +777,7 @@ static int hpet_clocksource_register(void)
 	 */
 	do {
 		rep_nop();
-		now = native_read_tsc();
+		now = rdtsc();
 	} while ((now - start) < 200000UL);
 
 	if (t1 == hpet_readl(HPET_COUNTER)) {
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index cd6de64cc480..06e2fb0870ec 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -353,7 +353,7 @@ struct pv_cpu_ops pv_cpu_ops = {
 	.wbinvd = native_wbinvd,
 	.read_msr = native_read_msr_safe,
 	.write_msr = native_write_msr_safe,
-	.read_tsc = native_read_tsc,
+	.read_tsc = rdtsc,
 	.read_pmc = native_read_pmc,
 	.read_tscp = native_read_tscp,
 	.load_tr_desc = native_load_tr_desc,
diff --git a/arch/x86/kernel/trace_clock.c b/arch/x86/kernel/trace_clock.c
index bd8f4d41bd56..67efb8c96fc4 100644
--- a/arch/x86/kernel/trace_clock.c
+++ b/arch/x86/kernel/trace_clock.c
@@ -15,7 +15,7 @@ u64 notrace trace_clock_x86_tsc(void)
 	u64 ret;
 
 	rdtsc_barrier();
-	ret = native_read_tsc();
+	ret = rdtsc();
 
 	return ret;
 }
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index a76ff9d50dc8..d01362ba53d3 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -59,7 +59,7 @@ u64 native_sched_clock(void)
 	}
 
 	/* read the Time Stamp Counter: */
-	this_offset = native_read_tsc();
+	this_offset = rdtsc();
 
 	/* return the value in ns */
 	return __cycles_2_ns(this_offset);
@@ -77,11 +77,11 @@ unsigned long long
 sched_clock(void) __attribute__((alias("native_sched_clock")));
 #endif
 
-unsigned long long native_read_tsc(void)
+unsigned long long rdtsc(void)
 {
-	return __native_read_tsc();
+	return __rdtsc();
 }
-EXPORT_SYMBOL(native_read_tsc);
+EXPORT_SYMBOL(rdtsc);
 
 int check_tsc_unstable(void)
 {
@@ -619,7 +619,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 	scale = &per_cpu(cyc2ns, cpu);
 	offset = &per_cpu(cyc2ns_offset, cpu);
 
-	tsc_now = native_read_tsc();
+	tsc_now = rdtsc();
 	ns_now = __cycles_2_ns(tsc_now);
 
 	if (cpu_khz) {
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index ff280209d7de..07e19064bab6 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1103,7 +1103,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
 		local_irq_save(flags);
 
 		now = apic->lapic_timer.timer.base->get_time();
-		guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
+		guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc());
 		if (likely(tscdeadline > guest_tsc)) {
 			ns = (tscdeadline - guest_tsc) * 1000000ULL;
 			do_div(ns, this_tsc_khz);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 224d2ef754cc..78f89a937487 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1053,7 +1053,7 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 {
 	u64 tsc;
 
-	tsc = svm_scale_tsc(vcpu, native_read_tsc());
+	tsc = svm_scale_tsc(vcpu, rdtsc());
 
 	return target_tsc - tsc;
 }
@@ -3009,7 +3009,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 	switch (ecx) {
 	case MSR_IA32_TSC: {
 		*data = svm->vmcb->control.tsc_offset +
-			svm_scale_tsc(vcpu, native_read_tsc());
+			svm_scale_tsc(vcpu, rdtsc());
 
 		break;
 	}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index aead33008d8f..41e6b76e9bee 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2037,7 +2037,7 @@ static u64 guest_read_tsc(void)
 {
 	u64 host_tsc, tsc_offset;
 
-	host_tsc = native_read_tsc();
+	host_tsc = rdtsc();
 	tsc_offset = vmcs_read64(TSC_OFFSET);
 	return host_tsc + tsc_offset;
 }
@@ -2113,7 +2113,7 @@ static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
 
 static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 {
-	return target_tsc - native_read_tsc();
+	return target_tsc - rdtsc();
 }
 
 static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 41ba726c1ce2..9aa351196102 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1526,7 +1526,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 		return 1;
 	}
 	if (!use_master_clock) {
-		host_tsc = native_read_tsc();
+		host_tsc = rdtsc();
 		kernel_ns = get_kernel_ns();
 	}
 
@@ -2738,7 +2738,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
 		s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
-				native_read_tsc() - vcpu->arch.last_host_tsc;
+				rdtsc() - vcpu->arch.last_host_tsc;
 		if (tsc_delta < 0)
 			mark_tsc_unstable("KVM discovered backwards TSC");
 		if (check_tsc_unstable()) {
@@ -2766,7 +2766,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	kvm_x86_ops->vcpu_put(vcpu);
 	kvm_put_guest_fpu(vcpu);
-	vcpu->arch.last_host_tsc = native_read_tsc();
+	vcpu->arch.last_host_tsc = rdtsc();
 }
 
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
@@ -5853,7 +5853,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		hw_breakpoint_restore();
 
 	vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
-							   native_read_tsc());
+							   rdtsc());
 
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();
@@ -6628,7 +6628,7 @@ int kvm_arch_hardware_enable(void *garbage)
 	if (ret != 0)
 		return ret;
 
-	local_tsc = native_read_tsc();
+	local_tsc = rdtsc();
 	stable = !check_tsc_unstable();
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 8b19f7980970..0bc05a3af1bc 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -101,7 +101,7 @@ void use_tsc_delay(void)
 int read_current_timer(unsigned long *timer_val)
 {
 	if (delay_fn == delay_tsc) {
-		*timer_val = native_read_tsc();
+		*timer_val = rdtsc();
 		return 0;
 	}
 	return -1;
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 53b05d261e9c..9e9a186eb999 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1242,7 +1242,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 	.read_msr = native_read_msr_safe,
 	.write_msr = xen_write_msr_safe,
 
-	.read_tsc = native_read_tsc,
+	.read_tsc = rdtsc,
 	.read_pmc = native_read_pmc,
 
 	.read_tscp = native_read_tscp,
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index ab13448defcf..2ac0c704bcb8 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -340,7 +340,7 @@ static bool powerclamp_adjust_controls(unsigned int target_ratio,
 
 	/* check result for the last window */
 	msr_now = pkg_state_counter();
-	tsc_now = native_read_tsc();
+	tsc_now = rdtsc();
 
 	/* calculate pkg cstate vs tsc ratio */
 	if (!msr_last || !tsc_last)
@@ -482,7 +482,7 @@ static void poll_pkg_cstate(struct work_struct *dummy)
 	u64 val64;
 
 	msr_now = pkg_state_counter();
-	tsc_now = native_read_tsc();
+	tsc_now = rdtsc();
 	jiffies_now = jiffies;
 
 	/* calculate pkg cstate vs tsc ratio */
diff --git a/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c b/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c
index 102feec2809b..13ef0e47ccf1 100644
--- a/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c
+++ b/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c
@@ -84,11 +84,11 @@ static int __init cpufreq_test_tsc(void)
 	printk(KERN_DEBUG "start--> \n");
 	then = read_pmtmr();
-	then_tsc = native_read_tsc();
+	then_tsc = rdtsc();
 	for (i=0;i<20;i++) {
 		mdelay(100);
 		now = read_pmtmr();
-		now_tsc = native_read_tsc();
+		now_tsc = rdtsc();
 		diff = (now - then) & 0xFFFFFF;
 		diff_tsc = now_tsc - then_tsc;
 		printk(KERN_DEBUG "t1: %08u t2: %08u diff_pmtmr: %08u diff_tsc: %016llu\n", then, now, diff, diff_tsc);
 	}
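
For reference, the pattern this rename touches most often is the "spin until ~200000 TSC cycles have passed" loop in apbt_clocksource_register() and hpet_clocksource_register() above. Below is a minimal user-space sketch of that loop, not part of the patch, assuming an x86 target built with GCC or Clang: the __rdtsc() and _mm_pause() intrinsics from x86intrin.h stand in for the kernel's __rdtsc() and rep_nop().

/* Sketch only: user-space analogue of the clocksource verification wait. */
#include <stdio.h>
#include <stdint.h>
#include <x86intrin.h>	/* __rdtsc(), _mm_pause() */

int main(void)
{
	uint64_t start, now;

	start = __rdtsc();
	do {
		_mm_pause();		/* PAUSE hint, like the kernel's rep_nop() */
		now = __rdtsc();
	} while ((now - start) < 200000UL);	/* ~50us at 4 GHz, ~200us at 1 GHz */

	printf("spun for %llu TSC cycles\n", (unsigned long long)(now - start));
	return 0;
}

As in the kernel loop, the bound is expressed in cycles rather than time, so it is safe to use before the TSC frequency has been calibrated.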