x86/asm/tsc: Inline native_read_tsc() and remove __native_read_tsc()
author: Andy Lutomirski <luto@kernel.org>
Thu, 25 Jun 2015 16:43:55 +0000 (18:43 +0200)
committer: Ingo Molnar <mingo@kernel.org>
Mon, 6 Jul 2015 13:23:25 +0000 (15:23 +0200)
In the following commit:

  cdc7957d1954 ("x86: move native_read_tsc() offline")

... native_read_tsc() was moved out of line, presumably for some
now-obsolete vDSO-related reason. Undo it.

The entire instruction sequence (rdtsc; shl; or) is only 11 bytes, and calls
via rdtscl() and similar helpers were already inlined.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Huang Rui <ray.huang@amd.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm ML <kvm@vger.kernel.org>
Link: http://lkml.kernel.org/r/d05ffe2aaf8468ca475ebc00efad7b2fa174af19.1434501121.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/entry/vdso/vclock_gettime.c
arch/x86/include/asm/msr.h
arch/x86/include/asm/pvclock.h
arch/x86/include/asm/stackprotector.h
arch/x86/include/asm/tsc.h
arch/x86/kernel/apb_timer.c
arch/x86/kernel/tsc.c

index 9793322751e02f63ddba0d1b8fef5f21b0a4d502..972b488ac16ad23e309debe8afbcb82d8f44054b 100644 (file)
@@ -186,7 +186,7 @@ notrace static cycle_t vread_tsc(void)
         * but no one has ever seen it happen.
         */
        rdtsc_barrier();
-       ret = (cycle_t)__native_read_tsc();
+       ret = (cycle_t)native_read_tsc();
 
        last = gtod->cycle_last;
 
index e6a707eb508167dea3eda93a440cee3ba15f39db..88711470af7f7059edfb236dfb588f934ebb9681 100644 (file)
@@ -106,12 +106,10 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
        return err;
 }
 
-extern unsigned long long native_read_tsc(void);
-
 extern int rdmsr_safe_regs(u32 regs[8]);
 extern int wrmsr_safe_regs(u32 regs[8]);
 
-static __always_inline unsigned long long __native_read_tsc(void)
+static __always_inline unsigned long long native_read_tsc(void)
 {
        DECLARE_ARGS(val, low, high);
 
@@ -181,10 +179,10 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 }
 
 #define rdtscl(low)                                            \
-       ((low) = (u32)__native_read_tsc())
+       ((low) = (u32)native_read_tsc())
 
 #define rdtscll(val)                                           \
-       ((val) = __native_read_tsc())
+       ((val) = native_read_tsc())
 
 #define rdpmc(counter, low, high)                      \
 do {                                                   \
index 628954ceede1159fa7df62944d7b140df73d629b..2bd69d62c6233eaec23197178da16b9ce9d39b0f 100644 (file)
@@ -62,7 +62,7 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 static __always_inline
 u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
 {
-       u64 delta = __native_read_tsc() - src->tsc_timestamp;
+       u64 delta = native_read_tsc() - src->tsc_timestamp;
        return pvclock_scale_delta(delta, src->tsc_to_system_mul,
                                   src->tsc_shift);
 }
index c2e00bb2a1365cef17911e262d28d0fad75e6828..bc5fa2af112e791fc8f090e293828e83a783566e 100644 (file)
@@ -72,7 +72,7 @@ static __always_inline void boot_init_stack_canary(void)
         * on during the bootup the random pool has true entropy too.
         */
        get_random_bytes(&canary, sizeof(canary));
-       tsc = __native_read_tsc();
+       tsc = native_read_tsc();
        canary += tsc + (tsc << 32UL);
 
        current->stack_canary = canary;
index 94605c0e9ceebc0058b18b995a5ac4f73a0d2aa6..fd11128faf25363e7cdbb6f14fb643d5b7024972 100644 (file)
@@ -42,7 +42,7 @@ static __always_inline cycles_t vget_cycles(void)
        if (!cpu_has_tsc)
                return 0;
 #endif
-       return (cycles_t)__native_read_tsc();
+       return (cycles_t)native_read_tsc();
 }
 
 extern void tsc_init(void);
index ede92c3364d3277fc3ea378695030eea47ca9fc8..9fe111cc50f87aeec5bd559bb4f63732b765848e 100644 (file)
@@ -390,13 +390,13 @@ unsigned long apbt_quick_calibrate(void)
        old = dw_apb_clocksource_read(clocksource_apbt);
        old += loop;
 
-       t1 = __native_read_tsc();
+       t1 = native_read_tsc();
 
        do {
                new = dw_apb_clocksource_read(clocksource_apbt);
        } while (new < old);
 
-       t2 = __native_read_tsc();
+       t2 = native_read_tsc();
 
        shift = 5;
        if (unlikely(loop >> shift == 0)) {
index 505449700e0cf4e66ea6284135482ac172fe756a..e7710cd7ba009a5fd840a2cb701563c2a272b2b8 100644 (file)
@@ -308,12 +308,6 @@ unsigned long long
 sched_clock(void) __attribute__((alias("native_sched_clock")));
 #endif
 
-unsigned long long native_read_tsc(void)
-{
-       return __native_read_tsc();
-}
-EXPORT_SYMBOL(native_read_tsc);
-
 int check_tsc_unstable(void)
 {
        return tsc_unstable;