Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/include/asm-i386/timex.h | |
3 | * | |
4 | * i386 architecture timex specifications | |
5 | */ | |
6 | #ifndef _ASMi386_TIMEX_H | |
7 | #define _ASMi386_TIMEX_H | |
8 | ||
9 | #include <linux/config.h> | |
10 | #include <asm/processor.h> | |
11 | ||
/*
 * Base frequency of the underlying timer chip (the classic PC PIT
 * input clock).  The AMD Elan runs it from a different base clock,
 * hence the special case.
 */
#ifdef CONFIG_X86_ELAN
# define CLOCK_TICK_RATE 1189200 /* AMD Elan has different frequency! */
#else
# define CLOCK_TICK_RATE 1193182 /* Underlying HZ */
#endif
17 | ||
18 | ||
19 | /* | |
20 | * Standard way to access the cycle counter on i586+ CPUs. | |
21 | * Currently only used on SMP. | |
22 | * | |
23 | * If you really have a SMP machine with i486 chips or older, | |
24 | * compile for that, and this will just always return zero. | |
25 | * That's ok, it just means that the nicer scheduling heuristics | |
26 | * won't work for you. | |
27 | * | |
28 | * We only use the low 32 bits, and we'd simply better make sure | |
29 | * that we reschedule before that wraps. Scheduling at least every | |
30 | * four billion cycles just basically sounds like a good idea, | |
31 | * regardless of how fast the machine is. | |
32 | */ | |
33 | typedef unsigned long long cycles_t; | |
34 | ||
35 | static inline cycles_t get_cycles (void) | |
36 | { | |
37 | unsigned long long ret=0; | |
38 | ||
39 | #ifndef CONFIG_X86_TSC | |
40 | if (!cpu_has_tsc) | |
41 | return 0; | |
42 | #endif | |
43 | ||
44 | #if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC) | |
45 | rdtscll(ret); | |
46 | #endif | |
47 | return ret; | |
48 | } | |
49 | ||
/* CPU clock in kHz — presumably measured/set by arch setup code at
 * boot; defined elsewhere (TODO confirm the defining translation unit). */
extern unsigned int cpu_khz;

/*
 * Arch-provided timer read.  ARCH_HAS_READ_CURRENT_TIMER advertises to
 * generic code that this implementation exists.
 */
extern int read_current_timer(unsigned long *timer_value);
#define ARCH_HAS_READ_CURRENT_TIMER 1
54 | ||
1da177e4 | 55 | #endif |