/*
 * "High Precision Event Timer" based timekeeping.
 *
 * Copyright (c) 1991,1992,1995  Linus Torvalds
 * Copyright (c) 1994  Alan Modra
 * Copyright (c) 1995  Markus Kuhn
 * Copyright (c) 1996  Ingo Molnar
 * Copyright (c) 1998  Andrea Arcangeli
 * Copyright (c) 2002,2006  Vojtech Pavlik
 * Copyright (c) 2003  Andi Kleen
 * RTC support code taken from arch/i386/kernel/timers/time_hpet.c
 */
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/mca.h>
#include <linux/nmi.h>

#include <asm/x86_init.h>
#include <asm/i8253.h>
#include <asm/hpet.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/timer.h>
#include <asm/vsyscall.h>
29 volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
31 unsigned long profile_pc(struct pt_regs *regs)
33 unsigned long pc = instruction_pointer(regs);
35 /* Assume the lock function has either no stack frame or a copy
37 Eflags always has bits 22 and up cleared unlike kernel addresses. */
38 if (!user_mode_vm(regs) && in_lock_functions(pc)) {
39 #ifdef CONFIG_FRAME_POINTER
40 return *(unsigned long *)(regs->bp + sizeof(long));
42 unsigned long *sp = (unsigned long *)regs->sp;
51 EXPORT_SYMBOL(profile_pc);
53 static irqreturn_t timer_interrupt(int irq, void *dev_id)
55 inc_irq_stat(irq0_irqs);
57 global_clock_event->event_handler(global_clock_event);
61 u8 irq_v = inb_p(0x61); /* read the current state */
62 outb_p(irq_v|0x80, 0x61); /* reset the IRQ */
69 /* calibrate_cpu is used on systems with fixed rate TSCs to determine
70 * processor frequency */
71 #define TICK_COUNT 100000000
72 unsigned long __init calibrate_cpu(void)
74 int tsc_start, tsc_now;
76 unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
79 for (i = 0; i < 4; i++)
80 if (avail_to_resrv_perfctr_nmi_bit(i))
82 no_ctr_free = (i == 4);
84 WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
85 "cpu_khz value may be incorrect.\n");
87 rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
88 wrmsrl(MSR_K7_EVNTSEL3, 0);
89 rdmsrl(MSR_K7_PERFCTR3, pmc3);
91 reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
92 reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
94 local_irq_save(flags);
95 /* start measuring cycles, incrementing from 0 */
96 wrmsrl(MSR_K7_PERFCTR0 + i, 0);
97 wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
100 rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
101 tsc_now = get_cycles();
102 } while ((tsc_now - tsc_start) < TICK_COUNT);
104 local_irq_restore(flags);
106 wrmsrl(MSR_K7_EVNTSEL3, 0);
107 wrmsrl(MSR_K7_PERFCTR3, pmc3);
108 wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
110 release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
111 release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
114 return pmc_now * tsc_khz / (tsc_now - tsc_start);
117 static struct irqaction irq0 = {
118 .handler = timer_interrupt,
119 .flags = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING | IRQF_TIMER,
123 void __init hpet_time_init(void)
131 static void x86_late_time_init(void)
133 x86_init.timers.timer_init();
136 void __init time_init(void)
139 late_time_init = x86_late_time_init;