/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time. (for iSeries, we calibrate the timebase
 *   against the Titan chip's clock.)
 * - for astronomical applications: add a new function to get
 *   non-ambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/perf_event.h>
#include <asm/trace.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/cputime.h>
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
#endif

/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/clocksource.h>

static cycle_t rtc_read(struct clocksource *);
static struct clocksource clocksource_rtc = {
        .name         = "rtc",
        .rating       = 400,
        .flags        = CLOCK_SOURCE_IS_CONTINUOUS,
        .mask         = CLOCKSOURCE_MASK(64),
        .shift        = 22,
        .mult         = 0,      /* To be filled in */
        .read         = rtc_read,
};

static cycle_t timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
        .name         = "timebase",
        .rating       = 400,
        .flags        = CLOCK_SOURCE_IS_CONTINUOUS,
        .mask         = CLOCKSOURCE_MASK(64),
        .shift        = 22,
        .mult         = 0,      /* To be filled in */
        .read         = timebase_read,
};

#define DECREMENTER_MAX 0x7fffffff

static int decrementer_set_next_event(unsigned long evt,
                                      struct clock_event_device *dev);
static void decrementer_set_mode(enum clock_event_mode mode,
                                 struct clock_event_device *dev);

static struct clock_event_device decrementer_clockevent = {
        .name           = "decrementer",
        .rating         = 200,
        .shift          = 0,    /* To be filled in */
        .mult           = 0,    /* To be filled in */
        .irq            = 0,
        .set_next_event = decrementer_set_next_event,
        .set_mode       = decrementer_set_mode,
        .features       = CLOCK_EVT_FEAT_ONESHOT,
};

struct decrementer_clock {
        struct clock_event_device event;
        u64 next_tb;
};

static DEFINE_PER_CPU(struct decrementer_clock, decrementers);

#ifdef CONFIG_PPC_ISERIES
static unsigned long __initdata iSeries_recal_titan;
static signed long __initdata iSeries_recal_tb;

/* Forward declaration is only needed for iSeries compiles */
static void __init clocksource_init(void);
#endif

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
/* get this to multiply as a 64-bit value */
#define SCALE_XSEC(xsec, max)   (((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)   mulhwu((xsec) << 12, max)
#endif

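/*
 * Worked example (values hypothetical): with XSEC_PER_SEC = 2^20, one
 * xsec is about 0.95 us, and SCALE_XSEC(xsec, max) maps a sub-second
 * count in xsec units onto [0, max).  The 32-bit variant computes the
 * same quotient without a 64-bit divide: ((xsec << 12) * max) >> 32
 * equals (xsec * max) / 2^20, since 2^12 / 2^32 = 2^-20.
 */
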
unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100;  /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);        /* for cputime_t conversions */

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static unsigned long boot_tb __read_mostly;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL(ppc_proc_freq);
unsigned long ppc_tb_freq;

static DEFINE_PER_CPU(u64, last_jiffy);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_msec_factor;
EXPORT_SYMBOL(__cputime_msec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);
DEFINE_PER_CPU(unsigned long, cputime_last_delta);
DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);

cputime_t cputime_one_jiffy;

static void calc_cputime_factors(void)
{
        struct div_result res;

        div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
        __cputime_jiffies_factor = res.result_low;
        div128_by_32(1000, 0, tb_ticks_per_sec, &res);
        __cputime_msec_factor = res.result_low;
        div128_by_32(1, 0, tb_ticks_per_sec, &res);
        __cputime_sec_factor = res.result_low;
        div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
        __cputime_clockt_factor = res.result_low;
}

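/*
 * Sketch of how these factors are consumed (the conversion helpers
 * live in asm/cputime.h, not here): each factor is the 0.64 fixed-point
 * value of unit/tb_ticks_per_sec, so a conversion is one high multiply,
 * e.g. jiffies = mulhdu(ticks, __cputime_jiffies_factor).  With a
 * hypothetical tb_ticks_per_sec = 512000000 and HZ = 250, the jiffies
 * factor is 2^64 * 250 / 512000000 ~= 2^64 / 2048000, so the multiply
 * effectively divides the tick count by the 2048000 ticks per jiffy.
 */
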
/*
 * Read the PURR on systems that have it, otherwise the timebase.
 */
static u64 read_purr(void)
{
        if (cpu_has_feature(CPU_FTR_PURR))
                return mfspr(SPRN_PURR);
        return mftb();
}

/*
 * Read the SPURR on systems that have it, otherwise the PURR.
 */
static u64 read_spurr(u64 purr)
{
        /*
         * cpus without PURR won't have a SPURR;
         * we already know the former when we use this, so tell gcc
         */
        if (cpu_has_feature(CPU_FTR_PURR) && cpu_has_feature(CPU_FTR_SPURR))
                return mfspr(SPRN_SPURR);
        return purr;
}

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
void account_system_vtime(struct task_struct *tsk)
{
        u64 now, nowscaled, delta, deltascaled, sys_time;
        unsigned long flags;

        local_irq_save(flags);
        now = read_purr();
        nowscaled = read_spurr(now);
        delta = now - get_paca()->startpurr;
        deltascaled = nowscaled - get_paca()->startspurr;
        get_paca()->startpurr = now;
        get_paca()->startspurr = nowscaled;
        if (!in_interrupt()) {
                /* deltascaled includes both user and system time.
                 * Hence scale it based on the purr ratio to estimate
                 * the system time */
                sys_time = get_paca()->system_time;
                if (get_paca()->user_time)
                        deltascaled = deltascaled * sys_time /
                                (sys_time + get_paca()->user_time);
                delta += sys_time;
                get_paca()->system_time = 0;
        }
        if (in_irq() || idle_task(smp_processor_id()) != tsk)
                account_system_time(tsk, 0, delta, deltascaled);
        else
                account_idle_time(delta);
        __get_cpu_var(cputime_last_delta) = delta;
        __get_cpu_var(cputime_scaled_last_delta) = deltascaled;
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(account_system_vtime);

/*
 * Transfer the user and system times accumulated in the paca
 * by the exception entry and exit code to the generic process
 * user and system time records.
 * Must be called with interrupts disabled.
 */
void account_process_tick(struct task_struct *tsk, int user_tick)
{
        cputime_t utime, utimescaled;

        utime = get_paca()->user_time;
        get_paca()->user_time = 0;
        utimescaled = cputime_to_scaled(utime);
        account_user_time(tsk, utime, utimescaled);
}

/*
 * Stuff for accounting stolen time.
 */
struct cpu_purr_data {
        int     initialized;            /* thread is running */
        u64     tb;                     /* last TB value read */
        u64     purr;                   /* last PURR value read */
        u64     spurr;                  /* last SPURR value read */
};

/*
 * Each entry in the cpu_purr_data array is manipulated only by its
 * "owner" cpu -- usually in the timer interrupt but also occasionally
 * in process context for cpu online.  As long as cpus do not touch
 * each others' cpu_purr_data, disabling local interrupts is
 * sufficient to serialize accesses.
 */
static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);

static void snapshot_tb_and_purr(void *data)
{
        unsigned long flags;
        struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);

        local_irq_save(flags);
        p->tb = get_tb_or_rtc();
        p->purr = mfspr(SPRN_PURR);
        wmb();
        p->initialized = 1;
        local_irq_restore(flags);
}

/*
 * Called during boot when all cpus have come up.
 */
void snapshot_timebases(void)
{
        if (!cpu_has_feature(CPU_FTR_PURR))
                return;
        on_each_cpu(snapshot_tb_and_purr, NULL, 1);
}

/*
 * Must be called with interrupts disabled.
 */
void calculate_steal_time(void)
{
        u64 tb, purr;
        s64 stolen;
        struct cpu_purr_data *pme;

        pme = &__get_cpu_var(cpu_purr_data);
        if (!pme->initialized)
                return;         /* !CPU_FTR_PURR or early in boot */
        tb = mftb();
        purr = mfspr(SPRN_PURR);
        stolen = (tb - pme->tb) - (purr - pme->purr);
        if (stolen > 0) {
                if (idle_task(smp_processor_id()) != current)
                        account_steal_time(stolen);
                else
                        account_idle_time(stolen);
        }
        pme->tb = tb;
        pme->purr = purr;
}

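/*
 * Illustration of the computation above: the timebase advances with
 * wall-clock time, but the PURR only advances while this partition's
 * thread is actually dispatched.  If, say, 1000 tb ticks elapse while
 * the PURR moves by only 600, the remaining 400 ticks went to the
 * hypervisor or another partition, and are accounted as stolen time
 * (or as idle time if we were idle anyway).
 */
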
#ifdef CONFIG_PPC_SPLPAR
/*
 * Must be called before the cpu is added to the online map when
 * a cpu is being brought up at runtime.
 */
static void snapshot_purr(void)
{
        struct cpu_purr_data *pme;
        unsigned long flags;

        if (!cpu_has_feature(CPU_FTR_PURR))
                return;
        local_irq_save(flags);
        pme = &__get_cpu_var(cpu_purr_data);
        pme->tb = mftb();
        pme->purr = mfspr(SPRN_PURR);
        pme->initialized = 1;
        local_irq_restore(flags);
}

#endif /* CONFIG_PPC_SPLPAR */

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#define calculate_steal_time() do { } while (0)
#endif

#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
#define snapshot_purr()        do { } while (0)
#endif

/*
 * Called when a cpu comes up after the system has finished booting,
 * i.e. as a result of a hotplug cpu action.
 */
void snapshot_timebase(void)
{
        __get_cpu_var(last_jiffy) = get_tb_or_rtc();
        snapshot_purr();
}

void __delay(unsigned long loops)
{
        unsigned long start;
        int diff;

        if (__USE_RTC()) {
                start = get_rtcl();
                do {
                        /* the RTCL register wraps at 1000000000 */
                        diff = get_rtcl() - start;
                        if (diff < 0)
                                diff += 1000000000;
                } while (diff < loops);
        } else {
                start = get_tbl();
                while (get_tbl() - start < loops)
                        HMT_low();
                HMT_medium();
        }
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
        __delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);

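/*
 * Example: with a hypothetical 512 MHz timebase, tb_ticks_per_usec is
 * 512, so udelay(10) busy-waits in __delay() for about 5120 timebase
 * ticks, i.e. 10 microseconds.
 */
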
unsigned long profile_pc(struct pt_regs *regs)
{
        unsigned long pc = instruction_pointer(regs);

        if (in_lock_functions(pc))
                return regs->link;

        return pc;
}
EXPORT_SYMBOL(profile_pc);

#ifdef CONFIG_PPC_ISERIES

/*
 * This function recalibrates the timebase based on the 49-bit time-of-day
 * value in the Titan chip.  The Titan is much more accurate than the value
 * returned by the service processor for the timebase frequency.
 */

static int __init iSeries_tb_recal(void)
{
        unsigned long titan, tb;

        /* Make sure we only run on iSeries */
        if (!firmware_has_feature(FW_FEATURE_ISERIES))
                return -ENODEV;

        tb = get_tb();
        titan = HvCallXm_loadTod();
        if (iSeries_recal_titan) {
                unsigned long tb_ticks = tb - iSeries_recal_tb;
                unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
                unsigned long new_tb_ticks_per_sec = (tb_ticks * USEC_PER_SEC)/titan_usec;
                unsigned long new_tb_ticks_per_jiffy =
                        DIV_ROUND_CLOSEST(new_tb_ticks_per_sec, HZ);
                long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
                char sign = '+';

                /* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
                new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;

                if (tick_diff < 0) {
                        tick_diff = -tick_diff;
                        sign = '-';
                }
                if (tick_diff) {
                        if (tick_diff < tb_ticks_per_jiffy/25) {
                                printk("Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
                                       new_tb_ticks_per_jiffy, sign, tick_diff);
                                tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
                                tb_ticks_per_sec = new_tb_ticks_per_sec;
                                calc_cputime_factors();
                                vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
                                setup_cputime_one_jiffy();
                        } else {
                                printk("Titan recalibrate: FAILED (difference > 4 percent)\n"
                                       "                   new tb_ticks_per_jiffy = %lu\n"
                                       "                   old tb_ticks_per_jiffy = %lu\n",
                                       new_tb_ticks_per_jiffy, tb_ticks_per_jiffy);
                        }
                }
        }
        iSeries_recal_titan = titan;
        iSeries_recal_tb = tb;

        /* Called here as now we know accurate values for the timebase */
        clocksource_init();
        return 0;
}
late_initcall(iSeries_tb_recal);

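/*
 * Example of the recalibration arithmetic (numbers hypothetical, not
 * measured): the Titan value carries 12 sub-microsecond bits, so
 * (titan - iSeries_recal_titan) >> 12 is elapsed microseconds.  If
 * 5000000 us elapse while the timebase advances 2560000000 ticks,
 * new_tb_ticks_per_sec = 2560000000 * 1000000 / 5000000 = 512000000,
 * which with HZ = 250 rounds to 2048000 ticks per jiffy.
 */
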
/* Called from platform early init */
void __init iSeries_time_init_early(void)
{
        iSeries_recal_tb = get_tb();
        iSeries_recal_titan = HvCallXm_loadTod();
}
#endif /* CONFIG_PPC_ISERIES */

#ifdef CONFIG_PERF_EVENTS

/*
 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 */
#ifdef CONFIG_PPC64

static inline unsigned long test_perf_event_pending(void)
{
        unsigned long x;

        asm volatile("lbz %0,%1(13)"
                : "=r" (x)
                : "i" (offsetof(struct paca_struct, perf_event_pending)));
        return x;
}

static inline void set_perf_event_pending_flag(void)
{
        asm volatile("stb %0,%1(13)" : :
                "r" (1),
                "i" (offsetof(struct paca_struct, perf_event_pending)));
}

static inline void clear_perf_event_pending(void)
{
        asm volatile("stb %0,%1(13)" : :
                "r" (0),
                "i" (offsetof(struct paca_struct, perf_event_pending)));
}

#else /* 32-bit */

DEFINE_PER_CPU(u8, perf_event_pending);

#define set_perf_event_pending_flag()   __get_cpu_var(perf_event_pending) = 1
#define test_perf_event_pending()       __get_cpu_var(perf_event_pending)
#define clear_perf_event_pending()      __get_cpu_var(perf_event_pending) = 0

#endif /* 32 vs 64 bit */

void set_perf_event_pending(void)
{
        preempt_disable();
        set_perf_event_pending_flag();
        set_dec(1);
        preempt_enable();
}

#else  /* CONFIG_PERF_EVENTS */

#define test_perf_event_pending()       0
#define clear_perf_event_pending()

#endif /* CONFIG_PERF_EVENTS */

/*
 * For iSeries shared processors, we have to let the hypervisor
 * set the hardware decrementer.  We set a virtual decrementer
 * in the lppaca and call the hypervisor if the virtual
 * decrementer is less than the current value in the hardware
 * decrementer. (almost always the new decrementer value will
 * be greater than the current hardware decrementer so the hypervisor
 * call will not be needed)
 */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        struct decrementer_clock *decrementer = &__get_cpu_var(decrementers);
        struct clock_event_device *evt = &decrementer->event;
        u64 now;

        trace_timer_interrupt_entry(regs);

        __get_cpu_var(irq_stat).timer_irqs++;

        /* Ensure a positive value is written to the decrementer, or else
         * some CPUs will continue to take decrementer exceptions */
        set_dec(DECREMENTER_MAX);

#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
        if (atomic_read(&ppc_n_lost_interrupts) != 0)
                do_IRQ(regs);
#endif

        old_regs = set_irq_regs(regs);
        irq_enter();

        calculate_steal_time();

        if (test_perf_event_pending()) {
                clear_perf_event_pending();
                perf_event_do_pending();
        }

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                get_lppaca()->int_dword.fields.decr_int = 0;
#endif

        now = get_tb_or_rtc();
        if (now >= decrementer->next_tb) {
                decrementer->next_tb = ~(u64)0;
                if (evt->event_handler)
                        evt->event_handler(evt);
        } else {
                now = decrementer->next_tb - now;
                if (now <= DECREMENTER_MAX)
                        set_dec((int)now);
        }

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
                process_hvlpevents();
#endif

#ifdef CONFIG_PPC64
        /* collect purr register values often, for accurate calculations */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
                cu->current_tb = mfspr(SPRN_PURR);
        }
#endif

        irq_exit();
        set_irq_regs(old_regs);

        trace_timer_interrupt_exit(regs);
}

#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
        /* Disable the decrementer, so that it doesn't interfere
         * with suspending.
         */

        set_dec(0x7fffffff);
        local_irq_disable();
        set_dec(0x7fffffff);
}

static void generic_suspend_enable_irqs(void)
{
        local_irq_enable();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
        if (ppc_md.suspend_disable_irqs)
                ppc_md.suspend_disable_irqs();
        generic_suspend_disable_irqs();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
        generic_suspend_enable_irqs();
        if (ppc_md.suspend_enable_irqs)
                ppc_md.suspend_enable_irqs();
}
#endif

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
        if (__USE_RTC())
                return get_rtc();
        return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}

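/*
 * Worked example of the scale factor (hypothetical 512 MHz timebase):
 * time_init() computes 1e9 * 2^64 / 512000000, i.e. 1.953125 as a
 * 64.64 fixed-point number.  One right shift brings it below 1.0
 * (0.9765625), so tb_to_ns_shift = 1 and sched_clock() returns
 * (delta * 0.9765625) << 1 = delta * 1.953125 ns -- exactly
 * delta / 0.512 ns for a 512 MHz clock.
 */
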
static int __init get_freq(char *name, int cells, unsigned long *val)
{
        struct device_node *cpu;
        const unsigned int *fp;
        int found = 0;

        /* The cpu node should have timebase and clock frequency properties */
        cpu = of_find_node_by_type(NULL, "cpu");

        if (cpu) {
                fp = of_get_property(cpu, name, NULL);
                if (fp) {
                        found = 1;
                        *val = of_read_ulong(fp, cells);
                }

                of_node_put(cpu);
        }

        return found;
}

/* should become __cpuinit when secondary_cpu_time_init also is */
void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
        /* Clear any pending timer interrupts */
        mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

        /* Enable decrementer interrupt */
        mtspr(SPRN_TCR, TCR_DIE);
#endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */
}

void __init generic_calibrate_decr(void)
{
        ppc_tb_freq = DEFAULT_TB_FREQ;          /* hardcoded default */

        if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
            !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

                printk(KERN_ERR "WARNING: Estimating decrementer frequency "
                                "(not found)\n");
        }

        ppc_proc_freq = DEFAULT_PROC_FREQ;      /* hardcoded default */

        if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
            !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

                printk(KERN_ERR "WARNING: Estimating processor frequency "
                                "(not found)\n");
        }
}

int update_persistent_clock(struct timespec now)
{
        struct rtc_time tm;

        if (!ppc_md.set_rtc_time)
                return 0;

        to_tm(now.tv_sec + 1 + timezone_offset, &tm);
        tm.tm_year -= 1900;
        tm.tm_mon -= 1;

        return ppc_md.set_rtc_time(&tm);
}

static void __read_persistent_clock(struct timespec *ts)
{
        struct rtc_time tm;
        static int first = 1;

        ts->tv_nsec = 0;
        /* XXX this is a little fragile but will work okay in the short term */
        if (first) {
                first = 0;
                if (ppc_md.time_init)
                        timezone_offset = ppc_md.time_init();

                /* get_boot_time() isn't guaranteed to be safe to call late */
                if (ppc_md.get_boot_time) {
                        ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
                        return;
                }
        }
        if (!ppc_md.get_rtc_time) {
                ts->tv_sec = 0;
                return;
        }
        ppc_md.get_rtc_time(&tm);

        ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
                            tm.tm_hour, tm.tm_min, tm.tm_sec);
}

void read_persistent_clock(struct timespec *ts)
{
        __read_persistent_clock(ts);

        /* Sanitize it in case real time clock is set below EPOCH */
        if (ts->tv_sec < 0) {
                ts->tv_sec = 0;
                ts->tv_nsec = 0;
        }
}

/* clocksource code */
static cycle_t rtc_read(struct clocksource *cs)
{
        return (cycle_t)get_rtc();
}

static cycle_t timebase_read(struct clocksource *cs)
{
        return (cycle_t)get_tb();
}

void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
                     struct clocksource *clock, u32 mult)
{
        u64 new_tb_to_xs, new_stamp_xsec;
        u32 frac_sec;

        if (clock != &clocksource_timebase)
                return;

        /* Make userspace gettimeofday spin until we're done. */
        ++vdso_data->tb_update_count;
        smp_mb();

        /* XXX this assumes clock->shift == 22 */
        /* 4611686018 ~= 2^(20+64-22) / 1e9 */
        new_tb_to_xs = (u64) mult * 4611686018ULL;
        new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
        do_div(new_stamp_xsec, 1000000000);
        new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;

        BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
        /* this is tv_nsec / 1e9 as a 0.32 fraction */
        frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;

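        /*
         * Constant check: 18446744073 = floor(2^64 / 1e9), so the line
         * above computes tv_nsec * 2^32 / 1e9, the sub-second part as a
         * 0.32 binary fraction.  Likewise 4611686018 = floor(2^62 / 1e9):
         * ns per cycle is mult / 2^22, so xsec per cycle as a 0.64
         * fraction is mult * 2^20 * 2^64 / (2^22 * 1e9) = mult * 2^62 / 1e9.
         */
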
        /*
         * tb_update_count is used to allow the userspace gettimeofday code
         * to assure itself that it sees a consistent view of the tb_to_xs and
         * stamp_xsec variables.  It reads the tb_update_count, then reads
         * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
         * the two values of tb_update_count match and are even then the
         * tb_to_xs and stamp_xsec values are consistent.  If not, then it
         * loops back and reads them again until this criterion is met.
         * We expect the caller to have done the first increment of
         * vdso_data->tb_update_count already.
         */
        vdso_data->tb_orig_stamp = clock->cycle_last;
        vdso_data->stamp_xsec = new_stamp_xsec;
        vdso_data->tb_to_xs = new_tb_to_xs;
        vdso_data->wtom_clock_sec = wtm->tv_sec;
        vdso_data->wtom_clock_nsec = wtm->tv_nsec;
        vdso_data->stamp_xtime = *wall_time;
        vdso_data->stamp_sec_fraction = frac_sec;
        smp_wmb();
        ++(vdso_data->tb_update_count);
}

void update_vsyscall_tz(void)
{
        /* Make userspace gettimeofday spin until we're done. */
        ++vdso_data->tb_update_count;
        smp_mb();
        vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
        vdso_data->tz_dsttime = sys_tz.tz_dsttime;
        smp_mb();
        ++vdso_data->tb_update_count;
}

static void __init clocksource_init(void)
{
        struct clocksource *clock;

        if (__USE_RTC())
                clock = &clocksource_rtc;
        else
                clock = &clocksource_timebase;

        clock->mult = clocksource_hz2mult(tb_ticks_per_sec, clock->shift);

        if (clocksource_register(clock)) {
                printk(KERN_ERR "clocksource: %s is already registered\n",
                       clock->name);
                return;
        }

        printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
               clock->name, clock->mult, clock->shift);
}

static int decrementer_set_next_event(unsigned long evt,
                                      struct clock_event_device *dev)
{
        __get_cpu_var(decrementers).next_tb = get_tb_or_rtc() + evt;
        set_dec(evt);
        return 0;
}

static void decrementer_set_mode(enum clock_event_mode mode,
                                 struct clock_event_device *dev)
{
        if (mode != CLOCK_EVT_MODE_ONESHOT)
                decrementer_set_next_event(DECREMENTER_MAX, dev);
}

static inline uint64_t div_sc64(unsigned long ticks, unsigned long nsec,
                                int shift)
{
        uint64_t tmp = ((uint64_t)ticks) << shift;

        do_div(tmp, nsec);
        return tmp;
}

static void __init setup_clockevent_multiplier(unsigned long hz)
{
        u64 mult, shift = 32;

        while (1) {
                mult = div_sc64(hz, NSEC_PER_SEC, shift);
                if (mult && (mult >> 32UL) == 0UL)
                        break;

                shift--;
        }

        decrementer_clockevent.shift = shift;
        decrementer_clockevent.mult = mult;
}

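/*
 * Example: the clockevent core converts nanoseconds to ticks as
 * ticks = (ns * mult) >> shift, so mult must be hz * 2^shift / 1e9.
 * For a hypothetical hz = 512000000 with shift = 32, div_sc64() gives
 * mult = 512000000 * 2^32 / 1e9 ~= 2199023255, which fits in 32 bits,
 * so the loop keeps shift = 32 on its first pass.
 */
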
static void register_decrementer_clockevent(int cpu)
{
        struct clock_event_device *dec = &per_cpu(decrementers, cpu).event;

        *dec = decrementer_clockevent;
        dec->cpumask = cpumask_of(cpu);

        printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
                    dec->name, dec->mult, dec->shift, cpu);

        clockevents_register_device(dec);
}

static void __init init_decrementer_clockevent(void)
{
        int cpu = smp_processor_id();

        setup_clockevent_multiplier(ppc_tb_freq);
        decrementer_clockevent.max_delta_ns =
                clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
        decrementer_clockevent.min_delta_ns =
                clockevent_delta2ns(2, &decrementer_clockevent);

        register_decrementer_clockevent(cpu);
}

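/*
 * Range check on those deltas: DECREMENTER_MAX is 2^31 - 1 ticks, so
 * with a hypothetical 512 MHz timebase max_delta_ns comes out near
 * 4.19 seconds, while min_delta_ns (2 ticks) is only a few ns.
 */
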
void secondary_cpu_time_init(void)
{
        /* Start the decrementer on CPUs that have manual control
         * such as BookE
         */
        start_cpu_decrementer();

        /* FIXME: Should make unrelated change to move snapshot_timebase
         * call here! */
        register_decrementer_clockevent(smp_processor_id());
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
        struct div_result res;
        u64 scale;
        unsigned shift;

        if (__USE_RTC()) {
                /* 601 processor: dec counts down by 128 every 128ns */
                ppc_tb_freq = 1000000000;
        } else {
                /* Normal PowerPC with timebase register */
                ppc_md.calibrate_decr();
                printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
                       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
                printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
                       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
        }

        tb_ticks_per_jiffy = ppc_tb_freq / HZ;
        tb_ticks_per_sec = ppc_tb_freq;
        tb_ticks_per_usec = ppc_tb_freq / 1000000;
        calc_cputime_factors();
        setup_cputime_one_jiffy();

        /*
         * Compute scale factor for sched_clock.
         * The calibrate_decr() function has set tb_ticks_per_sec,
         * which is the timebase frequency.
         * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
         * the 128-bit result as a 64.64 fixed-point number.
         * We then shift that number right until it is less than 1.0,
         * giving us the scale factor and shift count to use in
         * sched_clock().
         */
        div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
        scale = res.result_low;
        for (shift = 0; res.result_high != 0; ++shift) {
                scale = (scale >> 1) | (res.result_high << 63);
                res.result_high >>= 1;
        }
        tb_to_ns_scale = scale;
        tb_to_ns_shift = shift;
        /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
        boot_tb = get_tb_or_rtc();

        /* If platform provided a timezone (pmac), we correct the time */
        if (timezone_offset) {
                sys_tz.tz_minuteswest = -timezone_offset / 60;
                sys_tz.tz_dsttime = 0;
        }

        vdso_data->tb_update_count = 0;
        vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;

        /* Start the decrementer on CPUs that have manual control
         * such as BookE
         */
        start_cpu_decrementer();

        /* Register the clocksource, if we're not running on iSeries */
        if (!firmware_has_feature(FW_FEATURE_ISERIES))
                clocksource_init();

        init_decrementer_clockevent();
}

#define FEBRUARY        2
#define STARTOFTIME     1970
#define SECDAY          86400L
#define SECYR           (SECDAY * 365)
#define leapyear(year)          ((year) % 4 == 0 && \
                                 ((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a)         (leapyear(a) ? 366 : 365)
#define days_in_month(a)        (month_days[(a) - 1])

static int month_days[12] = {
        31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time *tm)
{
        int leapsToDate;
        int lastYear;
        int day;
        int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

        lastYear = tm->tm_year - 1;

        /*
         * Number of leap corrections to apply up to end of last year
         */
        leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

        /*
         * This year is a leap year if it is divisible by 4 except when it is
         * divisible by 100 unless it is divisible by 400
         *
         * e.g. 1904 was a leap year, 1900 was not, 1996 was, and 2000 was
         */
        day = tm->tm_mon > 2 && leapyear(tm->tm_year);

        day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
                   tm->tm_mday;

        tm->tm_wday = day % 7;
}

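/*
 * Worked example: for 2000-01-01, lastYear = 1999, leapsToDate =
 * 499 - 19 + 4 = 484, day = 0 (January), and the total is
 * 1999 * 365 + 484 + 0 + 1 = 730120.  730120 % 7 = 6, i.e. Saturday
 * (with 0 = Sunday), which matches the calendar.
 */
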
void to_tm(int tim, struct rtc_time *tm)
{
        register int i;
        register long hms, day;

        day = tim / SECDAY;
        hms = tim % SECDAY;

        /* Hours, minutes, seconds are easy */
        tm->tm_hour = hms / 3600;
        tm->tm_min = (hms % 3600) / 60;
        tm->tm_sec = (hms % 3600) % 60;

        /* Number of years in days */
        for (i = STARTOFTIME; day >= days_in_year(i); i++)
                day -= days_in_year(i);
        tm->tm_year = i;

        /* Number of months in days left */
        if (leapyear(tm->tm_year))
                days_in_month(FEBRUARY) = 29;
        for (i = 1; day >= days_in_month(i); i++)
                day -= days_in_month(i);
        days_in_month(FEBRUARY) = 28;
        tm->tm_mon = i;

        /* Days are what is left over (+1) from all that. */
        tm->tm_mday = day + 1;

        /*
         * Determine the day of week
         */
        GregorianDay(tm);
}

/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
                  unsigned divisor, struct div_result *dr)
{
        unsigned long a, b, c, d;
        unsigned long w, x, y, z;
        u64 ra, rb, rc;

        a = dividend_high >> 32;
        b = dividend_high & 0xffffffff;
        c = dividend_low >> 32;
        d = dividend_low & 0xffffffff;

        w = a / divisor;
        ra = ((u64)(a - (w * divisor)) << 32) + b;

        rb = ((u64) do_div(ra, divisor) << 32) + c;
        x = ra;

        rc = ((u64) do_div(rb, divisor) << 32) + d;
        y = rb;

        do_div(rc, divisor);
        z = rc;

        dr->result_high = ((u64)w << 32) + x;
        dr->result_low  = ((u64)y << 32) + z;
}

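/*
 * This is ordinary long division on four 32-bit "digits": w, x, y, z
 * are the quotient digits, and each step divides the previous
 * remainder, shifted up 32 bits, plus the next digit.  For example,
 * dividing dividend_high = 10, dividend_low = 0 (i.e. 10 * 2^64) by 4
 * yields result_high = 2 and result_low = 2^63, i.e. 2.5 * 2^64.
 */
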
/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
        /* Some generic code (such as spinlock debug) uses loops_per_jiffy
         * as the number of __delay(1) calls in a jiffy, so make it so
         */
        loops_per_jiffy = tb_ticks_per_jiffy;
}

static int __init rtc_init(void)
{
        struct platform_device *pdev;

        if (!ppc_md.get_rtc_time)
                return -ENODEV;

        pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
        if (IS_ERR(pdev))
                return PTR_ERR(pdev);

        return 0;
}

module_init(rtc_init);