/*
 * linux/drivers/clocksource/arm_arch_timer.c
 *
 * Copyright (C) 2011 ARM Ltd.
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	"arm_arch_timer: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched_clock.h>
#include <linux/acpi.h>

#include <asm/arch_timer.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

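/*
 * Register offsets for the memory-mapped timer: CNTTIDR and CNTACR live
 * in the CNTCTLBase frame, while the CNTVCT/CNTFRQ/CNTP/CNTV offsets
 * below are relative to each per-frame CNTBaseN region.
 */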
#define CNTTIDR		0x08
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))

#define CNTACR(n)	(0x40 + ((n) * 4))
#define CNTACR_RPCT	BIT(0)
#define CNTACR_RVCT	BIT(1)
#define CNTACR_RFRQ	BIT(2)
#define CNTACR_RVOFF	BIT(3)
#define CNTACR_RWVT	BIT(4)
#define CNTACR_RWPT	BIT(5)

#define CNTVCT_LO	0x08
#define CNTVCT_HI	0x0c
#define CNTFRQ		0x10
#define CNTP_TVAL	0x28
#define CNTP_CTL	0x2c
#define CNTV_TVAL	0x38
#define CNTV_CTL	0x3c

#define ARCH_CP15_TIMER	BIT(0)
#define ARCH_MEM_TIMER	BIT(1)
static unsigned arch_timers_present __initdata;

static void __iomem *arch_counter_base;

struct arch_timer {
	void __iomem *base;
	struct clock_event_device evt;
};

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

static u32 arch_timer_rate;

enum ppi_nr {
	PHYS_SECURE_PPI,
	PHYS_NONSECURE_PPI,
	VIRT_PPI,
	HYP_PPI,
	MAX_TIMER_PPI
};

static int arch_timer_ppi[MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;

static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
static bool arch_counter_suspend_stop;
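/*
 * Whether the vDSO may read CNTVCT directly; cleared if an erratum
 * workaround forces counter reads through the out-of-line accessor
 * (see arch_timer_enable_workaround()).
 */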
static bool vdso_default = true;

static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);

static int __init early_evtstrm_cfg(char *buf)
{
	return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);

/*
 * Architected system timer support.
 */

static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
			  struct clock_event_device *clk)
{
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTV_TVAL);
			break;
		}
	} else {
		arch_timer_reg_write_cp15(access, reg, val);
	}
}

static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTV_TVAL);
			break;
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}

/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;

static u64 arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}

static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

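/*
 * This cyclecounter backs the timecounter handed to KVM through
 * arch_timer_kvm_info (see arch_counter_register()).
 */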
static struct cyclecounter cyclecounter __ro_after_init = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

#ifdef CONFIG_FSL_ERRATUM_A008585
/*
 * The number of retries is an arbitrary value well beyond the highest number
 * of iterations the loop has been observed to take.
 */
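/*
 * The erratum can cause a single read of the counter or timer registers
 * to return a wrong value; reading twice and retrying until both reads
 * agree filters out the bad samples.
 */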
#define __fsl_a008585_read_reg(reg) ({			\
	u64 _old, _new;					\
	int _retries = 200;				\
							\
	do {						\
		_old = read_sysreg(reg);		\
		_new = read_sysreg(reg);		\
		_retries--;				\
	} while (unlikely(_old != _new) && _retries);	\
							\
	WARN_ON_ONCE(!_retries);			\
	_new;						\
})

static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntp_tval_el0);
}

static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntv_tval_el0);
}

static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
#endif

#ifdef CONFIG_HISILICON_ERRATUM_161010101
/*
 * Verifying that the value of the second read is larger than the first by
 * less than 32 is the only way to confirm the value is correct, so clear the
 * lower 5 bits to check whether the difference is greater than 32 or not.
 * Theoretically the erratum should not occur more than twice in succession
 * when reading the system counter, but it is possible that some interrupts
 * may lead to more than twice read errors, triggering the warning, so setting
 * the number of retries far beyond the number of iterations the loop has been
 * observed to take.
 */
#define __hisi_161010101_read_reg(reg) ({				\
	u64 _old, _new;						\
	int _retries = 50;					\
								\
	do {							\
		_old = read_sysreg(reg);			\
		_new = read_sysreg(reg);			\
		_retries--;					\
	} while (unlikely((_new - _old) >> 5) && _retries);	\
								\
	WARN_ON_ONCE(!_retries);				\
	_new;							\
})

static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntp_tval_el0);
}

static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntv_tval_el0);
}

static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
	return __hisi_161010101_read_reg(cntvct_el0);
}
#endif

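/*
 * Cortex-A73 erratum 858921: a counter read that races with a roll-over
 * of the low 32 bits can return a corrupted value. Read twice: if bit 32
 * changed between the two reads, a roll-over happened in between and the
 * first read is the trustworthy one; otherwise use the second.
 */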
#ifdef CONFIG_ARM64_ERRATUM_858921
static u64 notrace arm64_858921_read_cntvct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntvct_el0);
	new = read_sysreg(cntvct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}
#endif

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *,
	       timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);

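/*
 * On cores with broken TVAL accesses, program the timer via an absolute
 * deadline instead: take a (workaround-stable) counter snapshot, add the
 * requested delta and write the result to the CVAL comparator.
 */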
static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
						struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cval = evt + arch_counter_get_cntvct();

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS)
		write_sysreg(cval, cntp_cval_el0);
	else
		write_sysreg(cval, cntv_cval_el0);

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int erratum_set_next_event_tval_virt(unsigned long evt,
					    struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int erratum_set_next_event_tval_phys(unsigned long evt,
					    struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static const struct arch_timer_erratum_workaround ool_workarounds[] = {
#ifdef CONFIG_FSL_ERRATUM_A008585
	{
		.match_type = ate_match_dt,
		.id = "fsl,erratum-a008585",
		.desc = "Freescale erratum a008585",
		.read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
		.read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
		.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
	{
		.match_type = ate_match_dt,
		.id = "hisilicon,erratum-161010101",
		.desc = "HiSilicon erratum 161010101",
		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		.match_type = ate_match_local_cap_id,
		.id = (void *)ARM64_WORKAROUND_858921,
		.desc = "ARM erratum 858921",
		.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
	},
#endif
};

typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
			       const void *);

static
bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
				 const void *arg)
{
	const struct device_node *np = arg;

	return of_property_read_bool(np, wa->id);
}

static
bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
					const void *arg)
{
	return this_cpu_has_cap((uintptr_t)wa->id);
}

static const struct arch_timer_erratum_workaround *
arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
			  ate_match_fn_t match_fn,
			  void *arg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
		if (ool_workarounds[i].match_type != type)
			continue;

		if (match_fn(&ool_workarounds[i], arg))
			return &ool_workarounds[i];
	}

	return NULL;
}

static
void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
				  bool local)
{
	int i;

	if (local) {
		__this_cpu_write(timer_unstable_counter_workaround, wa);
	} else {
		for_each_possible_cpu(i)
			per_cpu(timer_unstable_counter_workaround, i) = wa;
	}

	static_branch_enable(&arch_timer_read_ool_enabled);

	/*
	 * Don't use the vdso fastpath if errata require using the
	 * out-of-line counter accessor. We may change our mind pretty
	 * late in the game (with a per-CPU erratum, for example), so
	 * change both the default value and the vdso itself.
	 */
	if (wa->read_cntvct_el0) {
		clocksource_counter.archdata.vdso_direct = false;
		vdso_default = false;
	}
}

static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
					    void *arg)
{
	const struct arch_timer_erratum_workaround *wa;
	ate_match_fn_t match_fn = NULL;
	bool local = false;

	switch (type) {
	case ate_match_dt:
		match_fn = arch_timer_check_dt_erratum;
		break;
	case ate_match_local_cap_id:
		match_fn = arch_timer_check_local_cap_erratum;
		local = true;
		break;
	default:
		WARN_ON(1);
		return;
	}

	wa = arch_timer_iterate_errata(type, match_fn, arg);
	if (!wa)
		return;

	if (needs_unstable_timer_counter_workaround()) {
		const struct arch_timer_erratum_workaround *__wa;
		__wa = __this_cpu_read(timer_unstable_counter_workaround);
		if (__wa && wa != __wa)
			pr_warn("Can't enable workaround for %s (clashes with %s)\n",
				wa->desc, __wa->desc);

		if (__wa)
			return;
	}

	arch_timer_enable_workaround(wa, local);
	pr_info("Enabling %s workaround for %s\n",
		local ? "local" : "global", wa->desc);
}

#define erratum_handler(fn, r, ...)					\
({									\
	bool __val;							\
	if (needs_unstable_timer_counter_workaround()) {		\
		const struct arch_timer_erratum_workaround *__wa;	\
		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
		if (__wa && __wa->fn) {					\
			r = __wa->fn(__VA_ARGS__);			\
			__val = true;					\
		} else {						\
			__val = false;					\
		}							\
	} else {							\
		__val = false;						\
	}								\
	__val;								\
})

static bool arch_timer_this_cpu_has_cntvct_wa(void)
{
	const struct arch_timer_erratum_workaround *wa;

	wa = __this_cpu_read(timer_unstable_counter_workaround);
	return wa && wa->read_cntvct_el0;
}
#else
#define arch_timer_check_ool_workaround(t,a)		do { } while(0)
#define erratum_set_next_event_tval_virt(...)		({BUG(); 0;})
#define erratum_set_next_event_tval_phys(...)		({BUG(); 0;})
#define erratum_handler(fn, r, ...)			({false;})
#define arch_timer_this_cpu_has_cntvct_wa()		({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */

static __always_inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}

static __always_inline int timer_shutdown(const int access,
					  struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);

	return 0;
}

static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}

static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}

static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	int ret;

	if (erratum_handler(set_next_event_virt, ret, evt, clk))
		return ret;

	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	int ret;

	if (erratum_handler(set_next_event_phys, ret, evt, clk))
		return ret;

	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}

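/*
 * Common clock_event_device setup for both the per-CPU cp15 timer and
 * the global memory-mapped timer; picks the shutdown/set_next_event
 * callbacks matching the access mode and registers the device.
 */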
static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_CP15_TIMER) {
		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
		switch (arch_timer_uses_ppi) {
		case VIRT_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_virt;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
			clk->set_next_event = arch_timer_set_next_event_virt;
			break;
		case PHYS_SECURE_PPI:
		case PHYS_NONSECURE_PPI:
		case HYP_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_phys;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
			clk->set_next_event = arch_timer_set_next_event_phys;
			break;
		default:
			BUG();
		}

		arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_all_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}
	}

	clk->set_state_shutdown(clk);

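	/*
	 * TVAL is a signed 32-bit down-counter, so the largest safe one-shot
	 * delta is 0x7fffffff ticks; 0xf is a conservative minimum delta.
	 */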
	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}

static void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	elf_hwcap |= HWCAP_EVTSTRM;
#ifdef CONFIG_COMPAT
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
}

static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, pos;

	/* Find the closest power of two to the divisor */
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
	pos = fls(evt_stream_div);
	if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
		pos--;
	/* enable event stream */
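	/* the CNTKCTL EVNTI divider field is only 4 bits wide, hence the cap at 15 */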
	arch_timer_evtstrm_enable(min(pos, 15));
}

static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and both counters */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_USR_VCT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

	/*
	 * Enable user access to the virtual counter if it doesn't
	 * need to be worked around. The vdso may have been already
	 * disabled though.
	 */
	if (arch_timer_this_cpu_has_cntvct_wa())
		pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
	else
		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

	arch_timer_set_cntkctl(cntkctl);
}

static bool arch_timer_has_nonsecure_ppi(void)
{
	return (arch_timer_uses_ppi == PHYS_SECURE_PPI &&
		arch_timer_ppi[PHYS_NONSECURE_PPI]);
}

static u32 check_ppi_trigger(int irq)
{
	u32 flags = irq_get_trigger_type(irq);

	if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
		pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
		pr_warn("WARNING: Please fix your firmware\n");
		flags = IRQF_TRIGGER_LOW;
	}

	return flags;
}

static int arch_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
	u32 flags;

	__arch_timer_setup(ARCH_CP15_TIMER, clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags);
	}

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();

	return 0;
}

static void
arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	/*
	 * Try to determine the frequency from the device tree or CNTFRQ,
	 * if ACPI is enabled, get the frequency from CNTFRQ ONLY.
	 */
	if (!acpi_disabled ||
	    of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
		if (cntbase)
			arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
		else
			arch_timer_rate = arch_timer_get_cntfrq();
	}

	/* Check the timer frequency. */
	if (arch_timer_rate == 0)
		pr_warn("Architected timer frequency not available\n");
}

static void arch_timer_banner(unsigned type)
{
	pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
		type & ARCH_CP15_TIMER ? "cp15" : "",
		type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? " and " : "",
		type & ARCH_MEM_TIMER ? "mmio" : "",
		(unsigned long)arch_timer_rate / 1000000,
		(unsigned long)(arch_timer_rate / 10000) % 100,
		type & ARCH_CP15_TIMER ?
			(arch_timer_uses_ppi == VIRT_PPI) ? "virt" : "phys" :
			"",
		type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "",
		type & ARCH_MEM_TIMER ?
			arch_timer_mem_use_virtual ? "virt" : "phys" :
			"");
}

u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}

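/*
 * The memory-mapped counter is a pair of 32-bit registers; read
 * hi-lo-hi and retry until both high-word reads agree so that a
 * low-word roll-over cannot yield a torn 64-bit value.
 */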
static u64 arch_counter_get_cntvct_mem(void)
{
	u32 vct_lo, vct_hi, tmp_hi;

	do {
		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
	} while (vct_hi != tmp_hi);

	return ((u64) vct_hi << 32) | vct_lo;
}

static struct arch_timer_kvm_info arch_timer_kvm_info;

struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
	return &arch_timer_kvm_info;
}

static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_CP15_TIMER) {
		if (IS_ENABLED(CONFIG_ARM64) || arch_timer_uses_ppi == VIRT_PPI)
			arch_timer_read_counter = arch_counter_get_cntvct;
		else
			arch_timer_read_counter = arch_counter_get_cntpct;

		clocksource_counter.archdata.vdso_direct = vdso_default;
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;
	}

	if (!arch_counter_suspend_stop)
		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}

static void arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
		 clk->irq, smp_processor_id());

	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
	if (arch_timer_has_nonsecure_ppi())
		disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);

	clk->set_state_shutdown(clk);
}

static int arch_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);

	arch_timer_stop(clk);
	return 0;
}

#ifdef CONFIG_CPU_PM
static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
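/*
 * CNTKCTL is not necessarily preserved when a core enters a low-power
 * state, so save it on CPU_PM_ENTER and restore it on the way out.
 */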
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER)
		__this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());
	else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
		arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}

static void __init arch_timer_cpu_pm_deinit(void)
{
	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
}
#else
static int __init arch_timer_cpu_pm_init(void)
{
	return 0;
}

static void __init arch_timer_cpu_pm_deinit(void)
{
}
#endif

static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	ppi = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case VIRT_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
		break;
	case PHYS_SECURE_PPI:
	case PHYS_NONSECURE_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
		break;
	case HYP_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		break;
	default:
		BUG();
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

	/* Register and immediately configure the timer on the boot CPU */
	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
				"clockevents/arm/arch_timer:starting",
				arch_timer_starting_cpu, arch_timer_dying_cpu);
	if (err)
		goto out_unreg_cpupm;
	return 0;

out_unreg_cpupm:
	arch_timer_cpu_pm_deinit();

out_unreg_notify:
	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
	if (arch_timer_has_nonsecure_ppi())
		free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
				arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}

static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
	int ret;
	irq_handler_t func;
	struct arch_timer *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	t->base = base;
	t->evt.irq = irq;
	__arch_timer_setup(ARCH_MEM_TIMER, &t->evt);

	if (arch_timer_mem_use_virtual)
		func = arch_timer_handler_virt_mem;
	else
		func = arch_timer_handler_phys_mem;

	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
	if (ret) {
		pr_err("arch_timer: Failed to request mem timer irq\n");
		kfree(t);
	}

	return ret;
}

static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible	= "arm,armv7-timer",	},
	{ .compatible	= "arm,armv8-timer",	},
	{},
};

static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible	= "arm,armv7-timer-mem", },
	{},
};

static bool __init
arch_timer_needs_probing(int type, const struct of_device_id *matches)
{
	struct device_node *dn;
	bool needs_probing = false;

	dn = of_find_matching_node(NULL, matches);
	if (dn && of_device_is_available(dn) && !(arch_timers_present & type))
		needs_probing = true;
	of_node_put(dn);

	return needs_probing;
}

static int __init arch_timer_common_init(void)
{
	unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;

	/* Wait until both nodes are probed if we have two timers */
	if ((arch_timers_present & mask) != mask) {
		if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match))
			return 0;
		if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match))
			return 0;
	}

	arch_timer_banner(arch_timers_present);
	arch_counter_register(arch_timers_present);
	return arch_timer_arch_init();
}

static int __init arch_timer_init(void)
{
	int ret;
	/*
	 * If HYP mode is available, we know that the physical timer
	 * has been configured to be accessible from PL1. Use it, so
	 * that a guest can use the virtual timer instead.
	 *
	 * If no interrupt provided for virtual timer, we'll have to
	 * stick to the physical timer. It'd better be accessible...
	 *
	 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
	 * accesses to CNTP_*_EL1 registers are silently redirected to
	 * their CNTHP_*_EL2 counterparts, and use a different PPI
	 * number.
	 */
	if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
		bool has_ppi;

		if (is_kernel_in_hyp_mode()) {
			arch_timer_uses_ppi = HYP_PPI;
			has_ppi = !!arch_timer_ppi[HYP_PPI];
		} else {
			arch_timer_uses_ppi = PHYS_SECURE_PPI;
			has_ppi = (!!arch_timer_ppi[PHYS_SECURE_PPI] ||
				   !!arch_timer_ppi[PHYS_NONSECURE_PPI]);
		}

		if (!has_ppi) {
			pr_warn("arch_timer: No interrupt available, giving up\n");
			return -EINVAL;
		}
	}

	ret = arch_timer_register();
	if (ret)
		return ret;

	ret = arch_timer_common_init();
	if (ret)
		return ret;

	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];

	return 0;
}

static int __init arch_timer_of_init(struct device_node *np)
{
	int i;

	if (arch_timers_present & ARCH_CP15_TIMER) {
		pr_warn("arch_timer: multiple nodes in dt, skipping\n");
		return 0;
	}

	arch_timers_present |= ARCH_CP15_TIMER;
	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	arch_timer_detect_rate(NULL, np);

	arch_timer_c3stop = !of_property_read_bool(np, "always-on");

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_dt, np);

	/*
	 * If we cannot rely on firmware initializing the timer registers then
	 * we should use the physical timers instead.
	 */
	if (IS_ENABLED(CONFIG_ARM) &&
	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		arch_timer_uses_ppi = PHYS_SECURE_PPI;

	/* On some systems, the counter stops ticking when in suspend. */
	arch_counter_suspend_stop = of_property_read_bool(np,
							 "arm,no-tick-in-suspend");

	return arch_timer_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);

static int __init arch_timer_mem_init(struct device_node *np)
{
	struct device_node *frame, *best_frame = NULL;
	void __iomem *cntctlbase, *base;
	unsigned int irq, ret = -EINVAL;
	u32 cnttidr;

	arch_timers_present |= ARCH_MEM_TIMER;
	cntctlbase = of_iomap(np, 0);
	if (!cntctlbase) {
		pr_err("arch_timer: Can't find CNTCTLBase\n");
		return -ENXIO;
	}

	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);

	/*
	 * Try to find a virtual capable frame. Otherwise fall back to a
	 * physical capable frame.
	 */
	for_each_available_child_of_node(np, frame) {
		int n;
		u32 cntacr;

		if (of_property_read_u32(frame, "frame-number", &n)) {
			pr_err("arch_timer: Missing frame-number\n");
			of_node_put(frame);
			goto out;
		}

		/* Try enabling everything, and see what sticks */
		cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
			 CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
		writel_relaxed(cntacr, cntctlbase + CNTACR(n));
		cntacr = readl_relaxed(cntctlbase + CNTACR(n));

		if ((cnttidr & CNTTIDR_VIRT(n)) &&
		    !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
			of_node_put(best_frame);
			best_frame = frame;
			arch_timer_mem_use_virtual = true;
			break;
		}

		if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
			continue;

		of_node_put(best_frame);
		best_frame = of_node_get(frame);
	}

	ret = -ENXIO;
	base = arch_counter_base = of_io_request_and_map(best_frame, 0,
							 "arch_mem_timer");
	if (IS_ERR(base)) {
		pr_err("arch_timer: Can't map frame's registers\n");
		goto out;
	}

	if (arch_timer_mem_use_virtual)
		irq = irq_of_parse_and_map(best_frame, 1);
	else
		irq = irq_of_parse_and_map(best_frame, 0);

	ret = -EINVAL;
	if (!irq) {
		pr_err("arch_timer: Frame missing %s irq\n",
		       arch_timer_mem_use_virtual ? "virt" : "phys");
		goto out;
	}

	arch_timer_detect_rate(base, np);
	ret = arch_timer_mem_register(base, irq);
	if (ret)
		goto out;

	return arch_timer_common_init();
out:
	iounmap(cntctlbase);
	of_node_put(best_frame);
	return ret;
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
		       arch_timer_mem_init);

#ifdef CONFIG_ACPI
static int __init map_generic_timer_interrupt(u32 interrupt, u32 flags)
{
	int trigger, polarity;

	if (!interrupt)
		return 0;

	trigger = (flags & ACPI_GTDT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE
			: ACPI_LEVEL_SENSITIVE;

	polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW
			: ACPI_ACTIVE_HIGH;

	return acpi_register_gsi(NULL, interrupt, trigger, polarity);
}

/* Initialize per-processor generic timer */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
	struct acpi_table_gtdt *gtdt;

	if (arch_timers_present & ARCH_CP15_TIMER) {
		pr_warn("arch_timer: already initialized, skipping\n");
		return -EINVAL;
	}

	gtdt = container_of(table, struct acpi_table_gtdt, header);

	arch_timers_present |= ARCH_CP15_TIMER;

	arch_timer_ppi[PHYS_SECURE_PPI] =
		map_generic_timer_interrupt(gtdt->secure_el1_interrupt,
					    gtdt->secure_el1_flags);

	arch_timer_ppi[PHYS_NONSECURE_PPI] =
		map_generic_timer_interrupt(gtdt->non_secure_el1_interrupt,
					    gtdt->non_secure_el1_flags);

	arch_timer_ppi[VIRT_PPI] =
		map_generic_timer_interrupt(gtdt->virtual_timer_interrupt,
					    gtdt->virtual_timer_flags);

	arch_timer_ppi[HYP_PPI] =
		map_generic_timer_interrupt(gtdt->non_secure_el2_interrupt,
					    gtdt->non_secure_el2_flags);

	/* Get the frequency from CNTFRQ */
	arch_timer_detect_rate(NULL, NULL);

	/* Always-on capability */
	arch_timer_c3stop = !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON);

	arch_timer_init();
	return 0;
}
CLOCKSOURCE_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
#endif