/*
 *  linux/drivers/clocksource/arm_arch_timer.c
 *
 *  Copyright (C) 2011 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt)	"arm_arch_timer: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched_clock.h>
#include <linux/acpi.h>

#include <asm/arch_timer.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>
#define CNTTIDR		0x08
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))

#define CNTACR(n)	(0x40 + ((n) * 4))
#define CNTACR_RPCT	BIT(0)
#define CNTACR_RVCT	BIT(1)
#define CNTACR_RFRQ	BIT(2)
#define CNTACR_RVOFF	BIT(3)
#define CNTACR_RWVT	BIT(4)
#define CNTACR_RWPT	BIT(5)

#define CNTVCT_LO	0x08
#define CNTVCT_HI	0x0c
#define CNTFRQ		0x10
#define CNTP_TVAL	0x28
#define CNTP_CTL	0x2c
#define CNTV_TVAL	0x38
#define CNTV_CTL	0x3c

#define ARCH_CP15_TIMER	BIT(0)
#define ARCH_MEM_TIMER	BIT(1)
static unsigned arch_timers_present __initdata;

static void __iomem *arch_counter_base;
struct arch_timer {
	void __iomem *base;
	struct clock_event_device evt;
};

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

static u32 arch_timer_rate;

enum ppi_nr {
	PHYS_SECURE_PPI,
	PHYS_NONSECURE_PPI,
	VIRT_PPI,
	HYP_PPI,
	MAX_TIMER_PPI
};

static int arch_timer_ppi[MAX_TIMER_PPI];
static struct clock_event_device __percpu *arch_timer_evt;

static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
static bool arch_counter_suspend_stop;

static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);

static int __init early_evtstrm_cfg(char *buf)
{
	return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
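
/*
 * The event stream can also be controlled from the kernel command line,
 * e.g. "clocksource.arm_arch_timer.evtstrm=0" disables it regardless of
 * the Kconfig default above.
 */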
/*
 * Architected system timer support.
 */

#ifdef CONFIG_FSL_ERRATUM_A008585
/*
 * The number of retries is an arbitrary value well beyond the highest number
 * of iterations the loop has been observed to take.
 */
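/*
 * The workaround below simply reads the affected register twice, back to
 * back, and retries until two consecutive reads return the same value;
 * only then is the value trusted.
 */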
#define __fsl_a008585_read_reg(reg) ({			\
	u64 _old, _new;					\
	int _retries = 200;				\
							\
	do {						\
		_old = read_sysreg(reg);		\
		_new = read_sysreg(reg);		\
		_retries--;				\
	} while (unlikely(_old != _new) && _retries);	\
							\
	WARN_ON_ONCE(!_retries);			\
	_new;						\
})

static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntp_tval_el0);
}

static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntv_tval_el0);
}

static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
/*
 * The only way to confirm that a read is correct is to verify that the
 * value of the second of two back-to-back reads is larger than the first
 * by less than 32, so clear the lower 5 bits of the difference to check
 * whether it is 32 or more. Theoretically the erratum should not occur
 * more than twice in succession when reading the system counter, but some
 * interrupts may lead to more than two read errors and trigger the
 * warning, so the number of retries is set far beyond the number of
 * iterations the loop has been observed to take.
 */
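/*
 * Rationale for the "less than 32" check: two back-to-back reads of a
 * correctly behaving counter are only a handful of ticks apart (e.g. at an
 * assumed 50MHz CNTFRQ, 32 ticks is 640ns), so any larger jump indicates a
 * corrupted read caused by the erratum.
 */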
#define __hisi_161010101_read_reg(reg) ({				\
	u64 _old, _new;							\
	int _retries = 50;						\
									\
	do {								\
		_old = read_sysreg(reg);				\
		_new = read_sysreg(reg);				\
		_retries--;						\
	} while (unlikely((_new - _old) >> 5) && _retries);		\
									\
	WARN_ON_ONCE(!_retries);					\
	_new;								\
})

static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntp_tval_el0);
}

static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntv_tval_el0);
}

static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
	return __hisi_161010101_read_reg(cntvct_el0);
}
#endif
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
const struct arch_timer_erratum_workaround *timer_unstable_counter_workaround = NULL;
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);

static const struct arch_timer_erratum_workaround ool_workarounds[] = {
#ifdef CONFIG_FSL_ERRATUM_A008585
	{
		.id = "fsl,erratum-a008585",
		.read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
		.read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
		.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
	},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
	{
		.id = "hisilicon,erratum-161010101",
		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
	},
#endif
};
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
			  struct clock_event_device *clk)
{
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTV_TVAL);
			break;
		}
	} else {
		arch_timer_reg_write_cp15(access, reg, val);
	}
}
static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTV_TVAL);
			break;
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}
static __always_inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}
static __always_inline int timer_shutdown(const int access,
					  struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);

	return 0;
}

static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}

static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}
static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
static __always_inline void erratum_set_next_event_generic(const int access,
		unsigned long evt, struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cval = evt + arch_counter_get_cntvct();

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS)
		write_sysreg(cval, cntp_cval_el0);
	else if (access == ARCH_TIMER_VIRT_ACCESS)
		write_sysreg(cval, cntv_cval_el0);

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int erratum_set_next_event_virt(unsigned long evt,
				       struct clock_event_device *clk)
{
	erratum_set_next_event_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int erratum_set_next_event_phys(unsigned long evt,
				       struct clock_event_device *clk)
{
	erratum_set_next_event_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}
static void erratum_workaround_set_sne(struct clock_event_device *clk)
{
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
	if (!static_branch_unlikely(&arch_timer_read_ool_enabled))
		return;

	if (arch_timer_uses_ppi == VIRT_PPI)
		clk->set_next_event = erratum_set_next_event_virt;
	else
		clk->set_next_event = erratum_set_next_event_phys;
#endif
}
static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_CP15_TIMER) {
		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
		switch (arch_timer_uses_ppi) {
		case VIRT_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_virt;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
			clk->set_next_event = arch_timer_set_next_event_virt;
			break;
		case PHYS_SECURE_PPI:
		case PHYS_NONSECURE_PPI:
		case HYP_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_phys;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
			clk->set_next_event = arch_timer_set_next_event_phys;
			break;
		default:
			BUG();
		}

		erratum_workaround_set_sne(clk);
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_all_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}
	}

	clk->set_state_shutdown(clk);
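
	/*
	 * The TVAL registers are 32-bit signed down counters, hence the
	 * 0x7fffffff limit on the maximum programmable delta below; 0xf is
	 * simply a small, safe minimum number of ticks.
	 */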
	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}
static void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	elf_hwcap |= HWCAP_EVTSTRM;
#ifdef CONFIG_COMPAT
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
}
static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, pos;

	/* Find the closest power of two to the divisor */
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
	pos = fls(evt_stream_div);
	if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
		pos--;
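	/*
	 * Worked example (assuming ARCH_TIMER_EVT_STREAM_FREQ is 10kHz): with
	 * a 50MHz counter, evt_stream_div is 5000; fls() gives 13, bit 11 of
	 * 5000 is clear, so pos drops to 12 - i.e. 2^12 = 4096 is the closest
	 * power of two to the divisor, and 12 is programmed as the event
	 * stream trigger bit below.
	 */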
	/* enable event stream */
	arch_timer_evtstrm_enable(min(pos, 15));
}
static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and the physical counter */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

	/* Enable user access to the virtual counter */
	cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

	arch_timer_set_cntkctl(cntkctl);
}
static bool arch_timer_has_nonsecure_ppi(void)
{
	return (arch_timer_uses_ppi == PHYS_SECURE_PPI &&
		arch_timer_ppi[PHYS_NONSECURE_PPI]);
}

static u32 check_ppi_trigger(int irq)
{
	u32 flags = irq_get_trigger_type(irq);

	if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
		pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
		pr_warn("WARNING: Please fix your firmware\n");
		flags = IRQF_TRIGGER_LOW;
	}

	return flags;
}
static int arch_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
	u32 flags;

	__arch_timer_setup(ARCH_CP15_TIMER, clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags);
	}

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();

	return 0;
}
static void __init
arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	/*
	 * Try to determine the frequency from the device tree or CNTFRQ;
	 * if ACPI is enabled, get the frequency from CNTFRQ only.
	 */
	if (!acpi_disabled ||
	    of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
		if (cntbase)
			arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
		else
			arch_timer_rate = arch_timer_get_cntfrq();
	}

	/* Check the timer frequency. */
	if (arch_timer_rate == 0)
		pr_warn("Architected timer frequency not available\n");
}
static void arch_timer_banner(unsigned type)
{
	pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
		type & ARCH_CP15_TIMER ? "cp15" : "",
		type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? " and " : "",
		type & ARCH_MEM_TIMER ? "mmio" : "",
		(unsigned long)arch_timer_rate / 1000000,
		(unsigned long)(arch_timer_rate / 10000) % 100,
		type & ARCH_CP15_TIMER ?
			(arch_timer_uses_ppi == VIRT_PPI) ? "virt" : "phys" :
			"",
		type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "",
		type & ARCH_MEM_TIMER ?
			arch_timer_mem_use_virtual ? "virt" : "phys" :
			"");
}

u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}
static u64 arch_counter_get_cntvct_mem(void)
{
	u32 vct_lo, vct_hi, tmp_hi;
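
	/*
	 * The 64-bit MMIO counter must be read as two 32-bit halves. Read
	 * high, then low, then high again, and retry if the two high words
	 * differ, so a carry from the low to the high word between the reads
	 * cannot produce a torn value.
	 */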
	do {
		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
	} while (vct_hi != tmp_hi);

	return ((u64) vct_hi << 32) | vct_lo;
}
/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;

static u64 arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}

static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter __ro_after_init = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

static struct arch_timer_kvm_info arch_timer_kvm_info;

struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
	return &arch_timer_kvm_info;
}
static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_CP15_TIMER) {
		if (IS_ENABLED(CONFIG_ARM64) || arch_timer_uses_ppi == VIRT_PPI)
			arch_timer_read_counter = arch_counter_get_cntvct;
		else
			arch_timer_read_counter = arch_counter_get_cntpct;

		clocksource_counter.archdata.vdso_direct = true;

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
		/*
		 * Don't use the vdso fastpath if errata require using
		 * the out-of-line counter accessor.
		 */
		if (static_branch_unlikely(&arch_timer_read_ool_enabled))
			clocksource_counter.archdata.vdso_direct = false;
#endif
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;
	}

	if (!arch_counter_suspend_stop)
		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
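	/*
	 * At 56 bits the counter wraps after 2^56 ticks - roughly 45 years
	 * at an assumed 50MHz rate - so even the worst-case rollover is not
	 * a practical concern.
	 */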
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}
static void arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
		 clk->irq, smp_processor_id());

	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
	if (arch_timer_has_nonsecure_ppi())
		disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);

	clk->set_state_shutdown(clk);
}

static int arch_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);

	arch_timer_stop(clk);
	return 0;
}
#ifdef CONFIG_CPU_PM
static unsigned int saved_cntkctl;
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER)
		saved_cntkctl = arch_timer_get_cntkctl();
	else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
		arch_timer_set_cntkctl(saved_cntkctl);
	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}

static void __init arch_timer_cpu_pm_deinit(void)
{
	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
}
#else
static int __init arch_timer_cpu_pm_init(void)
{
	return 0;
}

static void __init arch_timer_cpu_pm_deinit(void)
{
}
#endif
static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	ppi = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case VIRT_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
		break;
	case PHYS_SECURE_PPI:
	case PHYS_NONSECURE_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
		break;
	case HYP_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		break;
	default:
		BUG();
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

	/* Register and immediately configure the timer on the boot CPU */
	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
				"clockevents/arm/arch_timer:starting",
				arch_timer_starting_cpu, arch_timer_dying_cpu);
	if (err)
		goto out_unreg_cpupm;
	return 0;

out_unreg_cpupm:
	arch_timer_cpu_pm_deinit();

out_unreg_notify:
	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
	if (arch_timer_has_nonsecure_ppi())
		free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
				arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}
static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
	int ret;
	irq_handler_t func;
	struct arch_timer *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	t->base = base;
	t->evt.irq = irq;
	__arch_timer_setup(ARCH_MEM_TIMER, &t->evt);

	if (arch_timer_mem_use_virtual)
		func = arch_timer_handler_virt_mem;
	else
		func = arch_timer_handler_phys_mem;

	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
	if (ret) {
		pr_err("arch_timer: Failed to request mem timer irq\n");
		kfree(t);
	}

	return ret;
}
static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer", },
	{ .compatible = "arm,armv8-timer", },
	{},
};

static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer-mem", },
	{},
};

static bool __init
arch_timer_needs_probing(int type, const struct of_device_id *matches)
{
	struct device_node *dn;
	bool needs_probing = false;

	dn = of_find_matching_node(NULL, matches);
	if (dn && of_device_is_available(dn) && !(arch_timers_present & type))
		needs_probing = true;
	of_node_put(dn);

	return needs_probing;
}
static int __init arch_timer_common_init(void)
{
	unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;

	/* Wait until both nodes are probed if we have two timers */
	if ((arch_timers_present & mask) != mask) {
		if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match))
			return 0;
		if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match))
			return 0;
	}

	arch_timer_banner(arch_timers_present);
	arch_counter_register(arch_timers_present);
	return arch_timer_arch_init();
}
static int __init arch_timer_init(void)
{
	int ret;
	/*
	 * If HYP mode is available, we know that the physical timer
	 * has been configured to be accessible from PL1. Use it, so
	 * that a guest can use the virtual timer instead.
	 *
	 * If no interrupt is provided for the virtual timer, we'll have to
	 * stick to the physical timer. It'd better be accessible...
	 *
	 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
	 * accesses to CNTP_*_EL1 registers are silently redirected to
	 * their CNTHP_*_EL2 counterparts, and use a different PPI
	 * number.
	 */
	if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
		bool has_ppi;

		if (is_kernel_in_hyp_mode()) {
			arch_timer_uses_ppi = HYP_PPI;
			has_ppi = !!arch_timer_ppi[HYP_PPI];
		} else {
			arch_timer_uses_ppi = PHYS_SECURE_PPI;
			has_ppi = (!!arch_timer_ppi[PHYS_SECURE_PPI] ||
				   !!arch_timer_ppi[PHYS_NONSECURE_PPI]);
		}

		if (!has_ppi) {
			pr_warn("arch_timer: No interrupt available, giving up\n");
			return -EINVAL;
		}
	}

	ret = arch_timer_register();
	if (ret)
		return ret;

	ret = arch_timer_common_init();
	if (ret)
		return ret;

	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];

	return 0;
}
static int __init arch_timer_of_init(struct device_node *np)
{
	int i;

	if (arch_timers_present & ARCH_CP15_TIMER) {
		pr_warn("arch_timer: multiple nodes in dt, skipping\n");
		return 0;
	}

	arch_timers_present |= ARCH_CP15_TIMER;
	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	arch_timer_detect_rate(NULL, np);

	arch_timer_c3stop = !of_property_read_bool(np, "always-on");

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
	for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
		if (of_property_read_bool(np, ool_workarounds[i].id)) {
			timer_unstable_counter_workaround = &ool_workarounds[i];
			static_branch_enable(&arch_timer_read_ool_enabled);
			pr_info("arch_timer: Enabling workaround for %s\n",
				timer_unstable_counter_workaround->id);
			break;
		}
	}
#endif

	/*
	 * If we cannot rely on firmware initializing the timer registers then
	 * we should use the physical timers instead.
	 */
	if (IS_ENABLED(CONFIG_ARM) &&
	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		arch_timer_uses_ppi = PHYS_SECURE_PPI;

	/* On some systems, the counter stops ticking when in suspend. */
	arch_counter_suspend_stop = of_property_read_bool(np,
							 "arm,no-tick-in-suspend");

	return arch_timer_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
static int __init arch_timer_mem_init(struct device_node *np)
{
	struct device_node *frame, *best_frame = NULL;
	void __iomem *cntctlbase, *base;
	unsigned int irq, ret = -EINVAL;
	u32 cnttidr;

	arch_timers_present |= ARCH_MEM_TIMER;
	cntctlbase = of_iomap(np, 0);
	if (!cntctlbase) {
		pr_err("arch_timer: Can't find CNTCTLBase\n");
		return -ENXIO;
	}

	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);

	/*
	 * Try to find a virtual capable frame. Otherwise fall back to a
	 * physical capable frame.
	 */
	for_each_available_child_of_node(np, frame) {
		int n;
		u32 cntacr;

		if (of_property_read_u32(frame, "frame-number", &n)) {
			pr_err("arch_timer: Missing frame-number\n");
			of_node_put(frame);
			goto out;
		}

		/* Try enabling everything, and see what sticks */
		cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
			 CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
		writel_relaxed(cntacr, cntctlbase + CNTACR(n));
		cntacr = readl_relaxed(cntctlbase + CNTACR(n));

		if ((cnttidr & CNTTIDR_VIRT(n)) &&
		    !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
			of_node_put(best_frame);
			best_frame = frame;
			arch_timer_mem_use_virtual = true;
			break;
		}

		if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
			continue;

		of_node_put(best_frame);
		best_frame = of_node_get(frame);
	}

	base = arch_counter_base = of_io_request_and_map(best_frame, 0,
							 "arch_mem_timer");
	if (IS_ERR(base)) {
		pr_err("arch_timer: Can't map frame's registers\n");
		goto out;
	}

	if (arch_timer_mem_use_virtual)
		irq = irq_of_parse_and_map(best_frame, 1);
	else
		irq = irq_of_parse_and_map(best_frame, 0);

	if (!irq) {
		pr_err("arch_timer: Frame missing %s irq\n",
		       arch_timer_mem_use_virtual ? "virt" : "phys");
		goto out;
	}

	arch_timer_detect_rate(base, np);
	ret = arch_timer_mem_register(base, irq);
	if (ret)
		goto out;

	return arch_timer_common_init();
out:
	iounmap(cntctlbase);
	of_node_put(best_frame);
	return ret;
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
		       arch_timer_mem_init);
static int __init map_generic_timer_interrupt(u32 interrupt, u32 flags)
{
	int trigger, polarity;

	if (!interrupt)
		return 0;

	trigger = (flags & ACPI_GTDT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE
			: ACPI_LEVEL_SENSITIVE;

	polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW
			: ACPI_ACTIVE_HIGH;

	return acpi_register_gsi(NULL, interrupt, trigger, polarity);
}
/* Initialize per-processor generic timer */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
	struct acpi_table_gtdt *gtdt;

	if (arch_timers_present & ARCH_CP15_TIMER) {
		pr_warn("arch_timer: already initialized, skipping\n");
		return -EINVAL;
	}

	gtdt = container_of(table, struct acpi_table_gtdt, header);

	arch_timers_present |= ARCH_CP15_TIMER;

	arch_timer_ppi[PHYS_SECURE_PPI] =
		map_generic_timer_interrupt(gtdt->secure_el1_interrupt,
					    gtdt->secure_el1_flags);

	arch_timer_ppi[PHYS_NONSECURE_PPI] =
		map_generic_timer_interrupt(gtdt->non_secure_el1_interrupt,
					    gtdt->non_secure_el1_flags);

	arch_timer_ppi[VIRT_PPI] =
		map_generic_timer_interrupt(gtdt->virtual_timer_interrupt,
					    gtdt->virtual_timer_flags);

	arch_timer_ppi[HYP_PPI] =
		map_generic_timer_interrupt(gtdt->non_secure_el2_interrupt,
					    gtdt->non_secure_el2_flags);

	/* Get the frequency from CNTFRQ */
	arch_timer_detect_rate(NULL, NULL);

	/* Always-on capability */
	arch_timer_c3stop = !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON);

	arch_timer_init();
	return 0;
}
CLOCKSOURCE_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);