/*
 *  linux/drivers/clocksource/arm_arch_timer.c
 *
 *  Copyright (C) 2011 ARM Ltd.
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	"arm_arch_timer: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched_clock.h>
#include <linux/acpi.h>

#include <asm/arch_timer.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

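/*
 * Register offsets within the memory-mapped timer's CNTCTLBase frame.
 * CNTTIDR carries a four-bit field per frame, with bit 1 of each field
 * flagging virtual timer capability; CNTACR(n) gates access to frame n.
 */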
#define CNTTIDR		0x08
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))

#define CNTACR(n)	(0x40 + ((n) * 4))
#define CNTACR_RPCT	BIT(0)
#define CNTACR_RVCT	BIT(1)
#define CNTACR_RFRQ	BIT(2)
#define CNTACR_RVOFF	BIT(3)
#define CNTACR_RWVT	BIT(4)
#define CNTACR_RWPT	BIT(5)

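/* Register offsets within an individual timer frame (CNTBaseN) */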
#define CNTVCT_LO	0x08
#define CNTVCT_HI	0x0c
#define CNTFRQ		0x10
#define CNTP_TVAL	0x28
#define CNTP_CTL	0x2c
#define CNTV_TVAL	0x38
#define CNTV_CTL	0x3c

#define ARCH_CP15_TIMER	BIT(0)
#define ARCH_MEM_TIMER	BIT(1)
static unsigned arch_timers_present __initdata;

static void __iomem *arch_counter_base;

struct arch_timer {
	void __iomem *base;
	struct clock_event_device evt;
};

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

static u32 arch_timer_rate;

enum ppi_nr {
	PHYS_SECURE_PPI,
	PHYS_NONSECURE_PPI,
	VIRT_PPI,
	HYP_PPI,
	MAX_TIMER_PPI
};

static int arch_timer_ppi[MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;

static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
static bool arch_counter_suspend_stop;
static bool vdso_default = true;

static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);

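/* Boot-time override: "clocksource.arm_arch_timer.evtstrm=" on the command line */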
static int __init early_evtstrm_cfg(char *buf)
{
	return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);

/*
 * Architected system timer support.
 */

static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
			  struct clock_event_device *clk)
{
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTV_TVAL);
			break;
		}
	} else {
		arch_timer_reg_write_cp15(access, reg, val);
	}
}

static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTV_TVAL);
			break;
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}

/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;

static u64 arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}

static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter __ro_after_init = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

#ifdef CONFIG_FSL_ERRATUM_A008585
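/*
 * Freescale erratum A-008585: a read of the timer/counter registers can
 * return a stale value. The workaround below re-reads until two
 * back-to-back reads agree.
 */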
/*
 * The number of retries is an arbitrary value well beyond the highest number
 * of iterations the loop has been observed to take.
 */
#define __fsl_a008585_read_reg(reg) ({			\
	u64 _old, _new;					\
	int _retries = 200;				\
							\
	do {						\
		_old = read_sysreg(reg);		\
		_new = read_sysreg(reg);		\
		_retries--;				\
	} while (unlikely(_old != _new) && _retries);	\
							\
	WARN_ON_ONCE(!_retries);			\
	_new;						\
})

static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntp_tval_el0);
}

static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntv_tval_el0);
}

static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
#endif

#ifdef CONFIG_HISILICON_ERRATUM_161010101
/*
 * A valid read can only be confirmed by checking that the second of two
 * back-to-back reads is no more than 32 counts ahead of the first, so
 * the low 5 bits of the difference are discarded before testing it.
 * In theory the erratum should not strike more than twice in a row, but
 * interrupts between the two reads can make the loop iterate more often,
 * so the retry count is set well beyond the number of iterations the
 * loop has been observed to take.
 */
#define __hisi_161010101_read_reg(reg) ({			\
	u64 _old, _new;						\
	int _retries = 50;					\
								\
	do {							\
		_old = read_sysreg(reg);			\
		_new = read_sysreg(reg);			\
		_retries--;					\
	} while (unlikely((_new - _old) >> 5) && _retries);	\
								\
	WARN_ON_ONCE(!_retries);				\
	_new;							\
})

static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntp_tval_el0);
}

static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntv_tval_el0);
}

static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
	return __hisi_161010101_read_reg(cntvct_el0);
}
#endif

#ifdef CONFIG_ARM64_ERRATUM_858921
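/*
 * ARM erratum 858921 (Cortex-A73): a counter read can be corrupted if it
 * coincides with a roll-over of the low 32 bits. Two back-to-back reads
 * cannot both be affected, so if bit 32 differs between them the first
 * value is used, otherwise the second.
 */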
static u64 notrace arm64_858921_read_cntvct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntvct_el0);
	new = read_sysreg(cntvct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}
#endif

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *,
	       timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
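/* Static branch: flipped at most once, so CPUs without errata pay only a NOP */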

static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
						struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cval = evt + arch_counter_get_cntvct();

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS)
		write_sysreg(cval, cntp_cval_el0);
	else
		write_sysreg(cval, cntv_cval_el0);

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int erratum_set_next_event_tval_virt(unsigned long evt,
					    struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int erratum_set_next_event_tval_phys(unsigned long evt,
					    struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static const struct arch_timer_erratum_workaround ool_workarounds[] = {
#ifdef CONFIG_FSL_ERRATUM_A008585
	{
		.match_type = ate_match_dt,
		.id = "fsl,erratum-a008585",
		.desc = "Freescale erratum a008585",
		.read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
		.read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
		.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
	{
		.match_type = ate_match_dt,
		.id = "hisilicon,erratum-161010101",
		.desc = "HiSilicon erratum 161010101",
		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		.match_type = ate_match_local_cap_id,
		.id = (void *)ARM64_WORKAROUND_858921,
		.desc = "ARM erratum 858921",
		.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
	},
#endif
};

typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
			       const void *);

static
bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
				 const void *arg)
{
	const struct device_node *np = arg;

	return of_property_read_bool(np, wa->id);
}

static
bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
					const void *arg)
{
	return this_cpu_has_cap((uintptr_t)wa->id);
}

static const struct arch_timer_erratum_workaround *
arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
			  ate_match_fn_t match_fn,
			  void *arg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
		if (ool_workarounds[i].match_type != type)
			continue;

		if (match_fn(&ool_workarounds[i], arg))
			return &ool_workarounds[i];
	}

	return NULL;
}

static
void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
				  bool local)
{
	int i;

	if (local) {
		__this_cpu_write(timer_unstable_counter_workaround, wa);
	} else {
		for_each_possible_cpu(i)
			per_cpu(timer_unstable_counter_workaround, i) = wa;
	}

	static_branch_enable(&arch_timer_read_ool_enabled);

	/*
	 * Don't use the vdso fastpath if errata require using the
	 * out-of-line counter accessor. We may change our mind pretty
	 * late in the game (with a per-CPU erratum, for example), so
	 * change both the default value and the vdso itself.
	 */
	if (wa->read_cntvct_el0) {
		clocksource_counter.archdata.vdso_direct = false;
		vdso_default = false;
	}
}

static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
					    void *arg)
{
	const struct arch_timer_erratum_workaround *wa;
	ate_match_fn_t match_fn = NULL;
	bool local = false;

	switch (type) {
	case ate_match_dt:
		match_fn = arch_timer_check_dt_erratum;
		break;
	case ate_match_local_cap_id:
		match_fn = arch_timer_check_local_cap_erratum;
		local = true;
		break;
	default:
		WARN_ON(1);
		return;
	}

	wa = arch_timer_iterate_errata(type, match_fn, arg);
	if (!wa)
		return;

	if (needs_unstable_timer_counter_workaround()) {
		const struct arch_timer_erratum_workaround *__wa;
		__wa = __this_cpu_read(timer_unstable_counter_workaround);
		if (__wa && wa != __wa)
			pr_warn("Can't enable workaround for %s (clashes with %s)\n",
				wa->desc, __wa->desc);

		if (__wa)
			return;
	}

	arch_timer_enable_workaround(wa, local);
	pr_info("Enabling %s workaround for %s\n",
		local ? "local" : "global", wa->desc);
}

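/*
 * Evaluates to true and stores the workaround's return value in 'r' when
 * this CPU has an out-of-line handler for 'fn'; evaluates to false
 * otherwise, so the caller falls back to the default implementation.
 */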
#define erratum_handler(fn, r, ...)					\
({									\
	bool __val;							\
	if (needs_unstable_timer_counter_workaround()) {		\
		const struct arch_timer_erratum_workaround *__wa;	\
		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
		if (__wa && __wa->fn) {					\
			r = __wa->fn(__VA_ARGS__);			\
			__val = true;					\
		} else {						\
			__val = false;					\
		}							\
	} else {							\
		__val = false;						\
	}								\
	__val;								\
})

static bool arch_timer_this_cpu_has_cntvct_wa(void)
{
	const struct arch_timer_erratum_workaround *wa;

	wa = __this_cpu_read(timer_unstable_counter_workaround);
	return wa && wa->read_cntvct_el0;
}
#else
#define arch_timer_check_ool_workaround(t,a)		do { } while(0)
#define erratum_set_next_event_tval_virt(...)		({BUG(); 0;})
#define erratum_set_next_event_tval_phys(...)		({BUG(); 0;})
#define erratum_handler(fn, r, ...)			({false;})
#define arch_timer_this_cpu_has_cntvct_wa()		({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */

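/*
 * ISTATUS stays asserted until the comparator is reprogrammed, so mask
 * the timer output (IMASK) before running the event handler, which will
 * either program a new event or shut the timer down.
 */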
static __always_inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}

static __always_inline int timer_shutdown(const int access,
					  struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);

	return 0;
}

static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}

static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}

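/*
 * Writing TVAL arms the comparator at the current count plus 'evt'; the
 * erratum paths above compute CVAL directly because TVAL accesses are
 * unreliable on the affected parts.
 */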
static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	int ret;

	if (erratum_handler(set_next_event_virt, ret, evt, clk))
		return ret;

	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	int ret;

	if (erratum_handler(set_next_event_phys, ret, evt, clk))
		return ret;

	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}

static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_CP15_TIMER) {
		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
		switch (arch_timer_uses_ppi) {
		case VIRT_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_virt;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
			clk->set_next_event = arch_timer_set_next_event_virt;
			break;
		case PHYS_SECURE_PPI:
		case PHYS_NONSECURE_PPI:
		case HYP_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_phys;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
			clk->set_next_event = arch_timer_set_next_event_phys;
			break;
		default:
			BUG();
		}

		arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_all_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}
	}

	clk->set_state_shutdown(clk);

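	/* TVAL is a signed 32-bit value, hence the 0x7fffffff max delta */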
	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}

static void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	elf_hwcap |= HWCAP_EVTSTRM;
#ifdef CONFIG_COMPAT
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
}

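/*
 * Worked example: a 50 MHz counter with the 10 kHz target stream rate
 * gives evt_stream_div = 5000; fls(5000) = 13 and bit 11 of 5000 is
 * clear, so pos drops to 12 and the divider becomes 2^12 = 4096, the
 * power of two closest to 5000.
 */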
static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, pos;

	/* Find the closest power of two to the divisor */
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
	pos = fls(evt_stream_div);
	if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
		pos--;
	/* enable event stream */
	arch_timer_evtstrm_enable(min(pos, 15));
}

static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and both counters */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_USR_VCT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

	/*
	 * Enable user access to the virtual counter if it doesn't
	 * need to be worked around. The vdso may already have been
	 * disabled, though.
	 */
	if (arch_timer_this_cpu_has_cntvct_wa())
		pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
	else
		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

	arch_timer_set_cntkctl(cntkctl);
}

static bool arch_timer_has_nonsecure_ppi(void)
{
	return (arch_timer_uses_ppi == PHYS_SECURE_PPI &&
		arch_timer_ppi[PHYS_NONSECURE_PPI]);
}

static u32 check_ppi_trigger(int irq)
{
	u32 flags = irq_get_trigger_type(irq);

	if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
		pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
		pr_warn("WARNING: Please fix your firmware\n");
		flags = IRQF_TRIGGER_LOW;
	}

	return flags;
}

static int arch_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
	u32 flags;

	__arch_timer_setup(ARCH_CP15_TIMER, clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags);
	}

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();

	return 0;
}

static void
arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	/*
	 * Try to determine the frequency from the device tree or CNTFRQ.
	 * If ACPI is enabled, the frequency comes from CNTFRQ only.
	 */
	if (!acpi_disabled ||
	    of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
		if (cntbase)
			arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
		else
			arch_timer_rate = arch_timer_get_cntfrq();
	}

	/* Check the timer frequency. */
	if (arch_timer_rate == 0)
		pr_warn("Architected timer frequency not available\n");
}

static void arch_timer_banner(unsigned type)
{
	pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
		type & ARCH_CP15_TIMER ? "cp15" : "",
		type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? " and " : "",
		type & ARCH_MEM_TIMER ? "mmio" : "",
		(unsigned long)arch_timer_rate / 1000000,
		(unsigned long)(arch_timer_rate / 10000) % 100,
		type & ARCH_CP15_TIMER ?
			(arch_timer_uses_ppi == VIRT_PPI) ? "virt" : "phys" :
			"",
		type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "",
		type & ARCH_MEM_TIMER ?
			arch_timer_mem_use_virtual ? "virt" : "phys" :
			"");
}

u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}

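/*
 * The memory-mapped counter is read as two 32-bit halves; looping until
 * the high word is stable guards against a low-word roll-over between
 * the two accesses.
 */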
static u64 arch_counter_get_cntvct_mem(void)
{
	u32 vct_lo, vct_hi, tmp_hi;

	do {
		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
	} while (vct_hi != tmp_hi);

	return ((u64) vct_hi << 32) | vct_lo;
}

static struct arch_timer_kvm_info arch_timer_kvm_info;

struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
	return &arch_timer_kvm_info;
}

static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_CP15_TIMER) {
		if (IS_ENABLED(CONFIG_ARM64) || arch_timer_uses_ppi == VIRT_PPI)
			arch_timer_read_counter = arch_counter_get_cntvct;
		else
			arch_timer_read_counter = arch_counter_get_cntpct;

		clocksource_counter.archdata.vdso_direct = vdso_default;
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;
	}

	if (!arch_counter_suspend_stop)
		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}

static void arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
		 clk->irq, smp_processor_id());

	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
	if (arch_timer_has_nonsecure_ppi())
		disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);

	clk->set_state_shutdown(clk);
}

static int arch_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);

	arch_timer_stop(clk);
	return 0;
}

#ifdef CONFIG_CPU_PM
static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER)
		__this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());
	else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
		arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}

static void __init arch_timer_cpu_pm_deinit(void)
{
	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
}

#else
static int __init arch_timer_cpu_pm_init(void)
{
	return 0;
}

static void __init arch_timer_cpu_pm_deinit(void)
{
}
#endif

static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	ppi = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case VIRT_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
		break;
	case PHYS_SECURE_PPI:
	case PHYS_NONSECURE_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
		break;
	case HYP_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		break;
	default:
		BUG();
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

	/* Register and immediately configure the timer on the boot CPU */
	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
				"clockevents/arm/arch_timer:starting",
				arch_timer_starting_cpu, arch_timer_dying_cpu);
	if (err)
		goto out_unreg_cpupm;
	return 0;

out_unreg_cpupm:
	arch_timer_cpu_pm_deinit();

out_unreg_notify:
	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
	if (arch_timer_has_nonsecure_ppi())
		free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
				arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}

static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
	int ret;
	irq_handler_t func;
	struct arch_timer *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	t->base = base;
	t->evt.irq = irq;
	__arch_timer_setup(ARCH_MEM_TIMER, &t->evt);

	if (arch_timer_mem_use_virtual)
		func = arch_timer_handler_virt_mem;
	else
		func = arch_timer_handler_phys_mem;

	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
	if (ret) {
		pr_err("arch_timer: Failed to request mem timer irq\n");
		kfree(t);
	}

	return ret;
}

static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible	= "arm,armv7-timer", },
	{ .compatible	= "arm,armv8-timer", },
	{},
};

static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible	= "arm,armv7-timer-mem", },
	{},
};

static bool __init
arch_timer_needs_probing(int type, const struct of_device_id *matches)
{
	struct device_node *dn;
	bool needs_probing = false;

	dn = of_find_matching_node(NULL, matches);
	if (dn && of_device_is_available(dn) && !(arch_timers_present & type))
		needs_probing = true;
	of_node_put(dn);

	return needs_probing;
}

static int __init arch_timer_common_init(void)
{
	unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;

	/* Wait until both nodes are probed if we have two timers */
	if ((arch_timers_present & mask) != mask) {
		if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match))
			return 0;
		if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match))
			return 0;
	}

	arch_timer_banner(arch_timers_present);
	arch_counter_register(arch_timers_present);
	return arch_timer_arch_init();
}

static int __init arch_timer_init(void)
{
	int ret;
	/*
	 * If HYP mode is available, we know that the physical timer
	 * has been configured to be accessible from PL1. Use it, so
	 * that a guest can use the virtual timer instead.
	 *
	 * If no interrupt is provided for the virtual timer, we'll have
	 * to stick to the physical timer. It'd better be accessible...
	 *
	 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
	 * accesses to CNTP_*_EL1 registers are silently redirected to
	 * their CNTHP_*_EL2 counterparts, and use a different PPI
	 * number.
	 */
	if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
		bool has_ppi;

		if (is_kernel_in_hyp_mode()) {
			arch_timer_uses_ppi = HYP_PPI;
			has_ppi = !!arch_timer_ppi[HYP_PPI];
		} else {
			arch_timer_uses_ppi = PHYS_SECURE_PPI;
			has_ppi = (!!arch_timer_ppi[PHYS_SECURE_PPI] ||
				   !!arch_timer_ppi[PHYS_NONSECURE_PPI]);
		}

		if (!has_ppi) {
			pr_warn("arch_timer: No interrupt available, giving up\n");
			return -EINVAL;
		}
	}

	ret = arch_timer_register();
	if (ret)
		return ret;

	ret = arch_timer_common_init();
	if (ret)
		return ret;

	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];

	return 0;
}

static int __init arch_timer_of_init(struct device_node *np)
{
	int i;

	if (arch_timers_present & ARCH_CP15_TIMER) {
		pr_warn("arch_timer: multiple nodes in dt, skipping\n");
		return 0;
	}

	arch_timers_present |= ARCH_CP15_TIMER;
	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	arch_timer_detect_rate(NULL, np);

	arch_timer_c3stop = !of_property_read_bool(np, "always-on");

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_dt, np);

	/*
	 * If we cannot rely on firmware initializing the timer registers then
	 * we should use the physical timers instead.
	 */
	if (IS_ENABLED(CONFIG_ARM) &&
	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		arch_timer_uses_ppi = PHYS_SECURE_PPI;

	/* On some systems, the counter stops ticking when in suspend. */
	arch_counter_suspend_stop = of_property_read_bool(np,
							 "arm,no-tick-in-suspend");

	return arch_timer_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);

static int __init arch_timer_mem_init(struct device_node *np)
{
	struct device_node *frame, *best_frame = NULL;
	void __iomem *cntctlbase, *base;
	unsigned int irq;
	int ret = -EINVAL;
	u32 cnttidr;

	arch_timers_present |= ARCH_MEM_TIMER;
	cntctlbase = of_iomap(np, 0);
	if (!cntctlbase) {
		pr_err("arch_timer: Can't find CNTCTLBase\n");
		return -ENXIO;
	}

	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);

	/*
	 * Try to find a virtual capable frame. Otherwise fall back to a
	 * physical capable frame.
	 */
	for_each_available_child_of_node(np, frame) {
		u32 n;
		u32 cntacr;

		if (of_property_read_u32(frame, "frame-number", &n)) {
			pr_err("arch_timer: Missing frame-number\n");
			of_node_put(frame);
			goto out;
		}

		/* Try enabling everything, and see what sticks */
		cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
			 CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
		writel_relaxed(cntacr, cntctlbase + CNTACR(n));
		cntacr = readl_relaxed(cntctlbase + CNTACR(n));
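		/* Access types the frame can't grant read back as zero */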

		if ((cnttidr & CNTTIDR_VIRT(n)) &&
		    !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
			of_node_put(best_frame);
			best_frame = frame;
			arch_timer_mem_use_virtual = true;
			break;
		}

		if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
			continue;

		of_node_put(best_frame);
		best_frame = of_node_get(frame);
	}

	ret = -ENXIO;
	base = arch_counter_base = of_io_request_and_map(best_frame, 0,
							 "arch_mem_timer");
	if (IS_ERR(base)) {
		pr_err("arch_timer: Can't map frame's registers\n");
		goto out;
	}

	if (arch_timer_mem_use_virtual)
		irq = irq_of_parse_and_map(best_frame, 1);
	else
		irq = irq_of_parse_and_map(best_frame, 0);

	ret = -EINVAL;
	if (!irq) {
		pr_err("arch_timer: Frame missing %s irq\n",
		       arch_timer_mem_use_virtual ? "virt" : "phys");
		goto out;
	}

	arch_timer_detect_rate(base, np);
	ret = arch_timer_mem_register(base, irq);
	if (ret)
		goto out;

	return arch_timer_common_init();
out:
	iounmap(cntctlbase);
	of_node_put(best_frame);
	return ret;
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
		       arch_timer_mem_init);

#ifdef CONFIG_ACPI
static int __init map_generic_timer_interrupt(u32 interrupt, u32 flags)
{
	int trigger, polarity;

	if (!interrupt)
		return 0;

	trigger = (flags & ACPI_GTDT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE
			: ACPI_LEVEL_SENSITIVE;

	polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW
			: ACPI_ACTIVE_HIGH;

	return acpi_register_gsi(NULL, interrupt, trigger, polarity);
}

/* Initialize per-processor generic timer */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
	struct acpi_table_gtdt *gtdt;

	if (arch_timers_present & ARCH_CP15_TIMER) {
		pr_warn("arch_timer: already initialized, skipping\n");
		return -EINVAL;
	}

	gtdt = container_of(table, struct acpi_table_gtdt, header);

	arch_timers_present |= ARCH_CP15_TIMER;

	arch_timer_ppi[PHYS_SECURE_PPI] =
		map_generic_timer_interrupt(gtdt->secure_el1_interrupt,
		gtdt->secure_el1_flags);

	arch_timer_ppi[PHYS_NONSECURE_PPI] =
		map_generic_timer_interrupt(gtdt->non_secure_el1_interrupt,
		gtdt->non_secure_el1_flags);

	arch_timer_ppi[VIRT_PPI] =
		map_generic_timer_interrupt(gtdt->virtual_timer_interrupt,
		gtdt->virtual_timer_flags);

	arch_timer_ppi[HYP_PPI] =
		map_generic_timer_interrupt(gtdt->non_secure_el2_interrupt,
		gtdt->non_secure_el2_flags);

	/* Get the frequency from CNTFRQ */
	arch_timer_detect_rate(NULL, NULL);

	/* Always-on capability */
	arch_timer_c3stop = !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON);

	arch_timer_init();
	return 0;
}
CLOCKSOURCE_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
#endif