/*
 * apb_timer.c: Driver for Langwell APB timers
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Jacob Pan (jacob.jun.pan@intel.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * Note:
 * Langwell is the south complex of the Intel Moorestown MID platform. There
 * are eight external timers in total that can be used by the operating
 * system. The timer information, such as frequency and addresses, is
 * provided to the OS via SFI tables.
 * Timer interrupts are routed via FW/HW emulated IOAPIC independently via
 * individual redirection table entries (RTE).
 * Unlike HPET, there is no master counter, therefore one of the timers is
 * used as the clocksource. The overall allocation looks like:
 * - timer 0 - NR_CPUs for per cpu timer
 * - one timer for clocksource
 * - one timer for watchdog driver.
 * It is also worth noting that the APB timer does not support true one-shot
 * mode; free-running mode is used here to emulate one-shot mode.
 * The APB timer can also be used as a broadcast timer along with the per cpu
 * local APIC timer, but by default the APB timer has a higher rating than
 * local APIC timers.
 */
29 | ||
30 | #include <linux/clocksource.h> | |
31 | #include <linux/clockchips.h> | |
32 | #include <linux/delay.h> | |
33 | #include <linux/errno.h> | |
34 | #include <linux/init.h> | |
35 | #include <linux/sysdev.h> | |
36 | #include <linux/pm.h> | |
37 | #include <linux/pci.h> | |
38 | #include <linux/sfi.h> | |
39 | #include <linux/interrupt.h> | |
40 | #include <linux/cpu.h> | |
41 | #include <linux/irq.h> | |
42 | ||
43 | #include <asm/fixmap.h> | |
44 | #include <asm/apb_timer.h> | |
45 | ||
#define APBT_MASK			CLOCKSOURCE_MASK(32)
#define APBT_SHIFT			22
#define APBT_CLOCKEVENT_RATING		150
#define APBT_CLOCKSOURCE_RATING		250
#define APBT_MIN_DELTA_USEC		200

#define EVT_TO_APBT_DEV(evt) container_of(evt, struct apbt_dev, evt)
#define APBT_CLOCKEVENT0_NUM	(0)
#define APBT_CLOCKEVENT1_NUM	(1)
#define APBT_CLOCKSOURCE_NUM	(2)

static unsigned long apbt_address;
static int apb_timer_block_enabled;
static void __iomem *apbt_virt_address;
static int phy_cs_timer_id;

/*
 * Common DW APB timer info
 */
static uint64_t apbt_freq;

static void apbt_set_mode(enum clock_event_mode mode,
			  struct clock_event_device *evt);
static int apbt_next_event(unsigned long delta,
			   struct clock_event_device *evt);
static cycle_t apbt_read_clocksource(struct clocksource *cs);
static void apbt_restart_clocksource(void);

struct apbt_dev {
	struct clock_event_device evt;
	unsigned int num;
	int cpu;
	unsigned int irq;
	unsigned int tick;
	unsigned int count;
	unsigned int flags;
	char name[10];
};

int disable_apbt_percpu __cpuinitdata;

#ifdef CONFIG_SMP
static unsigned int apbt_num_timers_used;
static DEFINE_PER_CPU(struct apbt_dev, cpu_apbt_dev);
static struct apbt_dev *apbt_devs;
#endif

static inline unsigned long apbt_readl_reg(unsigned long a)
{
	return readl(apbt_virt_address + a);
}

static inline void apbt_writel_reg(unsigned long d, unsigned long a)
{
	writel(d, apbt_virt_address + a);
}

static inline unsigned long apbt_readl(int n, unsigned long a)
{
	return readl(apbt_virt_address + a + n * APBTMRS_REG_SIZE);
}

static inline void apbt_writel(int n, unsigned long d, unsigned long a)
{
	writel(d, apbt_virt_address + a + n * APBTMRS_REG_SIZE);
}

static inline void apbt_set_mapping(void)
{
	struct sfi_timer_table_entry *mtmr;

	if (apbt_virt_address) {
		pr_debug("APBT base already mapped\n");
		return;
	}
	mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
	if (mtmr == NULL) {
		printk(KERN_ERR "Failed to get MTMR %d from SFI\n",
		       APBT_CLOCKEVENT0_NUM);
		return;
	}
	apbt_address = (unsigned long)mtmr->phys_addr;
	if (!apbt_address) {
		printk(KERN_WARNING "No timer base from SFI, use default\n");
		apbt_address = APBT_DEFAULT_BASE;
	}
	apbt_virt_address = ioremap_nocache(apbt_address, APBT_MMAP_SIZE);
	if (apbt_virt_address) {
		pr_debug("Mapped APBT physical addr %p at virtual addr %p\n",
			 (void *)apbt_address, (void *)apbt_virt_address);
	} else {
		pr_debug("Failed mapping APBT phy address at %p\n",
			 (void *)apbt_address);
		goto panic_noapbt;
	}
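	/*
	 * mtmr->freq_hz is in Hz; dividing by USEC_PER_SEC stores apbt_freq
	 * in MHz, i.e. timer ticks per microsecond, which the min-delta and
	 * quick-calibration math below relies on.
	 */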
	apbt_freq = mtmr->freq_hz / USEC_PER_SEC;
	sfi_free_mtmr(mtmr);

	/* Now figure out the physical timer id for the clocksource device */
	mtmr = sfi_get_mtmr(APBT_CLOCKSOURCE_NUM);
	if (mtmr == NULL)
		goto panic_noapbt;

	phy_cs_timer_id = (unsigned int)(mtmr->phys_addr & 0xff)
		/ APBTMRS_REG_SIZE;
	pr_debug("Use timer %d for clocksource\n", phy_cs_timer_id);
	return;

panic_noapbt:
	panic("Failed to setup APB system timer\n");

}
159 | ||
160 | static inline void apbt_clear_mapping(void) | |
161 | { | |
162 | iounmap(apbt_virt_address); | |
163 | apbt_virt_address = NULL; | |
164 | } | |
165 | ||
166 | /* | |
167 | * APBT timer interrupt enable / disable | |
168 | */ | |
169 | static inline int is_apbt_capable(void) | |
170 | { | |
171 | return apbt_virt_address ? 1 : 0; | |
172 | } | |
173 | ||
174 | static struct clocksource clocksource_apbt = { | |
175 | .name = "apbt", | |
176 | .rating = APBT_CLOCKSOURCE_RATING, | |
177 | .read = apbt_read_clocksource, | |
178 | .mask = APBT_MASK, | |
179 | .shift = APBT_SHIFT, | |
180 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | |
181 | .resume = apbt_restart_clocksource, | |
182 | }; | |
183 | ||
184 | /* boot APB clock event device */ | |
185 | static struct clock_event_device apbt_clockevent = { | |
186 | .name = "apbt0", | |
187 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, | |
188 | .set_mode = apbt_set_mode, | |
189 | .set_next_event = apbt_next_event, | |
190 | .shift = APBT_SHIFT, | |
191 | .irq = 0, | |
192 | .rating = APBT_CLOCKEVENT_RATING, | |
193 | }; | |
194 | ||
195 | /* | |
196 | * if user does not want to use per CPU apb timer, just give it a lower rating | |
197 | * than local apic timer and skip the late per cpu timer init. | |
198 | */ | |
199 | static inline int __init setup_x86_mrst_timer(char *arg) | |
200 | { | |
201 | if (!arg) | |
202 | return -EINVAL; | |
203 | ||
204 | if (strcmp("apbt_only", arg) == 0) | |
205 | disable_apbt_percpu = 0; | |
206 | else if (strcmp("lapic_and_apbt", arg) == 0) | |
207 | disable_apbt_percpu = 1; | |
208 | else { | |
209 | pr_warning("X86 MRST timer option %s not recognised" | |
210 | " use x86_mrst_timer=apbt_only or lapic_and_apbt\n", | |
211 | arg); | |
212 | return -EINVAL; | |
213 | } | |
214 | return 0; | |
215 | } | |
216 | __setup("x86_mrst_timer=", setup_x86_mrst_timer); | |
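
/*
 * Summary of the option handling above (see setup_x86_mrst_timer):
 *   x86_mrst_timer=apbt_only      - per CPU APB timers drive clockevents
 *   x86_mrst_timer=lapic_and_apbt - local APIC timers per CPU, APB timer
 *                                   rated below them (see the rating
 *                                   adjustment in apbt_clockevent_register)
 */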
217 | ||
218 | /* | |
219 | * start count down from 0xffff_ffff. this is done by toggling the enable bit | |
220 | * then load initial load count to ~0. | |
221 | */ | |
222 | static void apbt_start_counter(int n) | |
223 | { | |
224 | unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL); | |
225 | ||
226 | ctrl &= ~APBTMR_CONTROL_ENABLE; | |
227 | apbt_writel(n, ctrl, APBTMR_N_CONTROL); | |
228 | apbt_writel(n, ~0, APBTMR_N_LOAD_COUNT); | |
229 | /* enable, mask interrupt */ | |
230 | ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC; | |
231 | ctrl |= (APBTMR_CONTROL_ENABLE | APBTMR_CONTROL_INT); | |
232 | apbt_writel(n, ctrl, APBTMR_N_CONTROL); | |
233 | /* read it once to get cached counter value initialized */ | |
234 | apbt_read_clocksource(&clocksource_apbt); | |
235 | } | |
236 | ||
237 | static irqreturn_t apbt_interrupt_handler(int irq, void *data) | |
238 | { | |
239 | struct apbt_dev *dev = (struct apbt_dev *)data; | |
240 | struct clock_event_device *aevt = &dev->evt; | |
241 | ||
242 | if (!aevt->event_handler) { | |
243 | printk(KERN_INFO "Spurious APBT timer interrupt on %d\n", | |
244 | dev->num); | |
245 | return IRQ_NONE; | |
246 | } | |
247 | aevt->event_handler(aevt); | |
248 | return IRQ_HANDLED; | |
249 | } | |
250 | ||
251 | static void apbt_restart_clocksource(void) | |
252 | { | |
253 | apbt_start_counter(phy_cs_timer_id); | |
254 | } | |
255 | ||
/* Setup IRQ routing via IOAPIC */
#ifdef CONFIG_SMP
static void apbt_setup_irq(struct apbt_dev *adev)
{
	struct irq_chip *chip;
	struct irq_desc *desc;

	/* timer0 irq has been set up early */
	if (adev->irq == 0)
		return;
	desc = irq_to_desc(adev->irq);
	chip = get_irq_chip(adev->irq);
	disable_irq(adev->irq);
	desc->status |= IRQ_MOVE_PCNTXT;
	irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
	/* APB timer irqs are set up as mp_irqs, timer is edge triggered */
	set_irq_chip_and_handler_name(adev->irq, chip, handle_edge_irq, "edge");
	enable_irq(adev->irq);
	if (system_state == SYSTEM_BOOTING)
		if (request_irq(adev->irq, apbt_interrupt_handler,
				IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
				adev->name, adev)) {
			printk(KERN_ERR "Failed to request IRQ for APBT%d\n",
			       adev->num);
		}
}
#endif
283 | ||
284 | static void apbt_enable_int(int n) | |
285 | { | |
286 | unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL); | |
287 | /* clear pending intr */ | |
288 | apbt_readl(n, APBTMR_N_EOI); | |
289 | ctrl &= ~APBTMR_CONTROL_INT; | |
290 | apbt_writel(n, ctrl, APBTMR_N_CONTROL); | |
291 | } | |
292 | ||
293 | static void apbt_disable_int(int n) | |
294 | { | |
295 | unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL); | |
296 | ||
297 | ctrl |= APBTMR_CONTROL_INT; | |
298 | apbt_writel(n, ctrl, APBTMR_N_CONTROL); | |
299 | } | |
300 | ||
301 | ||
302 | static int __init apbt_clockevent_register(void) | |
303 | { | |
304 | struct sfi_timer_table_entry *mtmr; | |
305 | ||
306 | mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM); | |
307 | if (mtmr == NULL) { | |
308 | printk(KERN_ERR "Failed to get MTMR %d from SFI\n", | |
309 | APBT_CLOCKEVENT0_NUM); | |
310 | return -ENODEV; | |
311 | } | |
312 | ||
313 | /* | |
314 | * We need to calculate the scaled math multiplication factor for | |
315 | * nanosecond to apbt tick conversion. | |
316 | * mult = (nsec/cycle)*2^APBT_SHIFT | |
317 | */ | |
318 | apbt_clockevent.mult = div_sc((unsigned long) mtmr->freq_hz | |
319 | , NSEC_PER_SEC, APBT_SHIFT); | |
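	/*
	 * For example, a (hypothetical) 25 MHz timer clock would give
	 * mult = div_sc(25000000, NSEC_PER_SEC, 22) = 104857, so
	 * ticks = (ns * mult) >> APBT_SHIFT.
	 */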
320 | ||
321 | /* Calculate the min / max delta */ | |
322 | apbt_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, | |
323 | &apbt_clockevent); | |
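	/* apbt_freq is in ticks per usec, so this yields a delta in ticks */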
	apbt_clockevent.min_delta_ns = clockevent_delta2ns(
			APBT_MIN_DELTA_USEC * apbt_freq,
			&apbt_clockevent);
	/*
	 * Start apbt with the boot cpu mask and make it
	 * global if not used for per cpu timer.
	 */
	apbt_clockevent.cpumask = cpumask_of(smp_processor_id());

	if (disable_apbt_percpu) {
		apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100;
		global_clock_event = &apbt_clockevent;
		printk(KERN_DEBUG "%s clockevent registered as global\n",
		       global_clock_event->name);
	}

	if (request_irq(apbt_clockevent.irq, apbt_interrupt_handler,
			IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
			apbt_clockevent.name, &apbt_clockevent)) {
		printk(KERN_ERR "Failed to request IRQ for APBT%d\n",
		       apbt_clockevent.irq);
	}

	clockevents_register_device(&apbt_clockevent);
	/* Start APBT 0 interrupts */
	apbt_enable_int(APBT_CLOCKEVENT0_NUM);

	sfi_free_mtmr(mtmr);
	return 0;
}
354 | ||
355 | #ifdef CONFIG_SMP | |
356 | /* Should be called with per cpu */ | |
357 | void apbt_setup_secondary_clock(void) | |
358 | { | |
359 | struct apbt_dev *adev; | |
360 | struct clock_event_device *aevt; | |
361 | int cpu; | |
362 | ||
363 | /* Don't register boot CPU clockevent */ | |
364 | cpu = smp_processor_id(); | |
365 | if (cpu == boot_cpu_id) | |
366 | return; | |
367 | /* | |
368 | * We need to calculate the scaled math multiplication factor for | |
369 | * nanosecond to apbt tick conversion. | |
370 | * mult = (nsec/cycle)*2^APBT_SHIFT | |
371 | */ | |
372 | printk(KERN_INFO "Init per CPU clockevent %d\n", cpu); | |
373 | adev = &per_cpu(cpu_apbt_dev, cpu); | |
374 | aevt = &adev->evt; | |
375 | ||
376 | memcpy(aevt, &apbt_clockevent, sizeof(*aevt)); | |
377 | aevt->cpumask = cpumask_of(cpu); | |
378 | aevt->name = adev->name; | |
379 | aevt->mode = CLOCK_EVT_MODE_UNUSED; | |
380 | ||
381 | printk(KERN_INFO "Registering CPU %d clockevent device %s, mask %08x\n", | |
382 | cpu, aevt->name, *(u32 *)aevt->cpumask); | |
383 | ||
384 | apbt_setup_irq(adev); | |
385 | ||
386 | clockevents_register_device(aevt); | |
387 | ||
388 | apbt_enable_int(cpu); | |
389 | ||
390 | return; | |
391 | } | |
392 | ||
393 | /* | |
394 | * this notify handler process CPU hotplug events. in case of S0i3, nonboot | |
395 | * cpus are disabled/enabled frequently, for performance reasons, we keep the | |
396 | * per cpu timer irq registered so that we do need to do free_irq/request_irq. | |
397 | * | |
398 | * TODO: it might be more reliable to directly disable percpu clockevent device | |
399 | * without the notifier chain. currently, cpu 0 may get interrupts from other | |
400 | * cpu timers during the offline process due to the ordering of notification. | |
401 | * the extra interrupt is harmless. | |
402 | */ | |
403 | static int apbt_cpuhp_notify(struct notifier_block *n, | |
404 | unsigned long action, void *hcpu) | |
405 | { | |
406 | unsigned long cpu = (unsigned long)hcpu; | |
407 | struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu); | |
408 | ||
409 | switch (action & 0xf) { | |
410 | case CPU_DEAD: | |
411 | apbt_disable_int(cpu); | |
412 | if (system_state == SYSTEM_RUNNING) | |
413 | pr_debug("skipping APBT CPU %lu offline\n", cpu); | |
414 | else if (adev) { | |
415 | pr_debug("APBT clockevent for cpu %lu offline\n", cpu); | |
416 | free_irq(adev->irq, adev); | |
417 | } | |
418 | break; | |
419 | default: | |
420 | pr_debug(KERN_INFO "APBT notified %lu, no action\n", action); | |
421 | } | |
422 | return NOTIFY_OK; | |
423 | } | |
424 | ||
425 | static __init int apbt_late_init(void) | |
426 | { | |
427 | if (disable_apbt_percpu) | |
428 | return 0; | |
429 | /* This notifier should be called after workqueue is ready */ | |
430 | hotcpu_notifier(apbt_cpuhp_notify, -20); | |
431 | return 0; | |
432 | } | |
433 | fs_initcall(apbt_late_init); | |
434 | #else | |
435 | ||
436 | void apbt_setup_secondary_clock(void) {} | |
437 | ||
438 | #endif /* CONFIG_SMP */ | |
439 | ||
440 | static void apbt_set_mode(enum clock_event_mode mode, | |
441 | struct clock_event_device *evt) | |
442 | { | |
443 | unsigned long ctrl; | |
444 | uint64_t delta; | |
445 | int timer_num; | |
446 | struct apbt_dev *adev = EVT_TO_APBT_DEV(evt); | |
447 | ||
448 | timer_num = adev->num; | |
449 | pr_debug("%s CPU %d timer %d mode=%d\n", | |
450 | __func__, first_cpu(*evt->cpumask), timer_num, mode); | |
451 | ||
452 | switch (mode) { | |
453 | case CLOCK_EVT_MODE_PERIODIC: | |
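		/* delta = ticks per jiffy: ns per jiffy scaled by mult/shift */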
		delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * apbt_clockevent.mult;
		delta >>= apbt_clockevent.shift;
		ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
		ctrl |= APBTMR_CONTROL_MODE_PERIODIC;
		apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
		/*
		 * DW APB p. 46: the timer has to be disabled before loading
		 * the counter, otherwise it may cause a sync problem.
		 */
		ctrl &= ~APBTMR_CONTROL_ENABLE;
		apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
		udelay(1);
		pr_debug("Setting clock period %d for HZ %d\n", (int)delta, HZ);
		apbt_writel(timer_num, delta, APBTMR_N_LOAD_COUNT);
		ctrl |= APBTMR_CONTROL_ENABLE;
		apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
		break;
	/* APB timer does not have one-shot mode, use free-running mode */
	case CLOCK_EVT_MODE_ONESHOT:
		ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
		/*
		 * Set free-running mode. This mode lets the timer reload the
		 * maximum timeout, which gives us time (about 3 min on a
		 * 25 MHz clock) to rearm the next event, and therefore
		 * emulates one-shot mode.
		 */
		ctrl &= ~APBTMR_CONTROL_ENABLE;
		ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;

		apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
		/* write again to set free running mode */
		apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);

		/*
		 * DW APB p. 46: load the counter with all 1s before starting
		 * free running mode.
		 */
		apbt_writel(timer_num, ~0, APBTMR_N_LOAD_COUNT);
		ctrl &= ~APBTMR_CONTROL_INT;
		ctrl |= APBTMR_CONTROL_ENABLE;
		apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		apbt_disable_int(timer_num);
		ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
		ctrl &= ~APBTMR_CONTROL_ENABLE;
		apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
		break;

	case CLOCK_EVT_MODE_RESUME:
		apbt_enable_int(timer_num);
		break;
	}
}
509 | ||
510 | static int apbt_next_event(unsigned long delta, | |
511 | struct clock_event_device *evt) | |
512 | { | |
513 | unsigned long ctrl; | |
514 | int timer_num; | |
515 | ||
516 | struct apbt_dev *adev = EVT_TO_APBT_DEV(evt); | |
517 | ||
518 | timer_num = adev->num; | |
519 | /* Disable timer */ | |
520 | ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL); | |
521 | ctrl &= ~APBTMR_CONTROL_ENABLE; | |
522 | apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL); | |
523 | /* write new count */ | |
524 | apbt_writel(timer_num, delta, APBTMR_N_LOAD_COUNT); | |
525 | ctrl |= APBTMR_CONTROL_ENABLE; | |
526 | apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL); | |
527 | return 0; | |
528 | } | |
529 | ||
530 | /* | |
531 | * APB timer clock is not in sync with pclk on Langwell, which translates to | |
532 | * unreliable read value caused by sampling error. the error does not add up | |
533 | * overtime and only happens when sampling a 0 as a 1 by mistake. so the time | |
534 | * would go backwards. the following code is trying to prevent time traveling | |
535 | * backwards. little bit paranoid. | |
536 | */ | |
537 | static cycle_t apbt_read_clocksource(struct clocksource *cs) | |
538 | { | |
539 | unsigned long t0, t1, t2; | |
540 | static unsigned long last_read; | |
541 | ||
542 | bad_count: | |
543 | t1 = apbt_readl(phy_cs_timer_id, | |
544 | APBTMR_N_CURRENT_VALUE); | |
545 | t2 = apbt_readl(phy_cs_timer_id, | |
546 | APBTMR_N_CURRENT_VALUE); | |
547 | if (unlikely(t1 < t2)) { | |
548 | pr_debug("APBT: read current count error %lx:%lx:%lx\n", | |
549 | t1, t2, t2 - t1); | |
550 | goto bad_count; | |
551 | } | |
552 | /* | |
553 | * check against cached last read, makes sure time does not go back. | |
554 | * it could be a normal rollover but we will do tripple check anyway | |
555 | */ | |
556 | if (unlikely(t2 > last_read)) { | |
557 | /* check if we have a normal rollover */ | |
558 | unsigned long raw_intr_status = | |
559 | apbt_readl_reg(APBTMRS_RAW_INT_STATUS); | |
560 | /* | |
561 | * cs timer interrupt is masked but raw intr bit is set if | |
562 | * rollover occurs. then we read EOI reg to clear it. | |
563 | */ | |
564 | if (raw_intr_status & (1 << phy_cs_timer_id)) { | |
565 | apbt_readl(phy_cs_timer_id, APBTMR_N_EOI); | |
566 | goto out; | |
567 | } | |
568 | pr_debug("APB CS going back %lx:%lx:%lx ", | |
569 | t2, last_read, t2 - last_read); | |
570 | bad_count_x3: | |
571 | pr_debug(KERN_INFO "tripple check enforced\n"); | |
572 | t0 = apbt_readl(phy_cs_timer_id, | |
573 | APBTMR_N_CURRENT_VALUE); | |
574 | udelay(1); | |
575 | t1 = apbt_readl(phy_cs_timer_id, | |
576 | APBTMR_N_CURRENT_VALUE); | |
577 | udelay(1); | |
578 | t2 = apbt_readl(phy_cs_timer_id, | |
579 | APBTMR_N_CURRENT_VALUE); | |
580 | if ((t2 > t1) || (t1 > t0)) { | |
581 | printk(KERN_ERR "Error: APB CS tripple check failed\n"); | |
582 | goto bad_count_x3; | |
583 | } | |
584 | } | |
585 | out: | |
586 | last_read = t2; | |
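	/*
	 * The hardware counter counts down from ~0, so invert the value to
	 * present a monotonically increasing cycle count to the clocksource
	 * core.
	 */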
	return (cycle_t)~t2;
}

static int apbt_clocksource_register(void)
{
	u64 start, now;
	cycle_t t1;

	/* Start the counter; use timer 2 as source, timers 0/1 for events */
	apbt_start_counter(phy_cs_timer_id);

	/* Verify whether the apbt counter works */
	t1 = apbt_read_clocksource(&clocksource_apbt);
	rdtscll(start);

	/*
	 * We don't know the TSC frequency yet, but waiting for
	 * 200000 TSC cycles is safe:
	 * 4 GHz == 50us
	 * 1 GHz == 200us
	 */
	do {
		rep_nop();
		rdtscll(now);
	} while ((now - start) < 200000UL);

	/* APBT is the only always-on clocksource, it has to work! */
	if (t1 == apbt_read_clocksource(&clocksource_apbt))
		panic("APBT counter not counting. APBT disabled\n");

	/*
	 * Initialize and register the APBT clocksource: convert the rate to
	 * ns per clock cycle.
	 * mult = (ns/c) * 2^APBT_SHIFT
	 */
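	/*
	 * apbt_freq is in MHz, so MSEC_PER_SEC / apbt_freq is ns per cycle;
	 * e.g. a (hypothetical) 25 MHz clock gives 40 ns/cycle, scaled by
	 * 2^APBT_SHIFT.
	 */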
	clocksource_apbt.mult = div_sc(MSEC_PER_SEC,
				       (unsigned long) apbt_freq, APBT_SHIFT);
	clocksource_register(&clocksource_apbt);

	return 0;
}

/*
 * Early setup of the APBT timer: only timer 0 is used for booting, then we
 * switch to per CPU timers if possible.
 * Panics if setup fails, since this is the only platform timer on
 * Moorestown.
 */
void __init apbt_time_init(void)
{
#ifdef CONFIG_SMP
	int i;
	struct sfi_timer_table_entry *p_mtmr;
	unsigned int percpu_timer;
	struct apbt_dev *adev;
#endif

	if (apb_timer_block_enabled)
		return;
	apbt_set_mapping();
	if (apbt_virt_address) {
		pr_debug("Found APBT version 0x%lx\n",
			 apbt_readl_reg(APBTMRS_COMP_VERSION));
	} else
		goto out_noapbt;
	/*
	 * Read the frequency and check for a sane value; for the ESL model
	 * we extend the possible clock range to allow time scaling.
	 */
	if (apbt_freq < APBT_MIN_FREQ || apbt_freq > APBT_MAX_FREQ) {
		pr_debug("APBT has invalid freq 0x%llx\n", apbt_freq);
		goto out_noapbt;
	}
	if (apbt_clocksource_register()) {
		pr_debug("APBT has failed to register clocksource\n");
		goto out_noapbt;
	}
	if (!apbt_clockevent_register())
		apb_timer_block_enabled = 1;
	else {
		pr_debug("APBT has failed to register clockevent\n");
		goto out_noapbt;
	}
#ifdef CONFIG_SMP
	/* kernel cmdline disabled the per cpu apb timer, use lapic timers */
	if (disable_apbt_percpu) {
		printk(KERN_INFO "apbt: disabled per cpu timer\n");
		return;
	}
	pr_debug("%s: %d CPUs online\n", __func__, num_online_cpus());
	if (num_possible_cpus() <= sfi_mtimer_num) {
		percpu_timer = 1;
		apbt_num_timers_used = num_possible_cpus();
	} else {
		percpu_timer = 0;
		apbt_num_timers_used = 1;
		adev = &per_cpu(cpu_apbt_dev, 0);
		adev->flags &= ~APBT_DEV_USED;
	}
	pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used);

	/* here we set up the per CPU timer data structures */
	apbt_devs = kzalloc(sizeof(struct apbt_dev) * apbt_num_timers_used,
			    GFP_KERNEL);
	if (!apbt_devs) {
		printk(KERN_ERR "Failed to allocate APB timer devices\n");
		return;
	}
	for (i = 0; i < apbt_num_timers_used; i++) {
		adev = &per_cpu(cpu_apbt_dev, i);
		adev->num = i;
		adev->cpu = i;
		p_mtmr = sfi_get_mtmr(i);
		if (p_mtmr) {
			adev->tick = p_mtmr->freq_hz;
			adev->irq = p_mtmr->irq;
		} else
			printk(KERN_ERR "Failed to get timer for cpu %d\n", i);
		adev->count = 0;
		sprintf(adev->name, "apbt%d", i);
	}
#endif

	return;

out_noapbt:
	apbt_clear_mapping();
	apb_timer_block_enabled = 0;
	panic("failed to enable APB timer\n");
}
719 | ||
720 | static inline void apbt_disable(int n) | |
721 | { | |
722 | if (is_apbt_capable()) { | |
723 | unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL); | |
724 | ctrl &= ~APBTMR_CONTROL_ENABLE; | |
725 | apbt_writel(n, ctrl, APBTMR_N_CONTROL); | |
726 | } | |
727 | } | |
728 | ||
/* called before apb_timer_enable, use early map */
unsigned long apbt_quick_calibrate(void)
{
	int i, scale;
	u64 old, new;
	cycle_t t1, t2;
	unsigned long khz = 0;
	u32 loop, shift;

	apbt_set_mapping();
	apbt_start_counter(phy_cs_timer_id);

	/* check if the timer can count down, otherwise return */
	old = apbt_read_clocksource(&clocksource_apbt);
	i = 10000;
	while (--i) {
		if (old != apbt_read_clocksource(&clocksource_apbt))
			break;
	}
	if (!i)
		goto failed;

	/* count 16 ms: apbt_freq is ticks/us, so freq * 1000 is ticks/ms */
	loop = (apbt_freq * 1000) << 4;

	/* restart the timer so it won't reach 0 during the calibration */
	apbt_start_counter(phy_cs_timer_id);

	old = apbt_read_clocksource(&clocksource_apbt);
	old += loop;

	t1 = __native_read_tsc();

	do {
		new = apbt_read_clocksource(&clocksource_apbt);
	} while (new < old);

	t2 = __native_read_tsc();

	shift = 5;
	if (unlikely(loop >> shift == 0)) {
		printk(KERN_INFO
		       "APBT TSC calibration failed, not enough resolution\n");
		return 0;
	}
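	/*
	 * scale is the TSC cycles elapsed per (loop >> shift) APB ticks;
	 * multiplying by the APB rate in kHz (apbt_freq MHz * 1000) and
	 * shifting back yields the TSC frequency in kHz.
	 */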
	scale = (int)div_u64((t2 - t1), loop >> shift);
	khz = (scale * apbt_freq * 1000) >> shift;
	printk(KERN_INFO "TSC freq calculated by APB timer is %lu khz\n", khz);
	return khz;
failed:
	return 0;
}