Merge branches 'core/debug', 'core/futexes', 'core/locking', 'core/rcu', 'core/signal...
[linux-2.6-block.git] / arch / sh / kernel / timers / timer-tmu.c
CommitLineData
aa01666d
PM
1/*
2 * arch/sh/kernel/timers/timer-tmu.c - TMU Timer Support
3 *
57be2b48 4 * Copyright (C) 2005 - 2007 Paul Mundt
aa01666d
PM
5 *
6 * TMU handling code hacked out of arch/sh/kernel/time.c
7 *
8 * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
9 * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
10 * Copyright (C) 2002, 2003, 2004 Paul Mundt
11 * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
12 *
13 * This file is subject to the terms and conditions of the GNU General Public
14 * License. See the file "COPYING" in the main directory of this archive
15 * for more details.
16 */
17#include <linux/init.h>
18#include <linux/kernel.h>
19#include <linux/interrupt.h>
aa01666d 20#include <linux/seqlock.h>
57be2b48 21#include <linux/clockchips.h>
aa01666d
PM
22#include <asm/timer.h>
23#include <asm/rtc.h>
24#include <asm/io.h>
25#include <asm/irq.h>
26#include <asm/clock.h>
27
28#define TMU_TOCR_INIT 0x00
57be2b48 29#define TMU_TCR_INIT 0x0020
aa01666d 30
61c66387
FV
31#define TMU0 (0)
32#define TMU1 (1)
33
34static inline void _tmu_start(int tmu_num)
57be2b48 35{
61c66387 36 ctrl_outb(ctrl_inb(TMU_012_TSTR) | (0x1<<tmu_num), TMU_012_TSTR);
57be2b48 37}
aa01666d 38
61c66387 39static inline void _tmu_set_irq(int tmu_num, int enabled)
aa01666d 40{
61c66387
FV
41 register unsigned long tmu_tcr = TMU0_TCR + (0xc*tmu_num);
42 ctrl_outw( (enabled ? ctrl_inw(tmu_tcr) | (1<<5) : ctrl_inw(tmu_tcr) & ~(1<<5)), tmu_tcr);
43}
aa01666d 44
61c66387
FV
45static inline void _tmu_stop(int tmu_num)
46{
47 ctrl_outb(ctrl_inb(TMU_012_TSTR) & ~(0x1<<tmu_num), TMU_012_TSTR);
48}
49
50static inline void _tmu_clear_status(int tmu_num)
51{
52 register unsigned long tmu_tcr = TMU0_TCR + (0xc*tmu_num);
53 /* Clear UNF bit */
54 ctrl_outw(ctrl_inw(tmu_tcr) & ~0x100, tmu_tcr);
55}
aa01666d 56
61c66387
FV
57static inline unsigned long _tmu_read(int tmu_num)
58{
59 return ctrl_inl(TMU0_TCNT+0xC*tmu_num);
60}
61
/*
 * Start both timer channels and enable TMU0's interrupt.
 * TMU0 drives the clockevent; TMU1 serves as the free-running
 * time base (see tmu_timer_read()).  Always returns 0.
 */
static int tmu_timer_start(void)
{
	_tmu_start(TMU0);
	_tmu_start(TMU1);
	_tmu_set_irq(TMU0, 1);
	return 0;
}
aa01666d 69
57be2b48
PM
/*
 * Stop both timer channels and clear any pending TMU0 underflow
 * status so no stale interrupt fires on restart.  Always returns 0.
 */
static int tmu_timer_stop(void)
{
	_tmu_stop(TMU0);
	_tmu_stop(TMU1);
	_tmu_clear_status(TMU0);
	return 0;
}
aa01666d 77
61c66387
FV
/*
 * When the module clock is scaled, TMU1 runs at the same scaled
 * frequency; this flag records whether such scaling is in effect
 * so reads can be shifted to compensate.
 */
82static int tmus_are_scaled;
83
57be2b48
PM
/*
 * Continuous time-base read.  TMU1's TCNT is a down-counter, so
 * invert it to obtain an up-counting value; the shift compensates
 * for a halved module clock (see tmus_are_scaled).
 */
static cycle_t tmu_timer_read(void)
{
	return ((cycle_t)(~_tmu_read(TMU1))) << tmus_are_scaled;
}
88
89
/* Last interval programmed into each channel, kept so the interval
 * can be rescaled when the clock rate changes (tmu_clk_recalc). */
static unsigned long tmu_latest_interval[3];

/*
 * Program a channel with a new countdown interval.  The channel is
 * stopped while TCNT/TCOR are written, then restarted.  @reload
 * selects whether the channel auto-reloads (periodic) or counts
 * down once (oneshot).
 */
static void tmu_timer_set_interval(int tmu_num, unsigned long interval, unsigned int reload)
{
	unsigned long tmu_tcnt = TMU0_TCNT + tmu_num*0xC;
	unsigned long tmu_tcor = TMU0_TCOR + tmu_num*0xC;

	/* Stop the channel so it isn't counting while reprogrammed. */
	_tmu_stop(tmu_num);

	ctrl_outl(interval, tmu_tcnt);
	tmu_latest_interval[tmu_num] = interval;

	/*
	 * TCNT reloads from TCOR on underflow, clear it if we don't
	 * intend to auto-reload
	 */
	ctrl_outl(reload ? interval : 0, tmu_tcor);

	_tmu_start(tmu_num);
}
109
/*
 * clockevent set_next_event hook: arm TMU0 to fire after @cycles
 * ticks (auto-reloading only in periodic mode) and re-enable its
 * interrupt, which the IRQ handler disables in oneshot mode.
 */
static int tmu_set_next_event(unsigned long cycles,
			      struct clock_event_device *evt)
{
	tmu_timer_set_interval(TMU0, cycles, evt->mode == CLOCK_EVT_MODE_PERIODIC);
	_tmu_set_irq(TMU0, 1);
	return 0;
}
aa01666d 117
57be2b48
PM
/*
 * clockevent mode switch for TMU0.  Only the reload register (TCOR)
 * needs updating: periodic mode restores the last programmed
 * interval so the counter keeps auto-reloading, oneshot clears it
 * so the counter stops refilling on underflow.
 */
static void tmu_set_mode(enum clock_event_mode mode,
			 struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		ctrl_outl(tmu_latest_interval[TMU0], TMU0_TCOR);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		ctrl_outl(0, TMU0_TCOR);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_RESUME:
		/* Nothing to do for these transitions. */
		break;
	}
}
134
57be2b48
PM
/*
 * TMU0 clockevent device.  mult, min_delta_ns and max_delta_ns are
 * filled in from the measured clock rate in tmu_timer_init() and
 * updated again in tmu_clk_recalc() when the rate changes.
 */
static struct clock_event_device tmu0_clockevent = {
	.name		= "tmu0",
	.shift		= 32,
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= tmu_set_mode,
	.set_next_event	= tmu_set_next_event,
};
142
/*
 * TMU0 underflow interrupt handler: acknowledge the interrupt,
 * keep the IRQ enabled only outside oneshot mode (in oneshot it is
 * re-enabled when the next event is armed via tmu_set_next_event),
 * then invoke the registered clockevent handler.
 */
static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
{
	struct clock_event_device *evt = &tmu0_clockevent;
	_tmu_clear_status(TMU0);
	_tmu_set_irq(TMU0, tmu0_clockevent.mode != CLOCK_EVT_MODE_ONESHOT);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}
153
/* IRQ action for the TMU0 channel, registered in tmu_timer_init(). */
static struct irqaction tmu0_irq = {
	.name		= "periodic/oneshot timer",
	.handler	= tmu_timer_interrupt,
	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
	.mask		= CPU_MASK_NONE,
};
160
/*
 * clk init hook for a TMU channel clock: program the channel's TCR
 * with the initial control value and derive the channel rate from
 * the parent (module) clock and the TCR divisor field.
 */
static void __init tmu_clk_init(struct clk *clk)
{
	/* Low 3 bits of TCR select the prescaler. */
	u8 divisor = TMU_TCR_INIT & 0x7;
	/* Channel index is parsed from the clock name, e.g. "tmu0_clk"[3]. */
	int tmu_num = clk->name[3]-'0';
	ctrl_outw(TMU_TCR_INIT, TMU0_TCR+(tmu_num*0xC));
	/* rate = parent / (4 << 2*divisor), i.e. /4, /16, /64, ... */
	clk->rate = clk_get_rate(clk->parent) / (4 << (divisor << 1));
}
168
/*
 * clk recalc hook: recompute a channel's rate after the parent
 * (module) clock changed.  If TMU0's rate actually changed, its
 * clockevent scaling factors and current interval are reprogrammed
 * under local IRQ protection; TMU1 needs nothing beyond the rate
 * update (see the tmus_are_scaled comment above).
 */
static void tmu_clk_recalc(struct clk *clk)
{
	int tmu_num = clk->name[3]-'0';
	unsigned long prev_rate = clk_get_rate(clk);
	unsigned long flags;
	u8 divisor = ctrl_inw(TMU0_TCR+tmu_num*0xC) & 0x7;
	clk->rate = clk_get_rate(clk->parent) / (4 << (divisor << 1));

	/* Rate unchanged: nothing to reprogram. */
	if(prev_rate==clk_get_rate(clk))
		return;

	if(tmu_num)
		return; /* No more work on TMU1 */

	local_irq_save(flags);
	/* Remember whether the clock slowed down (rate dropped). */
	tmus_are_scaled = (prev_rate > clk->rate);

	_tmu_stop(TMU0);

	/* Rescale the clockevent conversion factors to the new rate. */
	tmu0_clockevent.mult = div_sc(clk->rate, NSEC_PER_SEC,
				tmu0_clockevent.shift);
	tmu0_clockevent.max_delta_ns =
			clockevent_delta2ns(-1, &tmu0_clockevent);
	tmu0_clockevent.min_delta_ns =
			clockevent_delta2ns(1, &tmu0_clockevent);

	/*
	 * NOTE(review): the interval is adjusted by exactly one bit,
	 * which assumes the rate changed by a factor of two — confirm
	 * against the possible module clock transitions.
	 */
	if (tmus_are_scaled)
		tmu_latest_interval[TMU0] >>= 1;
	else
		tmu_latest_interval[TMU0] <<= 1;

	tmu_timer_set_interval(TMU0,
		tmu_latest_interval[TMU0],
		tmu0_clockevent.mode == CLOCK_EVT_MODE_PERIODIC);

	_tmu_start(TMU0);

	local_irq_restore(flags);
}
208
61c66387
FV
/* clk framework hooks shared by both TMU channel clocks. */
static struct clk_ops tmu_clk_ops = {
	.init		= tmu_clk_init,
	.recalc		= tmu_clk_recalc,
};
213
/* Clock for TMU channel 0 (the clockevent channel); parent is set
 * to "module_clk" in tmu_timer_init(). */
static struct clk tmu0_clk = {
	.name		= "tmu0_clk",
	.ops		= &tmu_clk_ops,
};
218
/* Clock for TMU channel 1 (the free-running time base); parent is
 * set to "module_clk" in tmu_timer_init(). */
static struct clk tmu1_clk = {
	.name		= "tmu1_clk",
	.ops		= &tmu_clk_ops,
};
223
aa01666d
PM
/*
 * System timer bring-up: hook up the TMU0 IRQ, register and enable
 * both channel clocks, program TMU0 as the HZ-rate clockevent and
 * TMU1 as a free-running maximum-period counter, then register the
 * clockevent device.  Always returns 0.
 */
static int tmu_timer_init(void)
{
	unsigned long interval;
	unsigned long frequency;

	setup_irq(CONFIG_SH_TIMER_IRQ, &tmu0_irq);

	tmu0_clk.parent = clk_get(NULL, "module_clk");
	tmu1_clk.parent = clk_get(NULL, "module_clk");

	/* Make sure the channels are quiescent before programming. */
	tmu_timer_stop();

	/* NOTE(review): these subtypes are excluded from the TMU_TOCR
	 * write — presumably they lack the register; confirm against
	 * the per-subtype hardware manuals. */
#if !defined(CONFIG_CPU_SUBTYPE_SH7720) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7721) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7760) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7785) && \
    !defined(CONFIG_CPU_SUBTYPE_SHX3)
	ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
#endif

	clk_register(&tmu0_clk);
	clk_register(&tmu1_clk);
	clk_enable(&tmu0_clk);
	clk_enable(&tmu1_clk);

	/* Ticks per jiffy, rounded to nearest. */
	frequency = clk_get_rate(&tmu0_clk);
	interval = (frequency + HZ / 2) / HZ;

	tmu_timer_set_interval(TMU0, interval, 1);
	/* TMU1 counts down from all-ones for the longest possible period. */
	tmu_timer_set_interval(TMU1, ~0, 1);

	_tmu_start(TMU1);

	sh_hpt_frequency = clk_get_rate(&tmu1_clk);

	tmu0_clockevent.mult = div_sc(frequency, NSEC_PER_SEC,
				      tmu0_clockevent.shift);
	tmu0_clockevent.max_delta_ns =
			clockevent_delta2ns(-1, &tmu0_clockevent);
	tmu0_clockevent.min_delta_ns =
			clockevent_delta2ns(1, &tmu0_clockevent);

	tmu0_clockevent.cpumask = cpumask_of_cpu(0);

	clockevents_register_device(&tmu0_clockevent);

	return 0;
}
272
/* sys_timer operations exported by this driver. */
static struct sys_timer_ops tmu_timer_ops = {
	.init		= tmu_timer_init,
	.start		= tmu_timer_start,
	.stop		= tmu_timer_stop,
	.read		= tmu_timer_read,
};
279
/* The TMU system timer, picked up by the arch timer registration code. */
struct sys_timer tmu_timer = {
	.name	= "tmu",
	.ops	= &tmu_timer_ops,
};