drivers/clocksource/sh_tmu.c — SuperH Timer Unit (TMU) driver
0b9294fe 1// SPDX-License-Identifier: GPL-2.0
9570ef20
MD
2/*
3 * SuperH Timer Support - TMU
4 *
5 * Copyright (C) 2009 Magnus Damm
9570ef20
MD
6 */
7
13931f80
LP
8#include <linux/clk.h>
9#include <linux/clockchips.h>
10#include <linux/clocksource.h>
11#include <linux/delay.h>
12#include <linux/err.h>
9570ef20 13#include <linux/init.h>
9570ef20 14#include <linux/interrupt.h>
9570ef20 15#include <linux/io.h>
13931f80 16#include <linux/ioport.h>
9570ef20 17#include <linux/irq.h>
7deeab5d 18#include <linux/module.h>
3e29b554 19#include <linux/of.h>
13931f80 20#include <linux/platform_device.h>
2ee619f9 21#include <linux/pm_domain.h>
eaa49a8c 22#include <linux/pm_runtime.h>
13931f80
LP
23#include <linux/sh_timer.h>
24#include <linux/slab.h>
25#include <linux/spinlock.h>
9570ef20 26
507fd01d
BG
27#ifdef CONFIG_SUPERH
28#include <asm/platform_early.h>
29#endif
30
/* Hardware variants: they differ in register layout (see sh_tmu_read/write). */
enum sh_tmu_model {
	SH_TMU,		/* standard TMU: TSTR at offset 4, channels at 8 + n*12 */
	SH_TMU_SH3,	/* SH-3 style TMU: TSTR at offset 2, channels at 4 + n*12 */
};

struct sh_tmu_device;

/* Per-channel state: one TMU channel backs either a clockevent or a clocksource. */
struct sh_tmu_channel {
	struct sh_tmu_device *tmu;	/* parent device */
	unsigned int index;		/* channel number, also the TSTR bit */

	void __iomem *base;		/* channel register window */
	int irq;

	unsigned long periodic;		/* reload value for periodic mode (ticks per HZ) */
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;		/* clocksource currently enabled */
	unsigned int enable_count;	/* nested enable refcount (clockevent + clocksource) */
};

/* Per-device state shared by all channels of one TMU block. */
struct sh_tmu_device {
	struct platform_device *pdev;

	void __iomem *mapbase;		/* whole TMU register block */
	struct clk *clk;		/* functional clock "fck" */
	unsigned long rate;		/* input clock rate / 4 (TPSC_CLK4 prescaler) */

	enum sh_tmu_model model;

	raw_spinlock_t lock; /* Protect the shared start/stop register */

	struct sh_tmu_channel *channels;
	unsigned int num_channels;

	bool has_clockevent;
	bool has_clocksource;
};

/* Pseudo register indices used by sh_tmu_read()/sh_tmu_write(). */
#define TSTR -1 /* shared register */
#define TCOR 0 /* channel register */
#define TCNT 1 /* channel register */
#define TCR 2 /* channel register */

/* TCR bits; TPSC selects the input clock prescaler (this driver uses /4). */
#define TCR_UNF (1 << 8)	/* presumably the underflow status flag — unused here */
#define TCR_UNIE (1 << 5)	/* underflow interrupt enable */
#define TCR_TPSC_CLK4 (0 << 0)
#define TCR_TPSC_CLK16 (1 << 0)
#define TCR_TPSC_CLK64 (2 << 0)
#define TCR_TPSC_CLK256 (3 << 0)
#define TCR_TPSC_CLK1024 (4 << 0)
#define TCR_TPSC_MASK (7 << 0)

/*
 * Read a TMU register. TSTR is a shared 8-bit register whose offset depends
 * on the hardware model; channel registers live at reg_nr * 4 from the
 * channel base, with TCR being 16-bit and TCOR/TCNT 32-bit.
 */
static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return ioread8(ch->tmu->mapbase + 2);
		case SH_TMU:
			return ioread8(ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		return ioread16(ch->base + offs);
	else
		return ioread32(ch->base + offs);
}

/*
 * Write a TMU register. Mirrors sh_tmu_read(): TSTR is an 8-bit shared
 * register at a model-dependent offset, TCR is 16-bit, TCOR/TCNT 32-bit.
 */
static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
				unsigned long value)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return iowrite8(value, ch->tmu->mapbase + 2);
		case SH_TMU:
			return iowrite8(value, ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		iowrite16(value, ch->base + offs);
	else
		iowrite32(value, ch->base + offs);
}

/*
 * Start (start != 0) or stop a channel by toggling its bit in TSTR.
 * TSTR is shared by all channels, so the read-modify-write must run
 * under tmu->lock with interrupts disabled.
 */
static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&ch->tmu->lock, flags);
	value = sh_tmu_read(ch, TSTR);

	if (start)
		value |= 1 << ch->index;
	else
		value &= ~(1 << ch->index);

	sh_tmu_write(ch, TSTR, value);
	raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
}

/*
 * Low-level channel bring-up: enable the clock, program a free-running
 * countdown from 0xffffffff with the /4 prescaler and interrupts off,
 * then start the channel. The register write order below is deliberate:
 * the channel is stopped before TCOR/TCNT/TCR are reprogrammed.
 */
static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
	int ret;

	/* enable clock */
	ret = clk_enable(ch->tmu->clk);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_tmu_start_stop_ch(ch, 0);

	/* maximum timeout */
	sh_tmu_write(ch, TCOR, 0xffffffff);
	sh_tmu_write(ch, TCNT, 0xffffffff);

	/* configure channel to parent clock / 4, irq off */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* enable channel */
	sh_tmu_start_stop_ch(ch, 1);

	return 0;
}

de2d12c7 172static int sh_tmu_enable(struct sh_tmu_channel *ch)
61a53bfa 173{
de2d12c7 174 if (ch->enable_count++ > 0)
61a53bfa
RW
175 return 0;
176
de2d12c7
LP
177 pm_runtime_get_sync(&ch->tmu->pdev->dev);
178 dev_pm_syscore_device(&ch->tmu->pdev->dev, true);
61a53bfa 179
de2d12c7 180 return __sh_tmu_enable(ch);
61a53bfa
RW
181}
182
/*
 * Low-level channel shutdown: stop counting, mask the underflow
 * interrupt in TCR, then gate the clock.
 */
static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
	/* disable channel */
	sh_tmu_start_stop_ch(ch, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* stop clock */
	clk_disable(ch->tmu->clk);
}

/*
 * Refcounted channel disable, pairing sh_tmu_enable(): only the last
 * user shuts down the hardware and drops the runtime PM reference.
 * An unbalanced call (count already zero) is a driver bug -> WARN.
 */
static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
	if (WARN_ON(ch->enable_count == 0))
		return;

	if (--ch->enable_count > 0)
		return;

	__sh_tmu_disable(ch);

	dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
	pm_runtime_put(&ch->tmu->pdev->dev);
}

/*
 * Program the next expiry: stop the channel, ack any pending underflow
 * by reading TCR, unmask the underflow interrupt, load delta into TCNT
 * (and into TCOR for periodic mode so the value auto-reloads; oneshot
 * reloads the maximum instead), then restart.
 */
static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(ch, 0);

	/* acknowledge interrupt */
	sh_tmu_read(ch, TCR);

	/* enable interrupt */
	sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* reload delta value in case of periodic timer */
	if (periodic)
		sh_tmu_write(ch, TCOR, delta);
	else
		sh_tmu_write(ch, TCOR, 0xffffffff);

	sh_tmu_write(ch, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(ch, 1);
}

/*
 * Underflow interrupt handler. In oneshot mode the interrupt is masked
 * (one event only); in periodic mode it stays enabled for the auto-reload.
 * Writing TCR also clears the pending underflow condition here.
 */
static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
	struct sh_tmu_channel *ch = dev_id;

	/* disable or acknowledge interrupt */
	if (clockevent_state_oneshot(&ch->ced))
		sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
	else
		sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* notify clockevent layer */
	ch->ced.event_handler(&ch->ced);
	return IRQ_HANDLED;
}

/* Map a clocksource pointer back to its embedding channel. */
static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_channel, cs);
}

/*
 * Clocksource read: TCNT counts down, so XOR with all-ones converts it
 * into the monotonically increasing value the clocksource core expects.
 */
static u64 sh_tmu_clocksource_read(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}

260static int sh_tmu_clocksource_enable(struct clocksource *cs)
261{
de2d12c7 262 struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
0aeac458 263 int ret;
9570ef20 264
de2d12c7 265 if (WARN_ON(ch->cs_enabled))
61a53bfa
RW
266 return 0;
267
de2d12c7 268 ret = sh_tmu_enable(ch);
c3c0a20d 269 if (!ret)
de2d12c7 270 ch->cs_enabled = true;
61a53bfa 271
0aeac458 272 return ret;
9570ef20
MD
273}
274
275static void sh_tmu_clocksource_disable(struct clocksource *cs)
276{
de2d12c7 277 struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
eaa49a8c 278
de2d12c7 279 if (WARN_ON(!ch->cs_enabled))
61a53bfa 280 return;
eaa49a8c 281
de2d12c7
LP
282 sh_tmu_disable(ch);
283 ch->cs_enabled = false;
eaa49a8c
RW
284}
285
/*
 * System suspend hook: if the clocksource holds the last enable
 * reference, shut the hardware down and let genpd power off the domain.
 * The enable_count is decremented directly (not via sh_tmu_disable())
 * so runtime PM state is left untouched across suspend.
 */
static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (--ch->enable_count == 0) {
		__sh_tmu_disable(ch);
		dev_pm_genpd_suspend(&ch->tmu->pdev->dev);
	}
}

/*
 * System resume hook, mirror of sh_tmu_clocksource_suspend(): power the
 * domain back on via genpd before reprogramming the hardware.
 */
static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (ch->enable_count++ == 0) {
		dev_pm_genpd_resume(&ch->tmu->pdev->dev);
		__sh_tmu_enable(ch);
	}
}

/*
 * Fill in and register the channel's 32-bit continuous clocksource.
 * The rate was measured once at setup time (input clock / 4).
 */
static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = 200;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->suspend = sh_tmu_clocksource_suspend;
	cs->resume = sh_tmu_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	clocksource_register_hz(cs, ch->tmu->rate);
	return 0;
}

/* Map a clock_event_device pointer back to its embedding channel. */
static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_channel, ced);
}

/*
 * Start the channel for clockevent use. For periodic mode, compute the
 * reload value as rate/HZ rounded to nearest and program an auto-reload
 * countdown; oneshot mode waits for set_next_event instead.
 */
static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
	sh_tmu_enable(ch);

	if (periodic) {
		ch->periodic = (ch->tmu->rate + HZ/2) / HZ;
		sh_tmu_set_next(ch, ch->periodic, 1);
	}
}

2bcc4da3
VK
349static int sh_tmu_clock_event_shutdown(struct clock_event_device *ced)
350{
351 struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
352
452b1324
VK
353 if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
354 sh_tmu_disable(ch);
2bcc4da3
VK
355 return 0;
356}
357
/*
 * Common helper for the periodic/oneshot state transitions: tear down
 * the previous mode (if any) before starting the new one.
 */
static int sh_tmu_clock_event_set_state(struct clock_event_device *ced,
					int periodic)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	/* deal with old setting first */
	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_tmu_disable(ch);

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for %s clock events\n",
		 ch->index, periodic ? "periodic" : "oneshot");
	sh_tmu_clock_event_start(ch, periodic);
	return 0;
}

/* Clockevent callback: switch to oneshot mode. */
static int sh_tmu_clock_event_set_oneshot(struct clock_event_device *ced)
{
	return sh_tmu_clock_event_set_state(ced, 0);
}

/* Clockevent callback: switch to periodic mode. */
static int sh_tmu_clock_event_set_periodic(struct clock_event_device *ced)
{
	return sh_tmu_clock_event_set_state(ced, 1);
}

/*
 * Program the next oneshot event, delta ticks from now. Only legal in
 * oneshot state (the core guarantees this; BUG_ON documents it).
 */
static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	BUG_ON(!clockevent_state_oneshot(ced));

	/* program new delta value */
	sh_tmu_set_next(ch, delta, 0);
	return 0;
}

eaa49a8c
RW
395static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
396{
fc519890 397 dev_pm_genpd_suspend(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
eaa49a8c
RW
398}
399
/* System resume hook for the clockevent: power the domain back up. */
static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
	dev_pm_genpd_resume(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

/*
 * Fill in and register the channel's clock event device (periodic +
 * oneshot), then claim its interrupt.
 *
 * NOTE(review): the clockevent is registered before request_irq(), so
 * there is a short window where an event could be programmed with no
 * handler installed, and on request_irq() failure the device stays
 * registered — confirm whether reordering is safe for earlytimer use.
 */
static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 200;
	ced->cpumask = cpu_possible_mask;
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_state_shutdown = sh_tmu_clock_event_shutdown;
	ced->set_state_periodic = sh_tmu_clock_event_set_periodic;
	ced->set_state_oneshot = sh_tmu_clock_event_set_oneshot;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);

	clockevents_config_and_register(ced, ch->tmu->rate, 0x300, 0xffffffff);

	ret = request_irq(ch->irq, sh_tmu_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->tmu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}

84876d05 438static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
f1010ed1 439 bool clockevent, bool clocksource)
9570ef20 440{
8c7f21e6
LP
441 if (clockevent) {
442 ch->tmu->has_clockevent = true;
f1010ed1 443 sh_tmu_register_clockevent(ch, name);
8c7f21e6
LP
444 } else if (clocksource) {
445 ch->tmu->has_clocksource = true;
f1010ed1 446 sh_tmu_register_clocksource(ch, name);
8c7f21e6 447 }
9570ef20
MD
448
449 return 0;
450}
451
/*
 * Initialize one channel: compute its register window (layout depends
 * on the hardware model), fetch its interrupt, and register it in the
 * requested role. Channels used for neither role are skipped entirely.
 */
static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
				bool clockevent, bool clocksource,
				struct sh_tmu_device *tmu)
{
	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->tmu = tmu;
	ch->index = index;

	/* Channel registers: 12 bytes each, after the shared block header. */
	if (tmu->model == SH_TMU_SH3)
		ch->base = tmu->mapbase + 4 + ch->index * 12;
	else
		ch->base = tmu->mapbase + 8 + ch->index * 12;

	ch->irq = platform_get_irq(tmu->pdev, index);
	if (ch->irq < 0)
		return ch->irq;

	ch->cs_enabled = false;
	ch->enable_count = 0;

	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
			       clockevent, clocksource);
}

8c7f21e6 479static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
9570ef20 480{
9570ef20 481 struct resource *res;
9570ef20 482
0a72aa39 483 res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
9570ef20 484 if (!res) {
0a72aa39 485 dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
8c7f21e6 486 return -ENXIO;
9570ef20
MD
487 }
488
4bdc0d67 489 tmu->mapbase = ioremap(res->start, resource_size(res));
8c7f21e6
LP
490 if (tmu->mapbase == NULL)
491 return -ENXIO;
492
8c7f21e6
LP
493 return 0;
494}
de693461 495
3e29b554
LP
496static int sh_tmu_parse_dt(struct sh_tmu_device *tmu)
497{
498 struct device_node *np = tmu->pdev->dev.of_node;
499
500 tmu->model = SH_TMU;
501 tmu->num_channels = 3;
502
503 of_property_read_u32(np, "#renesas,channels", &tmu->num_channels);
504
505 if (tmu->num_channels != 2 && tmu->num_channels != 3) {
506 dev_err(&tmu->pdev->dev, "invalid number of channels %u\n",
507 tmu->num_channels);
508 return -EINVAL;
509 }
510
511 return 0;
512}
513
8c7f21e6
LP
514static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
515{
8c7f21e6
LP
516 unsigned int i;
517 int ret;
518
8c7f21e6 519 tmu->pdev = pdev;
8c7f21e6 520
2b027f1f
LP
521 raw_spin_lock_init(&tmu->lock);
522
3e29b554
LP
523 if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
524 ret = sh_tmu_parse_dt(tmu);
525 if (ret < 0)
526 return ret;
527 } else if (pdev->dev.platform_data) {
528 const struct platform_device_id *id = pdev->id_entry;
529 struct sh_timer_config *cfg = pdev->dev.platform_data;
530
531 tmu->model = id->driver_data;
532 tmu->num_channels = hweight8(cfg->channels_mask);
533 } else {
534 dev_err(&tmu->pdev->dev, "missing platform data\n");
535 return -ENXIO;
536 }
537
8c7f21e6 538 /* Get hold of clock. */
681b9e85 539 tmu->clk = clk_get(&tmu->pdev->dev, "fck");
0a72aa39
LP
540 if (IS_ERR(tmu->clk)) {
541 dev_err(&tmu->pdev->dev, "cannot get clock\n");
8c7f21e6 542 return PTR_ERR(tmu->clk);
9570ef20 543 }
1c09eb3e 544
0a72aa39 545 ret = clk_prepare(tmu->clk);
1c09eb3e 546 if (ret < 0)
8c7f21e6
LP
547 goto err_clk_put;
548
c3c0a20d
NS
549 /* Determine clock rate. */
550 ret = clk_enable(tmu->clk);
551 if (ret < 0)
552 goto err_clk_unprepare;
553
554 tmu->rate = clk_get_rate(tmu->clk) / 4;
555 clk_disable(tmu->clk);
556
8c7f21e6
LP
557 /* Map the memory resource. */
558 ret = sh_tmu_map_memory(tmu);
559 if (ret < 0) {
560 dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
561 goto err_clk_unprepare;
562 }
1c09eb3e 563
8c7f21e6 564 /* Allocate and setup the channels. */
6396bb22 565 tmu->channels = kcalloc(tmu->num_channels, sizeof(*tmu->channels),
8c7f21e6 566 GFP_KERNEL);
a5de49f4
LP
567 if (tmu->channels == NULL) {
568 ret = -ENOMEM;
8c7f21e6 569 goto err_unmap;
a5de49f4
LP
570 }
571
681b9e85
LP
572 /*
573 * Use the first channel as a clock event device and the second channel
574 * as a clock source.
575 */
576 for (i = 0; i < tmu->num_channels; ++i) {
577 ret = sh_tmu_channel_setup(&tmu->channels[i], i,
578 i == 0, i == 1, tmu);
8c7f21e6
LP
579 if (ret < 0)
580 goto err_unmap;
8c7f21e6 581 }
a5de49f4 582
8c7f21e6 583 platform_set_drvdata(pdev, tmu);
394a4486
LP
584
585 return 0;
586
8c7f21e6 587err_unmap:
a5de49f4 588 kfree(tmu->channels);
681b9e85 589 iounmap(tmu->mapbase);
8c7f21e6 590err_clk_unprepare:
0a72aa39 591 clk_unprepare(tmu->clk);
8c7f21e6 592err_clk_put:
0a72aa39 593 clk_put(tmu->clk);
9570ef20
MD
594 return ret;
595}
596
/*
 * Platform driver probe. The device may already have been set up as an
 * SH early platform timer ("earlytimer"); in that case drvdata is
 * non-NULL and only the runtime PM state needs finishing. Channels used
 * as clock event/source must remain usable from IRQ-disabled contexts,
 * hence pm_runtime_irq_safe() when any channel is registered.
 */
static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
	int ret;

	if (!is_sh_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (tmu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
	if (tmu == NULL)
		return -ENOMEM;

	ret = sh_tmu_setup(tmu, pdev);
	if (ret) {
		kfree(tmu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}

	/* Early probe: runtime PM hookup is deferred to the real probe. */
	if (is_sh_early_platform_device(pdev))
		return 0;

 out:
	if (tmu->has_clockevent || tmu->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

634
/* Platform bus IDs; driver_data selects the register layout variant. */
static const struct platform_device_id sh_tmu_id_table[] = {
	{ "sh-tmu", SH_TMU },
	{ "sh-tmu-sh3", SH_TMU_SH3 },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);

/* Device tree match table (only used when CONFIG_OF is enabled). */
static const struct of_device_id sh_tmu_of_table[] __maybe_unused = {
	{ .compatible = "renesas,tmu" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_tmu_of_table);

/*
 * Platform driver; bind/unbind is suppressed because the timer cannot
 * be safely torn down while in use as system clocksource/clockevent.
 */
static struct platform_driver sh_tmu_device_driver = {
	.probe		= sh_tmu_probe,
	.driver		= {
		.name	= "sh_tmu",
		.of_match_table = of_match_ptr(sh_tmu_of_table),
		.suppress_bind_attrs = true,
	},
	.id_table	= sh_tmu_id_table,
};

657
/* Module init: register the platform driver. */
static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

/* Module exit: unregister the platform driver. */
static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}

/* On SuperH, also make the driver available as an early platform timer. */
#ifdef CONFIG_SUPERH
sh_early_platform_init("earlytimer", &sh_tmu_device_driver);
#endif

/* Register at subsys time so timers are available before device init. */
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");