Linux 4.16-rc1
drivers/clk/ti/clkctrl.c
/*
 * OMAP clkctrl clock support
 *
 * Copyright (C) 2017 Texas Instruments, Inc.
 *
 * Tero Kristo <t-kristo@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
#include <linux/delay.h>
#include <linux/timekeeping.h>
#include "clock.h"

#define NO_IDLEST 0x1

#define OMAP4_MODULEMODE_MASK 0x3

#define MODULEMODE_HWCTRL 0x1
#define MODULEMODE_SWCTRL 0x2

#define OMAP4_IDLEST_MASK (0x3 << 16)
#define OMAP4_IDLEST_SHIFT 16

#define CLKCTRL_IDLEST_FUNCTIONAL 0x0
#define CLKCTRL_IDLEST_INTERFACE_IDLE 0x2
#define CLKCTRL_IDLEST_DISABLED 0x3

/* These timeouts are in us */
#define OMAP4_MAX_MODULE_READY_TIME 2000
#define OMAP4_MAX_MODULE_DISABLE_TIME 5000

static bool _early_timeout = true;

struct omap_clkctrl_provider {
	void __iomem *base;
	struct list_head clocks;
	char *clkdm_name;
};

struct omap_clkctrl_clk {
	struct clk_hw *clk;
	u16 reg_offset;
	int bit_offset;
	struct list_head node;
};

union omap4_timeout {
	u32 cycles;
	ktime_t start;
};

static const struct omap_clkctrl_data default_clkctrl_data[] __initconst = {
	{ 0 },
};

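/* Helpers for decoding the IDLEST field (bits 16-17) of a clkctrl register */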
static u32 _omap4_idlest(u32 val)
{
	val &= OMAP4_IDLEST_MASK;
	val >>= OMAP4_IDLEST_SHIFT;

	return val;
}

static bool _omap4_is_idle(u32 val)
{
	val = _omap4_idlest(val);

	return val == CLKCTRL_IDLEST_DISABLED;
}

static bool _omap4_is_ready(u32 val)
{
	val = _omap4_idlest(val);

	return val == CLKCTRL_IDLEST_FUNCTIONAL ||
	       val == CLKCTRL_IDLEST_INTERFACE_IDLE;
}

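/*
 * Check whether a module state transition has timed out. The elapsed
 * time is tracked either as a busy-wait iteration count (early boot or
 * timekeeping suspended) or as a ktime_t timestamp.
 */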
static bool _omap4_is_timeout(union omap4_timeout *time, u32 timeout)
{
	/*
	 * There are two special cases where ktime_to_ns() can't be
	 * used to track the timeouts. The first is during early boot,
	 * when the timers have not been initialized yet. The second is
	 * during the suspend-resume cycle, while timekeeping is being
	 * suspended / resumed. The system clocksource can be a timer
	 * that requires pm_runtime access, which will eventually bring
	 * us here with timekeeping_suspended set, during both the
	 * suspend entry and resume paths. This happens at least on the
	 * am43xx platform.
	 */
	if (unlikely(_early_timeout || timekeeping_suspended)) {
		if (time->cycles++ < timeout) {
			udelay(1);
			return false;
		}
	} else {
		if (!ktime_to_ns(time->start)) {
			time->start = ktime_get();
			return false;
		}

		if (ktime_us_delta(ktime_get(), time->start) < timeout) {
			cpu_relax();
			return false;
		}
	}

	return true;
}

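/*
 * Switch from udelay() based polling to ktime based timeout tracking
 * once the timekeeping core is available (arch_initcall time).
 */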
static int __init _omap4_disable_early_timeout(void)
{
	_early_timeout = false;

	return 0;
}
arch_initcall(_omap4_disable_early_timeout);

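/*
 * Enable a clkctrl module clock: enable the parent clockdomain if one
 * is assigned, program MODULEMODE in the clkctrl register and, unless
 * NO_IDLEST is set, poll IDLEST until the module reports ready.
 */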
static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;
	int ret;
	union omap4_timeout timeout = { 0 };

	if (!clk->enable_bit)
		return 0;

	if (clk->clkdm) {
		ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
		if (ret) {
			WARN(1,
			     "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, clk_hw_get_name(hw),
			     clk->clkdm_name, ret);
			return ret;
		}
	}

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	val &= ~OMAP4_MODULEMODE_MASK;
	val |= clk->enable_bit;

	ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);

	if (clk->flags & NO_IDLEST)
		return 0;

	/* Wait until module is enabled */
	while (!_omap4_is_ready(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
		if (_omap4_is_timeout(&timeout, OMAP4_MAX_MODULE_READY_TIME)) {
			pr_err("%s: failed to enable\n", clk_hw_get_name(hw));
			return -EBUSY;
		}
	}

	return 0;
}

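/*
 * Disable a clkctrl module clock: clear MODULEMODE, optionally wait
 * for the module to report disabled, then release the clockdomain.
 */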
static void _omap4_clkctrl_clk_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;
	union omap4_timeout timeout = { 0 };

	if (!clk->enable_bit)
		return;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	val &= ~OMAP4_MODULEMODE_MASK;

	ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);

	if (clk->flags & NO_IDLEST)
		goto exit;

	/* Wait until module is disabled */
	while (!_omap4_is_idle(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
		if (_omap4_is_timeout(&timeout,
				      OMAP4_MAX_MODULE_DISABLE_TIME)) {
			pr_err("%s: failed to disable\n", clk_hw_get_name(hw));
			break;
		}
	}

exit:
	if (clk->clkdm)
		ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
}

static int _omap4_clkctrl_clk_is_enabled(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	if (val & clk->enable_bit)
		return 1;

	return 0;
}

static const struct clk_ops omap4_clkctrl_clk_ops = {
	.enable = _omap4_clkctrl_clk_enable,
	.disable = _omap4_clkctrl_clk_disable,
	.is_enabled = _omap4_clkctrl_clk_is_enabled,
	.init = omap2_init_clk_clkdm,
};

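/*
 * Translate a two-cell clock specifier from the device tree (clkctrl
 * register offset, bit offset) into the matching registered clk_hw.
 */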
static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
					      void *data)
{
	struct omap_clkctrl_provider *provider = data;
	struct omap_clkctrl_clk *entry;
	bool found = false;

	if (clkspec->args_count != 2)
		return ERR_PTR(-EINVAL);

	pr_debug("%s: looking for %x:%x\n", __func__,
		 clkspec->args[0], clkspec->args[1]);

	list_for_each_entry(entry, &provider->clocks, node) {
		if (entry->reg_offset == clkspec->args[0] &&
		    entry->bit_offset == clkspec->args[1]) {
			found = true;
			break;
		}
	}

	if (!found)
		return ERR_PTR(-EINVAL);

	return entry->clk;
}

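/*
 * Register a single clkctrl clock and add it to the provider's clock
 * list. The clock name is generated as <parent node>:<node>:<offset>:<bit>.
 */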
static int __init
_ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
			 struct device_node *node, struct clk_hw *clk_hw,
			 u16 offset, u8 bit, const char * const *parents,
			 int num_parents, const struct clk_ops *ops)
{
	struct clk_init_data init = { NULL };
	struct clk *clk;
	struct omap_clkctrl_clk *clkctrl_clk;
	int ret = 0;

	init.name = kasprintf(GFP_KERNEL, "%s:%s:%04x:%d", node->parent->name,
			      node->name, offset, bit);
	clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
	if (!init.name || !clkctrl_clk) {
		ret = -ENOMEM;
		goto cleanup;
	}

	clk_hw->init = &init;
	init.parent_names = parents;
	init.num_parents = num_parents;
	init.ops = ops;
	init.flags = CLK_IS_BASIC;

	clk = ti_clk_register(NULL, clk_hw, init.name);
	if (IS_ERR_OR_NULL(clk)) {
		ret = -EINVAL;
		goto cleanup;
	}

	clkctrl_clk->reg_offset = offset;
	clkctrl_clk->bit_offset = bit;
	clkctrl_clk->clk = clk_hw;

	list_add(&clkctrl_clk->node, &provider->clocks);

	return 0;

cleanup:
	kfree(init.name);
	kfree(clkctrl_clk);
	return ret;
}

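/*
 * Set up a gate type subclock controlled by a single bit of the
 * clkctrl register.
 */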
static void __init
_ti_clkctrl_setup_gate(struct omap_clkctrl_provider *provider,
		       struct device_node *node, u16 offset,
		       const struct omap_clkctrl_bit_data *data,
		       void __iomem *reg)
{
	struct clk_hw_omap *clk_hw;

	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	if (!clk_hw)
		return;

	clk_hw->enable_bit = data->bit;
	clk_hw->enable_reg.ptr = reg;

	if (_ti_clkctrl_clk_register(provider, node, &clk_hw->hw, offset,
				     data->bit, data->parents, 1,
				     &omap_gate_clk_ops))
		kfree(clk_hw);
}

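/*
 * Set up a mux type subclock. The register field mask is derived from
 * the number of parents and widened to the nearest all-ones bitmask
 * that covers every parent index.
 */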
static void __init
_ti_clkctrl_setup_mux(struct omap_clkctrl_provider *provider,
		      struct device_node *node, u16 offset,
		      const struct omap_clkctrl_bit_data *data,
		      void __iomem *reg)
{
	struct clk_omap_mux *mux;
	int num_parents = 0;
	const char * const *pname;

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return;

	pname = data->parents;
	while (*pname) {
		num_parents++;
		pname++;
	}

	mux->mask = num_parents;
	if (!(mux->flags & CLK_MUX_INDEX_ONE))
		mux->mask--;

	mux->mask = (1 << fls(mux->mask)) - 1;

	mux->shift = data->bit;
	mux->reg.ptr = reg;

	if (_ti_clkctrl_clk_register(provider, node, &mux->hw, offset,
				     data->bit, data->parents, num_parents,
				     &ti_clk_mux_ops))
		kfree(mux);
}

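/*
 * Set up a divider type subclock, parsing the divider table from the
 * SoC specific clkctrl data.
 */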
static void __init
_ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider,
		      struct device_node *node, u16 offset,
		      const struct omap_clkctrl_bit_data *data,
		      void __iomem *reg)
{
	struct clk_omap_divider *div;
	const struct omap_clkctrl_div_data *div_data = data->data;
	u8 div_flags = 0;

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return;

	div->reg.ptr = reg;
	div->shift = data->bit;
	div->flags = div_data->flags;

	if (div->flags & CLK_DIVIDER_POWER_OF_TWO)
		div_flags |= CLKF_INDEX_POWER_OF_TWO;

	if (ti_clk_parse_divider_data((int *)div_data->dividers, 0,
				      div_data->max_div, div_flags,
				      &div->width, &div->table)) {
		pr_err("%s: Data parsing for %pOF:%04x:%d failed\n", __func__,
		       node, offset, data->bit);
		kfree(div);
		return;
	}

	if (_ti_clkctrl_clk_register(provider, node, &div->hw, offset,
				     data->bit, data->parents, 1,
				     &ti_clk_divider_ops))
		kfree(div);
}

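/*
 * Walk the bit_data array of one clkctrl register (terminated by an
 * entry with bit == 0) and register the gate, divider and mux
 * subclocks described in it.
 */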
static void __init
_ti_clkctrl_setup_subclks(struct omap_clkctrl_provider *provider,
			  struct device_node *node,
			  const struct omap_clkctrl_reg_data *data,
			  void __iomem *reg)
{
	const struct omap_clkctrl_bit_data *bits = data->bit_data;

	if (!bits)
		return;

	while (bits->bit) {
		switch (bits->type) {
		case TI_CLK_GATE:
			_ti_clkctrl_setup_gate(provider, node, data->offset,
					       bits, reg);
			break;

		case TI_CLK_DIVIDER:
			_ti_clkctrl_setup_div(provider, node, data->offset,
					      bits, reg);
			break;

		case TI_CLK_MUX:
			_ti_clkctrl_setup_mux(provider, node, data->offset,
					      bits, reg);
			break;

		default:
			pr_err("%s: bad subclk type: %d\n", __func__,
			       bits->type);
			return;
		}
		bits++;
	}
}

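/* Retry callback used when the initial of_clk_add_hw_provider() defers */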
static void __init _clkctrl_add_provider(void *data,
					 struct device_node *np)
{
	of_clk_add_hw_provider(np, _ti_omap4_clkctrl_xlate, data);
}

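/*
 * Probe a "ti,clkctrl" node: match its address against the SoC specific
 * clkctrl data, create the module clocks and their subclocks, and
 * register the node as a clock provider.
 */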
static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
{
	struct omap_clkctrl_provider *provider;
	const struct omap_clkctrl_data *data = default_clkctrl_data;
	const struct omap_clkctrl_reg_data *reg_data;
	struct clk_init_data init = { NULL };
	struct clk_hw_omap *hw;
	struct clk *clk;
	struct omap_clkctrl_clk *clkctrl_clk;
	const __be32 *addrp;
	u32 addr;
	int ret;

	addrp = of_get_address(node, 0, NULL, NULL);
	addr = (u32)of_translate_address(node, addrp);

#ifdef CONFIG_ARCH_OMAP4
	if (of_machine_is_compatible("ti,omap4"))
		data = omap4_clkctrl_data;
#endif
#ifdef CONFIG_SOC_OMAP5
	if (of_machine_is_compatible("ti,omap5"))
		data = omap5_clkctrl_data;
#endif
#ifdef CONFIG_SOC_DRA7XX
	if (of_machine_is_compatible("ti,dra7"))
		data = dra7_clkctrl_data;
#endif
#ifdef CONFIG_SOC_AM33XX
	if (of_machine_is_compatible("ti,am33xx"))
		data = am3_clkctrl_data;
#endif
#ifdef CONFIG_SOC_AM43XX
	if (of_machine_is_compatible("ti,am4372"))
		data = am4_clkctrl_data;
	if (of_machine_is_compatible("ti,am438x"))
		data = am438x_clkctrl_data;
#endif
#ifdef CONFIG_SOC_TI81XX
	if (of_machine_is_compatible("ti,dm814"))
		data = dm814_clkctrl_data;

	if (of_machine_is_compatible("ti,dm816"))
		data = dm816_clkctrl_data;
#endif

	while (data->addr) {
		if (addr == data->addr)
			break;

		data++;
	}

	if (!data->addr) {
		pr_err("%pOF not found from clkctrl data.\n", node);
		return;
	}

	provider = kzalloc(sizeof(*provider), GFP_KERNEL);
	if (!provider)
		return;

	provider->base = of_iomap(node, 0);

	/* Replacing "_cm" with "_clkdm" grows the name by 3 chars + NUL */
	provider->clkdm_name = kmalloc(strlen(node->parent->name) + 4,
				       GFP_KERNEL);
	if (!provider->clkdm_name) {
		kfree(provider);
		return;
	}

	/*
	 * Create the default clkdm name by replacing the trailing "_cm"
	 * of the parent node name with "_clkdm".
	 */
	strcpy(provider->clkdm_name, node->parent->name);
	provider->clkdm_name[strlen(provider->clkdm_name) - 2] = 0;
	strcat(provider->clkdm_name, "clkdm");

	INIT_LIST_HEAD(&provider->clocks);

	/* Generate clocks */
	reg_data = data->regs;

	while (reg_data->parent) {
		hw = kzalloc(sizeof(*hw), GFP_KERNEL);
		if (!hw)
			return;

		hw->enable_reg.ptr = provider->base + reg_data->offset;

		_ti_clkctrl_setup_subclks(provider, node, reg_data,
					  hw->enable_reg.ptr);

		if (reg_data->flags & CLKF_SW_SUP)
			hw->enable_bit = MODULEMODE_SWCTRL;
		if (reg_data->flags & CLKF_HW_SUP)
			hw->enable_bit = MODULEMODE_HWCTRL;
		if (reg_data->flags & CLKF_NO_IDLEST)
			hw->flags |= NO_IDLEST;

		if (reg_data->clkdm_name)
			hw->clkdm_name = reg_data->clkdm_name;
		else
			hw->clkdm_name = provider->clkdm_name;

		init.parent_names = &reg_data->parent;
		init.num_parents = 1;
		init.flags = 0;
		init.name = kasprintf(GFP_KERNEL, "%s:%s:%04x:%d",
				      node->parent->name, node->name,
				      reg_data->offset, 0);
		clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
		if (!init.name || !clkctrl_clk)
			goto cleanup;

		init.ops = &omap4_clkctrl_clk_ops;
		hw->hw.init = &init;

		clk = ti_clk_register(NULL, &hw->hw, init.name);
		if (IS_ERR_OR_NULL(clk))
			goto cleanup;

		clkctrl_clk->reg_offset = reg_data->offset;
		clkctrl_clk->clk = &hw->hw;

		list_add(&clkctrl_clk->node, &provider->clocks);

		reg_data++;
	}

	ret = of_clk_add_hw_provider(node, _ti_omap4_clkctrl_xlate, provider);
	if (ret == -EPROBE_DEFER)
		ti_clk_retry_init(node, provider, _clkctrl_add_provider);

	return;

cleanup:
	kfree(hw);
	kfree(init.name);
	kfree(clkctrl_clk);
}
CLK_OF_DECLARE(ti_omap4_clkctrl_clock, "ti,clkctrl",
	       _ti_omap4_clkctrl_setup);