clk: stm32mp13: add composite clock
[linux-block.git] / drivers / clk / stm32 / clk-stm32-core.c
CommitLineData
637cee5f
GF
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) STMicroelectronics 2022 - All Rights Reserved
4 * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
5 */
6
7#include <linux/clk.h>
8#include <linux/delay.h>
9#include <linux/device.h>
10#include <linux/err.h>
11#include <linux/io.h>
12#include <linux/of.h>
13#include <linux/of_address.h>
14#include <linux/slab.h>
15#include <linux/spinlock.h>
16
17#include "clk-stm32-core.h"
18#include "reset-stm32.h"
19
20static DEFINE_SPINLOCK(rlock);
21
22static int stm32_rcc_clock_init(struct device *dev,
23 const struct of_device_id *match,
24 void __iomem *base)
25{
26 const struct stm32_rcc_match_data *data = match->data;
27 struct clk_hw_onecell_data *clk_data = data->hw_clks;
28 struct device_node *np = dev_of_node(dev);
29 struct clk_hw **hws;
30 int n, max_binding;
31
32 max_binding = data->maxbinding;
33
34 clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, max_binding), GFP_KERNEL);
35 if (!clk_data)
36 return -ENOMEM;
37
38 clk_data->num = max_binding;
39
40 hws = clk_data->hws;
41
42 for (n = 0; n < max_binding; n++)
43 hws[n] = ERR_PTR(-ENOENT);
44
45 for (n = 0; n < data->num_clocks; n++) {
46 const struct clock_config *cfg_clock = &data->tab_clocks[n];
47 struct clk_hw *hw = ERR_PTR(-ENOENT);
48
49 if (cfg_clock->func)
50 hw = (*cfg_clock->func)(dev, data, base, &rlock,
51 cfg_clock);
52
53 if (IS_ERR(hw)) {
54 dev_err(dev, "Can't register clk %d: %ld\n", n,
55 PTR_ERR(hw));
56 return PTR_ERR(hw);
57 }
58
59 if (cfg_clock->id != NO_ID)
60 hws[cfg_clock->id] = hw;
61 }
62
63 return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
64}
65
66int stm32_rcc_init(struct device *dev, const struct of_device_id *match_data,
67 void __iomem *base)
68{
69 const struct of_device_id *match;
70 int err;
71
72 match = of_match_node(match_data, dev_of_node(dev));
73 if (!match) {
74 dev_err(dev, "match data not found\n");
75 return -ENODEV;
76 }
77
78 /* RCC Reset Configuration */
79 err = stm32_rcc_reset_init(dev, match, base);
80 if (err) {
81 pr_err("stm32 reset failed to initialize\n");
82 return err;
83 }
84
85 /* RCC Clock Configuration */
86 err = stm32_rcc_clock_init(dev, match, base);
87 if (err) {
88 pr_err("stm32 clock failed to initialize\n");
89 return err;
90 }
91
92 return 0;
93}
f95cea83
GF
94
95static u8 stm32_mux_get_parent(void __iomem *base,
96 struct clk_stm32_clock_data *data,
97 u16 mux_id)
98{
99 const struct stm32_mux_cfg *mux = &data->muxes[mux_id];
100 u32 mask = BIT(mux->width) - 1;
101 u32 val;
102
103 val = readl(base + mux->offset) >> mux->shift;
104 val &= mask;
105
106 return val;
107}
108
109static int stm32_mux_set_parent(void __iomem *base,
110 struct clk_stm32_clock_data *data,
111 u16 mux_id, u8 index)
112{
113 const struct stm32_mux_cfg *mux = &data->muxes[mux_id];
114
115 u32 mask = BIT(mux->width) - 1;
116 u32 reg = readl(base + mux->offset);
117 u32 val = index << mux->shift;
118
119 reg &= ~(mask << mux->shift);
120 reg |= val;
121
122 writel(reg, base + mux->offset);
123
124 return 0;
125}
126
95f5e0a4
GF
/*
 * Enable or disable a gate, keeping a per-gate reference count so a bit
 * shared by several clocks is only written on the 0 <-> 1 count
 * transitions.  Gates with a non-zero 'set_clr' use write-one-to-set at
 * 'offset' and write-one-to-clear at 'offset + set_clr'; others are
 * plain read-modify-write bits.  Callers serialize through the spinlock
 * supplied at clock registration.
 */
static void stm32_gate_endisable(void __iomem *base,
				 struct clk_stm32_clock_data *data,
				 u16 gate_id, int enable)
{
	const struct stm32_gate_cfg *gate = &data->gates[gate_id];
	void __iomem *addr = base + gate->offset;

	if (enable) {
		/* post-increment: only the first enabler touches hardware */
		if (data->gate_cpt[gate_id]++ > 0)
			return;

		if (gate->set_clr != 0)
			writel(BIT(gate->bit_idx), addr);
		else
			writel(readl(addr) | BIT(gate->bit_idx), addr);
	} else {
		/* pre-decrement: only the last disabler touches hardware */
		if (--data->gate_cpt[gate_id] > 0)
			return;

		if (gate->set_clr != 0)
			writel(BIT(gate->bit_idx), addr + gate->set_clr);
		else
			writel(readl(addr) & ~BIT(gate->bit_idx), addr);
	}
}
152
153static void stm32_gate_disable_unused(void __iomem *base,
154 struct clk_stm32_clock_data *data,
155 u16 gate_id)
156{
157 const struct stm32_gate_cfg *gate = &data->gates[gate_id];
158 void __iomem *addr = base + gate->offset;
159
160 if (data->gate_cpt[gate_id] > 0)
161 return;
162
163 if (gate->set_clr != 0)
164 writel(BIT(gate->bit_idx), addr + gate->set_clr);
165 else
166 writel(readl(addr) & ~BIT(gate->bit_idx), addr);
167}
168
169static int stm32_gate_is_enabled(void __iomem *base,
170 struct clk_stm32_clock_data *data,
171 u16 gate_id)
172{
173 const struct stm32_gate_cfg *gate = &data->gates[gate_id];
174
175 return (readl(base + gate->offset) & BIT(gate->bit_idx)) != 0;
176}
177
720e34ab
GF
178static unsigned int _get_table_div(const struct clk_div_table *table,
179 unsigned int val)
180{
181 const struct clk_div_table *clkt;
182
183 for (clkt = table; clkt->div; clkt++)
184 if (clkt->val == val)
185 return clkt->div;
186 return 0;
187}
188
189static unsigned int _get_div(const struct clk_div_table *table,
190 unsigned int val, unsigned long flags, u8 width)
191{
192 if (flags & CLK_DIVIDER_ONE_BASED)
193 return val;
194 if (flags & CLK_DIVIDER_POWER_OF_TWO)
195 return 1 << val;
196 if (table)
197 return _get_table_div(table, val);
198 return val + 1;
199}
200
201static unsigned long stm32_divider_get_rate(void __iomem *base,
202 struct clk_stm32_clock_data *data,
203 u16 div_id,
204 unsigned long parent_rate)
205{
206 const struct stm32_div_cfg *divider = &data->dividers[div_id];
207 unsigned int val;
208 unsigned int div;
209
210 val = readl(base + divider->offset) >> divider->shift;
211 val &= clk_div_mask(divider->width);
212 div = _get_div(divider->table, val, divider->flags, divider->width);
213
214 if (!div) {
215 WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
216 "%d: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
217 div_id);
218 return parent_rate;
219 }
220
221 return DIV_ROUND_UP_ULL((u64)parent_rate, div);
222}
223
224static int stm32_divider_set_rate(void __iomem *base,
225 struct clk_stm32_clock_data *data,
226 u16 div_id, unsigned long rate,
227 unsigned long parent_rate)
228{
229 const struct stm32_div_cfg *divider = &data->dividers[div_id];
230 int value;
231 u32 val;
232
233 value = divider_get_val(rate, parent_rate, divider->table,
234 divider->width, divider->flags);
235 if (value < 0)
236 return value;
237
238 if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
239 val = clk_div_mask(divider->width) << (divider->shift + 16);
240 } else {
241 val = readl(base + divider->offset);
242 val &= ~(clk_div_mask(divider->width) << divider->shift);
243 }
244
245 val |= (u32)value << divider->shift;
246
247 writel(val, base + divider->offset);
248
249 return 0;
250}
251
f95cea83
GF
252static u8 clk_stm32_mux_get_parent(struct clk_hw *hw)
253{
254 struct clk_stm32_mux *mux = to_clk_stm32_mux(hw);
255
256 return stm32_mux_get_parent(mux->base, mux->clock_data, mux->mux_id);
257}
258
259static int clk_stm32_mux_set_parent(struct clk_hw *hw, u8 index)
260{
261 struct clk_stm32_mux *mux = to_clk_stm32_mux(hw);
262 unsigned long flags = 0;
263
264 spin_lock_irqsave(mux->lock, flags);
265
266 stm32_mux_set_parent(mux->base, mux->clock_data, mux->mux_id, index);
267
268 spin_unlock_irqrestore(mux->lock, flags);
269
270 return 0;
271}
272
/* Pure mux clock: parent selection only, no gate and no divider. */
const struct clk_ops clk_stm32_mux_ops = {
	.get_parent = clk_stm32_mux_get_parent,
	.set_parent = clk_stm32_mux_set_parent,
};
277
95f5e0a4
GF
278static void clk_stm32_gate_endisable(struct clk_hw *hw, int enable)
279{
280 struct clk_stm32_gate *gate = to_clk_stm32_gate(hw);
281 unsigned long flags = 0;
282
283 spin_lock_irqsave(gate->lock, flags);
284
285 stm32_gate_endisable(gate->base, gate->clock_data, gate->gate_id, enable);
286
287 spin_unlock_irqrestore(gate->lock, flags);
288}
289
/* clk_ops.enable callback; always succeeds. */
static int clk_stm32_gate_enable(struct clk_hw *hw)
{
	clk_stm32_gate_endisable(hw, 1);
	return 0;
}
296
/* clk_ops.disable callback. */
static void clk_stm32_gate_disable(struct clk_hw *hw)
{
	clk_stm32_gate_endisable(hw, 0);
}
301
302static int clk_stm32_gate_is_enabled(struct clk_hw *hw)
303{
304 struct clk_stm32_gate *gate = to_clk_stm32_gate(hw);
305
306 return stm32_gate_is_enabled(gate->base, gate->clock_data, gate->gate_id);
307}
308
309static void clk_stm32_gate_disable_unused(struct clk_hw *hw)
310{
311 struct clk_stm32_gate *gate = to_clk_stm32_gate(hw);
312 unsigned long flags = 0;
313
314 spin_lock_irqsave(gate->lock, flags);
315
316 stm32_gate_disable_unused(gate->base, gate->clock_data, gate->gate_id);
317
318 spin_unlock_irqrestore(gate->lock, flags);
319}
320
/* Pure gate clock: enable/disable only, no mux and no divider. */
const struct clk_ops clk_stm32_gate_ops = {
	.enable = clk_stm32_gate_enable,
	.disable = clk_stm32_gate_disable,
	.is_enabled = clk_stm32_gate_is_enabled,
	.disable_unused = clk_stm32_gate_disable_unused,
};
327
720e34ab
GF
328static int clk_stm32_divider_set_rate(struct clk_hw *hw, unsigned long rate,
329 unsigned long parent_rate)
330{
331 struct clk_stm32_div *div = to_clk_stm32_divider(hw);
332 unsigned long flags = 0;
333 int ret;
334
335 if (div->div_id == NO_STM32_DIV)
336 return rate;
337
338 spin_lock_irqsave(div->lock, flags);
339
340 ret = stm32_divider_set_rate(div->base, div->clock_data, div->div_id, rate, parent_rate);
341
342 spin_unlock_irqrestore(div->lock, flags);
343
344 return ret;
345}
346
/*
 * clk_ops.round_rate: report the closest rate the divider can produce.
 * A clock with no divider (NO_STM32_DIV) passes @rate through; a
 * read-only divider is rounded against its current register setting.
 */
static long clk_stm32_divider_round_rate(struct clk_hw *hw, unsigned long rate,
					 unsigned long *prate)
{
	struct clk_stm32_div *div = to_clk_stm32_divider(hw);
	const struct stm32_div_cfg *divider;

	if (div->div_id == NO_STM32_DIV)
		return rate;

	divider = &div->clock_data->dividers[div->div_id];

	/* if read only, just return current value */
	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
		u32 val;

		val = readl(div->base + divider->offset) >> divider->shift;
		val &= clk_div_mask(divider->width);

		return divider_ro_round_rate(hw, rate, prate, divider->table,
					     divider->width, divider->flags,
					     val);
	}

	return divider_round_rate_parent(hw, clk_hw_get_parent(hw),
					 rate, prate, divider->table,
					 divider->width, divider->flags);
}
374
375static unsigned long clk_stm32_divider_recalc_rate(struct clk_hw *hw,
376 unsigned long parent_rate)
377{
378 struct clk_stm32_div *div = to_clk_stm32_divider(hw);
379
380 if (div->div_id == NO_STM32_DIV)
381 return parent_rate;
382
383 return stm32_divider_get_rate(div->base, div->clock_data, div->div_id, parent_rate);
384}
385
/* Pure divider clock: rate operations only, no mux and no gate. */
const struct clk_ops clk_stm32_divider_ops = {
	.recalc_rate = clk_stm32_divider_recalc_rate,
	.round_rate = clk_stm32_divider_round_rate,
	.set_rate = clk_stm32_divider_set_rate,
};
391
5f0d4721
GF
392static int clk_stm32_composite_set_rate(struct clk_hw *hw, unsigned long rate,
393 unsigned long parent_rate)
394{
395 struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
396 unsigned long flags = 0;
397 int ret;
398
399 if (composite->div_id == NO_STM32_DIV)
400 return rate;
401
402 spin_lock_irqsave(composite->lock, flags);
403
404 ret = stm32_divider_set_rate(composite->base, composite->clock_data,
405 composite->div_id, rate, parent_rate);
406
407 spin_unlock_irqrestore(composite->lock, flags);
408
409 return ret;
410}
411
412static unsigned long clk_stm32_composite_recalc_rate(struct clk_hw *hw,
413 unsigned long parent_rate)
414{
415 struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
416
417 if (composite->div_id == NO_STM32_DIV)
418 return parent_rate;
419
420 return stm32_divider_get_rate(composite->base, composite->clock_data,
421 composite->div_id, parent_rate);
422}
423
/*
 * clk_ops.round_rate for the composite: identical policy to the plain
 * divider clock — pass @rate through when there is no divider, round
 * against the current register value for read-only dividers, otherwise
 * use the common divider rounding helper.
 */
static long clk_stm32_composite_round_rate(struct clk_hw *hw, unsigned long rate,
					   unsigned long *prate)
{
	struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);

	const struct stm32_div_cfg *divider;

	if (composite->div_id == NO_STM32_DIV)
		return rate;

	divider = &composite->clock_data->dividers[composite->div_id];

	/* if read only, just return current value */
	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
		u32 val;

		val = readl(composite->base + divider->offset) >> divider->shift;
		val &= clk_div_mask(divider->width);

		return divider_ro_round_rate(hw, rate, prate, divider->table,
					     divider->width, divider->flags,
					     val);
	}

	return divider_round_rate_parent(hw, clk_hw_get_parent(hw),
					 rate, prate, divider->table,
					 divider->width, divider->flags);
}
452
453static u8 clk_stm32_composite_get_parent(struct clk_hw *hw)
454{
455 struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
456
457 return stm32_mux_get_parent(composite->base, composite->clock_data, composite->mux_id);
458}
459
460static int clk_stm32_composite_set_parent(struct clk_hw *hw, u8 index)
461{
462 struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
463 unsigned long flags = 0;
464
465 spin_lock_irqsave(composite->lock, flags);
466
467 stm32_mux_set_parent(composite->base, composite->clock_data, composite->mux_id, index);
468
469 spin_unlock_irqrestore(composite->lock, flags);
470
471 return 0;
472}
473
474static int clk_stm32_composite_is_enabled(struct clk_hw *hw)
475{
476 struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
477
478 if (composite->gate_id == NO_STM32_GATE)
479 return (__clk_get_enable_count(hw->clk) > 0);
480
481 return stm32_gate_is_enabled(composite->base, composite->clock_data, composite->gate_id);
482}
483
484static void clk_stm32_composite_gate_endisable(struct clk_hw *hw, int enable)
485{
486 struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
487 unsigned long flags = 0;
488
489 spin_lock_irqsave(composite->lock, flags);
490
491 stm32_gate_endisable(composite->base, composite->clock_data, composite->gate_id, enable);
492
493 spin_unlock_irqrestore(composite->lock, flags);
494}
495
496static int clk_stm32_composite_gate_enable(struct clk_hw *hw)
497{
498 struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
499
500 if (composite->gate_id == NO_STM32_GATE)
501 return 0;
502
503 clk_stm32_composite_gate_endisable(hw, 1);
504
505 return 0;
506}
507
508static void clk_stm32_composite_gate_disable(struct clk_hw *hw)
509{
510 struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
511
512 if (composite->gate_id == NO_STM32_GATE)
513 return;
514
515 clk_stm32_composite_gate_endisable(hw, 0);
516}
517
518static void clk_stm32_composite_disable_unused(struct clk_hw *hw)
519{
520 struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
521 unsigned long flags = 0;
522
523 if (composite->gate_id == NO_STM32_GATE)
524 return;
525
526 spin_lock_irqsave(composite->lock, flags);
527
528 stm32_gate_disable_unused(composite->base, composite->clock_data, composite->gate_id);
529
530 spin_unlock_irqrestore(composite->lock, flags);
531}
532
/*
 * Composite clock combining mux (parent ops), divider (rate ops) and
 * gate (enable ops); the gate and divider parts are optional per clock
 * via NO_STM32_GATE / NO_STM32_DIV.
 */
const struct clk_ops clk_stm32_composite_ops = {
	.set_rate = clk_stm32_composite_set_rate,
	.recalc_rate = clk_stm32_composite_recalc_rate,
	.round_rate = clk_stm32_composite_round_rate,
	.get_parent = clk_stm32_composite_get_parent,
	.set_parent = clk_stm32_composite_set_parent,
	.enable = clk_stm32_composite_gate_enable,
	.disable = clk_stm32_composite_gate_disable,
	.is_enabled = clk_stm32_composite_is_enabled,
	.disable_unused = clk_stm32_composite_disable_unused,
};
544
f95cea83
GF
545struct clk_hw *clk_stm32_mux_register(struct device *dev,
546 const struct stm32_rcc_match_data *data,
547 void __iomem *base,
548 spinlock_t *lock,
549 const struct clock_config *cfg)
550{
551 struct clk_stm32_mux *mux = cfg->clock_cfg;
552 struct clk_hw *hw = &mux->hw;
553 int err;
554
555 mux->base = base;
556 mux->lock = lock;
557 mux->clock_data = data->clock_data;
558
559 err = clk_hw_register(dev, hw);
560 if (err)
561 return ERR_PTR(err);
562
563 return hw;
564}
95f5e0a4
GF
565
566struct clk_hw *clk_stm32_gate_register(struct device *dev,
567 const struct stm32_rcc_match_data *data,
568 void __iomem *base,
569 spinlock_t *lock,
570 const struct clock_config *cfg)
571{
572 struct clk_stm32_gate *gate = cfg->clock_cfg;
573 struct clk_hw *hw = &gate->hw;
574 int err;
575
576 gate->base = base;
577 gate->lock = lock;
578 gate->clock_data = data->clock_data;
579
580 err = clk_hw_register(dev, hw);
581 if (err)
582 return ERR_PTR(err);
583
584 return hw;
585}
720e34ab
GF
586
587struct clk_hw *clk_stm32_div_register(struct device *dev,
588 const struct stm32_rcc_match_data *data,
589 void __iomem *base,
590 spinlock_t *lock,
591 const struct clock_config *cfg)
592{
593 struct clk_stm32_div *div = cfg->clock_cfg;
594 struct clk_hw *hw = &div->hw;
595 int err;
596
597 div->base = base;
598 div->lock = lock;
599 div->clock_data = data->clock_data;
600
601 err = clk_hw_register(dev, hw);
602 if (err)
603 return ERR_PTR(err);
604
605 return hw;
606}
5f0d4721
GF
607
608struct clk_hw *clk_stm32_composite_register(struct device *dev,
609 const struct stm32_rcc_match_data *data,
610 void __iomem *base,
611 spinlock_t *lock,
612 const struct clock_config *cfg)
613{
614 struct clk_stm32_composite *composite = cfg->clock_cfg;
615 struct clk_hw *hw = &composite->hw;
616 int err;
617
618 composite->base = base;
619 composite->lock = lock;
620 composite->clock_data = data->clock_data;
621
622 err = clk_hw_register(dev, hw);
623 if (err)
624 return ERR_PTR(err);
625
626 return hw;
627}