// SPDX-License-Identifier: GPL-2.0
/*
 * Marvell MVEBU CPU clock handling.
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 *
 */
10#include <linux/kernel.h>
db00c3e5
SB
11#include <linux/slab.h>
12#include <linux/clk.h>
ab8ba01b
GC
13#include <linux/clk-provider.h>
14#include <linux/of_address.h>
15#include <linux/io.h>
16#include <linux/of.h>
17#include <linux/delay.h>
ee2d8ea1
TP
18#include <linux/mvebu-pmsu.h>
19#include <asm/smp_plat.h>
ab8ba01b 20
ee2d8ea1
TP
21#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET 0x0
22#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL 0xff
23#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT 8
24#define SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET 0x8
25#define SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT 16
26#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET 0xC
27#define SYS_CTRL_CLK_DIVIDER_MASK 0x3F
28
29#define PMU_DFS_RATIO_SHIFT 16
30#define PMU_DFS_RATIO_MASK 0x3F
ab8ba01b
GC
31
32#define MAX_CPU 4
33struct cpu_clk {
34 struct clk_hw hw;
35 int cpu;
36 const char *clk_name;
37 const char *parent_name;
38 void __iomem *reg_base;
ee2d8ea1 39 void __iomem *pmu_dfs;
ab8ba01b
GC
40};
41
42static struct clk **clks;
43
44static struct clk_onecell_data clk_data;
45
46#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)
47
48static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
49 unsigned long parent_rate)
50{
51 struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
52 u32 reg, div;
53
54 reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
55 div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
56 return parent_rate / div;
57}
58
59static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
60 unsigned long *parent_rate)
61{
62 /* Valid ratio are 1:1, 1:2 and 1:3 */
63 u32 div;
64
65 div = *parent_rate / rate;
66 if (div == 0)
67 div = 1;
68 else if (div > 3)
69 div = 3;
70
71 return *parent_rate / div;
72}
73
ee2d8ea1
TP
74static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate,
75 unsigned long parent_rate)
76
ab8ba01b
GC
77{
78 struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
79 u32 reg, div;
80 u32 reload_mask;
81
82 div = parent_rate / rate;
83 reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
84 & (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
85 | (div << (cpuclk->cpu * 8));
86 writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
87 /* Set clock divider reload smooth bit mask */
88 reload_mask = 1 << (20 + cpuclk->cpu);
89
90 reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
91 | reload_mask;
92 writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
93
94 /* Now trigger the clock update */
95 reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
96 | 1 << 24;
97 writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
98
99 /* Wait for clocks to settle down then clear reload request */
100 udelay(1000);
101 reg &= ~(reload_mask | 1 << 24);
102 writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
103 udelay(1000);
104
105 return 0;
106}
107
ee2d8ea1
TP
108static int clk_cpu_on_set_rate(struct clk_hw *hwclk, unsigned long rate,
109 unsigned long parent_rate)
110{
111 u32 reg;
112 unsigned long fabric_div, target_div, cur_rate;
113 struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
114
115 /*
116 * PMU DFS registers are not mapped, Device Tree does not
117 * describes them. We cannot change the frequency dynamically.
118 */
119 if (!cpuclk->pmu_dfs)
120 return -ENODEV;
121
eca61c9f 122 cur_rate = clk_hw_get_rate(hwclk);
ee2d8ea1
TP
123
124 reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET);
125 fabric_div = (reg >> SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT) &
126 SYS_CTRL_CLK_DIVIDER_MASK;
127
128 /* Frequency is going up */
129 if (rate == 2 * cur_rate)
130 target_div = fabric_div / 2;
131 /* Frequency is going down */
132 else
133 target_div = fabric_div;
134
135 if (target_div == 0)
136 target_div = 1;
137
138 reg = readl(cpuclk->pmu_dfs);
139 reg &= ~(PMU_DFS_RATIO_MASK << PMU_DFS_RATIO_SHIFT);
140 reg |= (target_div << PMU_DFS_RATIO_SHIFT);
141 writel(reg, cpuclk->pmu_dfs);
142
143 reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
144 reg |= (SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL <<
145 SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT);
146 writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
147
148 return mvebu_pmsu_dfs_request(cpuclk->cpu);
149}
150
151static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
152 unsigned long parent_rate)
153{
154 if (__clk_is_enabled(hwclk->clk))
155 return clk_cpu_on_set_rate(hwclk, rate, parent_rate);
156 else
157 return clk_cpu_off_set_rate(hwclk, rate, parent_rate);
158}
159
ab8ba01b
GC
160static const struct clk_ops cpu_ops = {
161 .recalc_rate = clk_cpu_recalc_rate,
162 .round_rate = clk_cpu_round_rate,
163 .set_rate = clk_cpu_set_rate,
164};
165
9ac81751 166static void __init of_cpu_clk_setup(struct device_node *node)
ab8ba01b
GC
167{
168 struct cpu_clk *cpuclk;
169 void __iomem *clock_complex_base = of_iomap(node, 0);
ee2d8ea1 170 void __iomem *pmu_dfs_base = of_iomap(node, 1);
ab8ba01b
GC
171 int ncpus = 0;
172 struct device_node *dn;
173
174 if (clock_complex_base == NULL) {
175 pr_err("%s: clock-complex base register not set\n",
176 __func__);
177 return;
178 }
179
ee2d8ea1
TP
180 if (pmu_dfs_base == NULL)
181 pr_warn("%s: pmu-dfs base register not set, dynamic frequency scaling not available\n",
182 __func__);
183
76ec23b1 184 for_each_of_cpu_node(dn)
ab8ba01b
GC
185 ncpus++;
186
23826e24 187 cpuclk = kcalloc(ncpus, sizeof(*cpuclk), GFP_KERNEL);
ab8ba01b 188 if (WARN_ON(!cpuclk))
f98d007d 189 goto cpuclk_out;
ab8ba01b 190
23826e24 191 clks = kcalloc(ncpus, sizeof(*clks), GFP_KERNEL);
ab8ba01b 192 if (WARN_ON(!clks))
d6f620a4 193 goto clks_out;
ab8ba01b 194
76ec23b1 195 for_each_of_cpu_node(dn) {
ab8ba01b
GC
196 struct clk_init_data init;
197 struct clk *clk;
ab8ba01b
GC
198 char *clk_name = kzalloc(5, GFP_KERNEL);
199 int cpu, err;
200
201 if (WARN_ON(!clk_name))
d6f620a4 202 goto bail_out;
ab8ba01b
GC
203
204 err = of_property_read_u32(dn, "reg", &cpu);
205 if (WARN_ON(err))
d6f620a4 206 goto bail_out;
ab8ba01b
GC
207
208 sprintf(clk_name, "cpu%d", cpu);
ab8ba01b 209
61e22fff 210 cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0);
ab8ba01b
GC
211 cpuclk[cpu].clk_name = clk_name;
212 cpuclk[cpu].cpu = cpu;
213 cpuclk[cpu].reg_base = clock_complex_base;
ee2d8ea1
TP
214 if (pmu_dfs_base)
215 cpuclk[cpu].pmu_dfs = pmu_dfs_base + 4 * cpu;
ab8ba01b
GC
216 cpuclk[cpu].hw.init = &init;
217
218 init.name = cpuclk[cpu].clk_name;
219 init.ops = &cpu_ops;
220 init.flags = 0;
221 init.parent_names = &cpuclk[cpu].parent_name;
222 init.num_parents = 1;
223
224 clk = clk_register(NULL, &cpuclk[cpu].hw);
225 if (WARN_ON(IS_ERR(clk)))
226 goto bail_out;
227 clks[cpu] = clk;
228 }
229 clk_data.clk_num = MAX_CPU;
230 clk_data.clks = clks;
231 of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
232
233 return;
234bail_out:
235 kfree(clks);
d6f620a4
CD
236 while(ncpus--)
237 kfree(cpuclk[ncpus].clk_name);
238clks_out:
ab8ba01b 239 kfree(cpuclk);
f98d007d
JZ
240cpuclk_out:
241 iounmap(clock_complex_base);
ab8ba01b
GC
242}
243
f640c0fa
JFM
244CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock",
245 of_cpu_clk_setup);
e120c17a
CP
246
247static void __init of_mv98dx3236_cpu_clk_setup(struct device_node *node)
248{
249 of_clk_add_provider(node, of_clk_src_simple_get, NULL);
250}
251
252CLK_OF_DECLARE(mv98dx3236_cpu_clock, "marvell,mv98dx3236-cpu-clock",
253 of_mv98dx3236_cpu_clk_setup);