Commit | Line | Data |
---|---|---|
253b0887 PM |
1 | #include <linux/clk.h> |
2 | #include <linux/compiler.h> | |
a1153e27 | 3 | #include <linux/bootmem.h> |
6881e8bf | 4 | #include <linux/io.h> |
253b0887 PM |
5 | #include <asm/clock.h> |
6 | ||
6881e8bf MD |
7 | static int sh_clk_mstp32_enable(struct clk *clk) |
8 | { | |
9 | __raw_writel(__raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit), | |
10 | clk->enable_reg); | |
11 | return 0; | |
12 | } | |
13 | ||
14 | static void sh_clk_mstp32_disable(struct clk *clk) | |
15 | { | |
16 | __raw_writel(__raw_readl(clk->enable_reg) | (1 << clk->enable_bit), | |
17 | clk->enable_reg); | |
18 | } | |
19 | ||
/*
 * Operations for MSTP32 gate clocks: pure gates with no divider, so
 * the rate simply follows the parent (followparent_recalc).
 */
static struct clk_ops sh_clk_mstp32_clk_ops = {
	.enable = sh_clk_mstp32_enable,
	.disable = sh_clk_mstp32_disable,
	.recalc = followparent_recalc,
};
25 | ||
26 | int __init sh_clk_mstp32_register(struct clk *clks, int nr) | |
27 | { | |
28 | struct clk *clkp; | |
29 | int ret = 0; | |
30 | int k; | |
31 | ||
32 | for (k = 0; !ret && (k < nr); k++) { | |
33 | clkp = clks + k; | |
34 | clkp->ops = &sh_clk_mstp32_clk_ops; | |
35 | ret |= clk_register(clkp); | |
36 | } | |
37 | ||
38 | return ret; | |
39 | } | |
40 | ||
2693e274 MD |
/*
 * Round @rate to the closest rate this divider clock can actually
 * produce, using the clock's pre-built frequency table.  Shared by
 * the DIV4 and DIV6 clk_ops.
 */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}
45 | ||
/*
 * DIV6 clocks support every integer divisor from 1 to 64; the 6-bit
 * divider field in the control register selects entry N for divisor N+1.
 */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};
52 | ||
/*
 * Divider table shared by all DIV6 clocks.  No multipliers field is
 * set: DIV6 clocks are divide-only.
 */
static struct clk_div_mult_table sh_clk_div6_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};
57 | ||
58 | static unsigned long sh_clk_div6_recalc(struct clk *clk) | |
59 | { | |
60 | struct clk_div_mult_table *table = &sh_clk_div6_table; | |
61 | unsigned int idx; | |
62 | ||
63 | clk_rate_table_build(clk, clk->freq_table, table->nr_divisors, | |
64 | table, NULL); | |
65 | ||
66 | idx = __raw_readl(clk->enable_reg) & 0x003f; | |
67 | ||
68 | return clk->freq_table[idx].frequency; | |
69 | } | |
70 | ||
098dee99 MD |
71 | static int sh_clk_div6_set_rate(struct clk *clk, |
72 | unsigned long rate, int algo_id) | |
73 | { | |
74 | unsigned long value; | |
75 | int idx; | |
76 | ||
77 | idx = clk_rate_table_find(clk, clk->freq_table, rate); | |
78 | if (idx < 0) | |
79 | return idx; | |
80 | ||
81 | value = __raw_readl(clk->enable_reg); | |
82 | value &= ~0x3f; | |
83 | value |= idx; | |
84 | __raw_writel(value, clk->enable_reg); | |
85 | return 0; | |
86 | } | |
87 | ||
88 | static int sh_clk_div6_enable(struct clk *clk) | |
89 | { | |
90 | unsigned long value; | |
91 | int ret; | |
92 | ||
93 | ret = sh_clk_div6_set_rate(clk, clk->rate, 0); | |
94 | if (ret == 0) { | |
95 | value = __raw_readl(clk->enable_reg); | |
96 | value &= ~0x100; /* clear stop bit to enable clock */ | |
97 | __raw_writel(value, clk->enable_reg); | |
98 | } | |
99 | return ret; | |
100 | } | |
101 | ||
102 | static void sh_clk_div6_disable(struct clk *clk) | |
103 | { | |
104 | unsigned long value; | |
105 | ||
106 | value = __raw_readl(clk->enable_reg); | |
107 | value |= 0x100; /* stop clock */ | |
108 | value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */ | |
109 | __raw_writel(value, clk->enable_reg); | |
110 | } | |
111 | ||
2693e274 MD |
/*
 * Operations for DIV6 clocks: full divider support (recalc/round/set)
 * plus gating via the stop bit in the same control register.
 */
static struct clk_ops sh_clk_div6_clk_ops = {
	.recalc = sh_clk_div6_recalc,
	.round_rate = sh_clk_div_round_rate,
	.set_rate = sh_clk_div6_set_rate,
	.enable = sh_clk_div6_enable,
	.disable = sh_clk_div6_disable,
};
119 | ||
120 | int __init sh_clk_div6_register(struct clk *clks, int nr) | |
121 | { | |
122 | struct clk *clkp; | |
123 | void *freq_table; | |
124 | int nr_divs = sh_clk_div6_table.nr_divisors; | |
125 | int freq_table_size = sizeof(struct cpufreq_frequency_table); | |
126 | int ret = 0; | |
127 | int k; | |
128 | ||
129 | freq_table_size *= (nr_divs + 1); | |
130 | ||
131 | freq_table = alloc_bootmem(freq_table_size * nr); | |
132 | if (!freq_table) | |
133 | return -ENOMEM; | |
134 | ||
135 | for (k = 0; !ret && (k < nr); k++) { | |
136 | clkp = clks + k; | |
137 | ||
138 | clkp->ops = &sh_clk_div6_clk_ops; | |
139 | clkp->id = -1; | |
140 | clkp->freq_table = freq_table + (k * freq_table_size); | |
141 | clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END; | |
142 | ||
143 | ret = clk_register(clkp); | |
144 | } | |
145 | ||
146 | return ret; | |
147 | } | |
148 | ||
a1153e27 MD |
149 | static unsigned long sh_clk_div4_recalc(struct clk *clk) |
150 | { | |
151 | struct clk_div_mult_table *table = clk->priv; | |
152 | unsigned int idx; | |
153 | ||
154 | clk_rate_table_build(clk, clk->freq_table, table->nr_divisors, | |
155 | table, &clk->arch_flags); | |
156 | ||
157 | idx = (__raw_readl(clk->enable_reg) >> clk->enable_bit) & 0x000f; | |
158 | ||
159 | return clk->freq_table[idx].frequency; | |
160 | } | |
161 | ||
a1153e27 MD |
/*
 * Operations for DIV4 clocks: read-only divider handling (no set_rate
 * or gating is implemented for DIV4 at this point).
 */
static struct clk_ops sh_clk_div4_clk_ops = {
	.recalc = sh_clk_div4_recalc,
	.round_rate = sh_clk_div_round_rate,
};
166 | ||
167 | int __init sh_clk_div4_register(struct clk *clks, int nr, | |
168 | struct clk_div_mult_table *table) | |
169 | { | |
170 | struct clk *clkp; | |
171 | void *freq_table; | |
172 | int nr_divs = table->nr_divisors; | |
173 | int freq_table_size = sizeof(struct cpufreq_frequency_table); | |
174 | int ret = 0; | |
175 | int k; | |
176 | ||
a50de78d MD |
177 | freq_table_size *= (nr_divs + 1); |
178 | ||
179 | freq_table = alloc_bootmem(freq_table_size * nr); | |
a1153e27 MD |
180 | if (!freq_table) |
181 | return -ENOMEM; | |
182 | ||
183 | for (k = 0; !ret && (k < nr); k++) { | |
184 | clkp = clks + k; | |
185 | ||
186 | clkp->ops = &sh_clk_div4_clk_ops; | |
187 | clkp->id = -1; | |
188 | clkp->priv = table; | |
189 | ||
190 | clkp->freq_table = freq_table + (k * freq_table_size); | |
191 | clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END; | |
192 | ||
193 | ret = clk_register(clkp); | |
194 | } | |
195 | ||
196 | return ret; | |
197 | } | |
198 | ||
36aa1e32 | 199 | #ifdef CONFIG_SH_CLK_CPG_LEGACY |
253b0887 PM |
/* Root of the legacy clock tree; rate comes from Kconfig. */
static struct clk master_clk = {
	.name = "master_clk",
	.flags = CLK_ENABLE_ON_INIT,
	.rate = CONFIG_SH_PCLK_FREQ,
};

/* Peripheral (module) clock, derived from the master clock. */
static struct clk peripheral_clk = {
	.name = "peripheral_clk",
	.parent = &master_clk,
	.flags = CLK_ENABLE_ON_INIT,
};

/* Bus clock, derived from the master clock. */
static struct clk bus_clk = {
	.name = "bus_clk",
	.parent = &master_clk,
	.flags = CLK_ENABLE_ON_INIT,
};

/* CPU core clock, derived from the master clock. */
static struct clk cpu_clk = {
	.name = "cpu_clk",
	.parent = &master_clk,
	.flags = CLK_ENABLE_ON_INIT,
};

/*
 * The ordering of these clocks matters, do not change it.
 * (cpg_clk_init() passes the array index to arch_init_clk_ops(),
 * so each position identifies a specific clock to the CPU code.)
 */
static struct clk *onchip_clocks[] = {
	&master_clk,
	&peripheral_clk,
	&bus_clk,
	&cpu_clk,
};
233 | ||
234 | int __init __deprecated cpg_clk_init(void) | |
235 | { | |
236 | int i, ret = 0; | |
237 | ||
238 | for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) { | |
239 | struct clk *clk = onchip_clocks[i]; | |
240 | arch_init_clk_ops(&clk->ops, i); | |
241 | if (clk->ops) | |
242 | ret |= clk_register(clk); | |
243 | } | |
244 | ||
245 | return ret; | |
246 | } | |
247 | ||
248 | /* | |
249 | * Placeholder for compatibility, until the lazy CPUs do this | |
250 | * on their own. | |
251 | */ | |
/* Weak default: CPUs without their own arch_clk_init() fall back to
 * the legacy CPG initialization above. */
int __init __weak arch_clk_init(void)
{
	return cpg_clk_init();
}
36aa1e32 | 256 | #endif /* CONFIG_SH_CLK_CPG_LEGACY */ |