drivers/sh/clk/cpg.c
/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 * Copyright (C) 2010 Magnus Damm
 * Copyright (C) 2010 - 2012 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/sh_clk.h>

#define CPG_CKSTP_BIT	BIT(8)

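/*
 * The enable/divider registers handled here come in 8-, 16- and 32-bit
 * flavours; sh_clk_read()/sh_clk_write() below pick the access width from
 * the clock's CLK_ENABLE_REG_{8,16}BIT flags, defaulting to 32-bit.
 */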
static unsigned int sh_clk_read(struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		return ioread8(clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		return ioread16(clk->mapped_reg);

	return ioread32(clk->mapped_reg);
}

static void sh_clk_write(int value, struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		iowrite8(value, clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		iowrite16(value, clk->mapped_reg);
	else
		iowrite32(value, clk->mapped_reg);
}

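/*
 * Fixed-width MMIO read helpers, used as function pointers when polling an
 * MSTP status register in sh_clk_mstp_enable() below.
 */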
static unsigned int r8(const void __iomem *addr)
{
	return ioread8(addr);
}

static unsigned int r16(const void __iomem *addr)
{
	return ioread16(addr);
}

static unsigned int r32(const void __iomem *addr)
{
	return ioread32(addr);
}

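/*
 * MSTP (module stop) gating: clearing the enable bit ungates the module
 * clock, setting it gates the clock again. When a status register is
 * provided, the enable is confirmed by polling that register for up to
 * 1000 iterations before giving up with -ETIMEDOUT.
 */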
static int sh_clk_mstp_enable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
	if (clk->status_reg) {
		unsigned int (*read)(const void __iomem *addr);
		int i;
		void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
			(phys_addr_t)clk->enable_reg + clk->mapped_reg;

		if (clk->flags & CLK_ENABLE_REG_8BIT)
			read = r8;
		else if (clk->flags & CLK_ENABLE_REG_16BIT)
			read = r16;
		else
			read = r32;

		for (i = 1000;
		     (read(mapped_status) & (1 << clk->enable_bit)) && i;
		     i--)
			cpu_relax();
		if (!i) {
			pr_err("cpg: failed to enable %p[%d]\n",
			       clk->enable_reg, clk->enable_bit);
			return -ETIMEDOUT;
		}
	}
	return 0;
}

static void sh_clk_mstp_disable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
}

static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable = sh_clk_mstp_enable,
	.disable = sh_clk_mstp_disable,
	.recalc = followparent_recalc,
};

int __init sh_clk_mstp_register(struct clk *clks, int nr)
{
	struct clk *clkp;
	int ret = 0;
	int k;

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;
		clkp->ops = &sh_clk_mstp_clk_ops;
		ret |= clk_register(clkp);
	}

	return ret;
}

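/*
 * Illustrative usage (not part of this file): a platform would typically
 * describe its MSTP clocks with the SH_CLK_MSTP32() initializer from
 * <linux/sh_clk.h> and register the whole table at clock setup time, e.g.
 *
 *	static struct clk mstp_clks[] = {
 *		[MSTP007] = SH_CLK_MSTP32(&parent_clk, MSTPCR0, 7, 0),
 *	};
 *	sh_clk_mstp_register(mstp_clks, ARRAY_SIZE(mstp_clks));
 *
 * MSTP007, parent_clk and MSTPCR0 are hypothetical platform definitions.
 */
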
/*
 * Div/mult table lookup helpers
 */
static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
	return clk->priv;
}

static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
{
	return clk_to_div_table(clk)->div_mult_table;
}

/*
 * Common div ops
 */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}

static unsigned long sh_clk_div_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, clk->arch_flags ? &clk->arch_flags : NULL);

	idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;

	return clk->freq_table[idx].frequency;
}

static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk_div_table *dt = clk_to_div_table(clk);
	unsigned long value;
	int idx;

	idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = sh_clk_read(clk);
	value &= ~(clk->div_mask << clk->enable_bit);
	value |= (idx << clk->enable_bit);
	sh_clk_write(value, clk);

	/* XXX: Should use a post-change notifier */
	if (dt->kick)
		dt->kick(clk);

	return 0;
}

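/*
 * Divider clocks are stopped and restarted via the CPG_CKSTP_BIT in their
 * control register. div6 clocks get their divisor field rewritten on
 * enable and, with CLK_MASK_DIV_ON_DISABLE, masked again on disable; see
 * the comment in sh_clk_div_disable() for why.
 */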
static int sh_clk_div_enable(struct clk *clk)
{
	if (clk->div_mask == SH_CLK_DIV6_MSK) {
		int ret = sh_clk_div_set_rate(clk, clk->rate);
		if (ret < 0)
			return ret;
	}

	sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
	return 0;
}

static void sh_clk_div_disable(struct clk *clk)
{
	unsigned int val;

	val = sh_clk_read(clk);
	val |= CPG_CKSTP_BIT;

	/*
	 * div6 clocks require the divisor field to be non-zero or the
	 * above CKSTP toggle silently fails. Ensure that the divisor
	 * array is reset to its initial state on disable.
	 */
	if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
		val |= clk->div_mask;

	sh_clk_write(val, clk);
}

static struct sh_clk_ops sh_clk_div_clk_ops = {
	.recalc = sh_clk_div_recalc,
	.set_rate = sh_clk_div_set_rate,
	.round_rate = sh_clk_div_round_rate,
};

static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
	.recalc = sh_clk_div_recalc,
	.set_rate = sh_clk_div_set_rate,
	.round_rate = sh_clk_div_round_rate,
	.enable = sh_clk_div_enable,
	.disable = sh_clk_div_disable,
};

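/*
 * sh_clk_init_parent() decodes the parent selector field (src_shift /
 * src_width) of a freshly registered clock and reparents it onto the
 * matching entry of its parent_table. Clocks with a fixed parent are
 * left untouched.
 */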
static int __init sh_clk_init_parent(struct clk *clk)
{
	u32 val;

	if (clk->parent)
		return 0;

	if (!clk->parent_table || !clk->parent_num)
		return 0;

	if (!clk->src_width) {
		pr_err("sh_clk_init_parent: cannot select parent clock\n");
		return -EINVAL;
	}

	val = (sh_clk_read(clk) >> clk->src_shift);
	val &= (1 << clk->src_width) - 1;

	if (val >= clk->parent_num) {
		pr_err("sh_clk_init_parent: parent table size failed\n");
		return -EINVAL;
	}

	clk_reparent(clk, clk->parent_table[val]);
	if (!clk->parent) {
		pr_err("sh_clk_init_parent: unable to set parent\n");
		return -EINVAL;
	}

	return 0;
}

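/*
 * Common registration path for div4/div6 clocks: one cpufreq frequency
 * table of nr_divisors + 1 entries (the last entry is CPUFREQ_TABLE_END)
 * is carved out of a single allocation per clock, then each clock is
 * registered and has its initial parent resolved.
 */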
static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
			struct clk_div_table *table, struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);
	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("%s: unable to alloc memory\n", __func__);
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;

		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
		if (ret == 0)
			ret = sh_clk_init_parent(clkp);
	}

	return ret;
}

/*
 * div6 support
 */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};

static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};

static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table = &div6_div_mult_table,
};

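/*
 * sh_clk_div6_set_parent() looks the requested parent up in the clock's
 * parent_table, writes its index into the src_shift/src_width selector
 * field and rebuilds the frequency table for the new parent rate.
 */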
static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret, i;

	if (!clk->parent_table || !clk->parent_num)
		return -EINVAL;

	/* Search the parent */
	for (i = 0; i < clk->parent_num; i++)
		if (clk->parent_table[i] == parent)
			break;

	if (i == clk->parent_num)
		return -ENODEV;

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	value = sh_clk_read(clk) &
		~(((1 << clk->src_width) - 1) << clk->src_shift);

	sh_clk_write(value | (i << clk->src_shift), clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	return 0;
}

static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc = sh_clk_div_recalc,
	.round_rate = sh_clk_div_round_rate,
	.set_rate = sh_clk_div_set_rate,
	.enable = sh_clk_div_enable,
	.disable = sh_clk_div_disable,
	.set_parent = sh_clk_div6_set_parent,
};

int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div_enable_clk_ops);
}

int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div6_reparent_clk_ops);
}

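/*
 * Both div6 registration helpers above share the fixed 1..64 divisor table;
 * the reparent variant only differs in installing a .set_parent operation
 * for clocks with a runtime-selectable source.
 */
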
/*
 * div4 support
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret;

	/* we really need a better way to determine parent index, but for
	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
	 * no CLK_ENABLE_ON_INIT means external clock...
	 */

	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}

static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc = sh_clk_div_recalc,
	.set_rate = sh_clk_div_set_rate,
	.round_rate = sh_clk_div_round_rate,
	.enable = sh_clk_div_enable,
	.disable = sh_clk_div_disable,
	.set_parent = sh_clk_div4_set_parent,
};

int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
}

int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				       struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div_enable_clk_ops);
}

int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
					 struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div4_reparent_clk_ops);
}

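/*
 * Note: unlike div6, the div4 helpers take a platform-supplied
 * struct clk_div4_table, whose div_mult_table lists the valid divisors and
 * whose optional kick() callback is invoked after each rate change (see
 * sh_clk_div_set_rate() above).
 */
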
/* FSI-DIV */
static unsigned long fsidiv_recalc(struct clk *clk)
{
	u32 value;

	value = __raw_readl(clk->mapping->base);

	value >>= 16;
	if (value < 2)
		return clk->parent->rate;

	return clk->parent->rate / value;
}

static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_div_range_round(clk, 1, 0xffff, rate);
}

static void fsidiv_disable(struct clk *clk)
{
	__raw_writel(0, clk->mapping->base);
}

static int fsidiv_enable(struct clk *clk)
{
	u32 value;

	value = __raw_readl(clk->mapping->base) >> 16;
	if (value < 2)
		return 0;

	__raw_writel((value << 16) | 0x3, clk->mapping->base);

	return 0;
}

static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
{
	int idx;

	idx = (clk->parent->rate / rate) & 0xffff;
	if (idx < 2)
		__raw_writel(0, clk->mapping->base);
	else
		__raw_writel(idx << 16, clk->mapping->base);

	return 0;
}

static struct sh_clk_ops fsidiv_clk_ops = {
	.recalc = fsidiv_recalc,
	.round_rate = fsidiv_round_rate,
	.set_rate = fsidiv_set_rate,
	.enable = fsidiv_enable,
	.disable = fsidiv_disable,
};

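/*
 * sh_clk_fsidiv_register() turns the physical address stashed in
 * .enable_reg by the platform's FSI-DIV clock definition into a
 * struct clk_mapping covering the 8-byte register window, clears
 * .enable_reg again and registers the clock with the FSI-DIV ops above.
 */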
int __init sh_clk_fsidiv_register(struct clk *clks, int nr)
{
	struct clk_mapping *map;
	int i;

	for (i = 0; i < nr; i++) {

		map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL);
		if (!map) {
			pr_err("%s: unable to alloc memory\n", __func__);
			return -ENOMEM;
		}

		/* clks[i].enable_reg came from SH_CLK_FSIDIV() */
		map->phys = (phys_addr_t)clks[i].enable_reg;
		map->len = 8;

		clks[i].enable_reg = 0; /* remove .enable_reg */
		clks[i].ops = &fsidiv_clk_ops;
		clks[i].mapping = map;

		clk_register(&clks[i]);
	}

	return 0;
}