Commit | Line | Data |
---|---|---|
ebafb63d | 1 | // SPDX-License-Identifier: GPL-2.0 |
b2476490 MT |
2 | /* |
3 | * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com> | |
4 | * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org> | |
5 | * | |
5fb94e9c | 6 | * Standard functionality for the common clock API. See Documentation/driver-api/clk.rst |
b2476490 MT |
7 | */ |
8 | ||
3c373117 | 9 | #include <linux/clk.h> |
b09d6d99 | 10 | #include <linux/clk-provider.h> |
86be408b | 11 | #include <linux/clk/clk-conf.h> |
b2476490 MT |
12 | #include <linux/module.h> |
13 | #include <linux/mutex.h> | |
14 | #include <linux/spinlock.h> | |
15 | #include <linux/err.h> | |
16 | #include <linux/list.h> | |
17 | #include <linux/slab.h> | |
766e6a4e | 18 | #include <linux/of.h> |
46c8773a | 19 | #include <linux/device.h> |
f2f6c255 | 20 | #include <linux/init.h> |
9a34b453 | 21 | #include <linux/pm_runtime.h> |
533ddeb1 | 22 | #include <linux/sched.h> |
562ef0b0 | 23 | #include <linux/clkdev.h> |
b2476490 | 24 | |
d6782c26 SN |
25 | #include "clk.h" |
26 | ||
b2476490 MT |
27 | static DEFINE_SPINLOCK(enable_lock); |
28 | static DEFINE_MUTEX(prepare_lock); | |
29 | ||
533ddeb1 MT |
30 | static struct task_struct *prepare_owner; |
31 | static struct task_struct *enable_owner; | |
32 | ||
33 | static int prepare_refcnt; | |
34 | static int enable_refcnt; | |
35 | ||
b2476490 MT |
36 | static HLIST_HEAD(clk_root_list); |
37 | static HLIST_HEAD(clk_orphan_list); | |
38 | static LIST_HEAD(clk_notifier_list); | |
39 | ||
b09d6d99 MT |
40 | /*** private data structures ***/ |
41 | ||
42 | struct clk_core { | |
43 | const char *name; | |
44 | const struct clk_ops *ops; | |
45 | struct clk_hw *hw; | |
46 | struct module *owner; | |
9a34b453 | 47 | struct device *dev; |
b09d6d99 MT |
48 | struct clk_core *parent; |
49 | const char **parent_names; | |
50 | struct clk_core **parents; | |
51 | u8 num_parents; | |
52 | u8 new_parent_index; | |
53 | unsigned long rate; | |
1c8e6004 | 54 | unsigned long req_rate; |
b09d6d99 MT |
55 | unsigned long new_rate; |
56 | struct clk_core *new_parent; | |
57 | struct clk_core *new_child; | |
58 | unsigned long flags; | |
e6500344 | 59 | bool orphan; |
b09d6d99 MT |
60 | unsigned int enable_count; |
61 | unsigned int prepare_count; | |
e55a839a | 62 | unsigned int protect_count; |
9783c0d9 SB |
63 | unsigned long min_rate; |
64 | unsigned long max_rate; | |
b09d6d99 MT |
65 | unsigned long accuracy; |
66 | int phase; | |
9fba738a | 67 | struct clk_duty duty; |
b09d6d99 MT |
68 | struct hlist_head children; |
69 | struct hlist_node child_node; | |
1c8e6004 | 70 | struct hlist_head clks; |
b09d6d99 MT |
71 | unsigned int notifier_count; |
72 | #ifdef CONFIG_DEBUG_FS | |
73 | struct dentry *dentry; | |
8c9a8a8f | 74 | struct hlist_node debug_node; |
b09d6d99 MT |
75 | #endif |
76 | struct kref ref; | |
77 | }; | |
78 | ||
dfc202ea SB |
79 | #define CREATE_TRACE_POINTS |
80 | #include <trace/events/clk.h> | |
81 | ||
b09d6d99 MT |
82 | struct clk { |
83 | struct clk_core *core; | |
84 | const char *dev_id; | |
85 | const char *con_id; | |
1c8e6004 TV |
86 | unsigned long min_rate; |
87 | unsigned long max_rate; | |
55e9b8b7 | 88 | unsigned int exclusive_count; |
50595f8b | 89 | struct hlist_node clks_node; |
b09d6d99 MT |
90 | }; |
91 | ||
9a34b453 MS |
92 | /*** runtime pm ***/ |
93 | static int clk_pm_runtime_get(struct clk_core *core) | |
94 | { | |
95 | int ret = 0; | |
96 | ||
97 | if (!core->dev) | |
98 | return 0; | |
99 | ||
100 | ret = pm_runtime_get_sync(core->dev); | |
101 | return ret < 0 ? ret : 0; | |
102 | } | |
103 | ||
104 | static void clk_pm_runtime_put(struct clk_core *core) | |
105 | { | |
106 | if (!core->dev) | |
107 | return; | |
108 | ||
109 | pm_runtime_put_sync(core->dev); | |
110 | } | |
111 | ||
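/*
 * A minimal usage sketch of the runtime PM bracket the two helpers above
 * provide (mirroring clk_core_is_prepared() further down). The function
 * name example_read_hw is hypothetical and not part of clk.c.
 */
#if 0
static int example_read_hw(struct clk_core *core)
{
	int ret;

	ret = clk_pm_runtime_get(core);	/* no-op when core->dev is NULL */
	if (ret)
		return ret;

	ret = core->ops->is_prepared(core->hw);	/* touch the hardware */

	clk_pm_runtime_put(core);	/* balanced put */
	return ret;
}
#endif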
eab89f69 MT |
112 | /*** locking ***/ |
113 | static void clk_prepare_lock(void) | |
114 | { | |
533ddeb1 MT |
115 | if (!mutex_trylock(&prepare_lock)) { |
116 | if (prepare_owner == current) { | |
117 | prepare_refcnt++; | |
118 | return; | |
119 | } | |
120 | mutex_lock(&prepare_lock); | |
121 | } | |
122 | WARN_ON_ONCE(prepare_owner != NULL); | |
123 | WARN_ON_ONCE(prepare_refcnt != 0); | |
124 | prepare_owner = current; | |
125 | prepare_refcnt = 1; | |
eab89f69 MT |
126 | } |
127 | ||
128 | static void clk_prepare_unlock(void) | |
129 | { | |
533ddeb1 MT |
130 | WARN_ON_ONCE(prepare_owner != current); |
131 | WARN_ON_ONCE(prepare_refcnt == 0); | |
132 | ||
133 | if (--prepare_refcnt) | |
134 | return; | |
135 | prepare_owner = NULL; | |
eab89f69 MT |
136 | mutex_unlock(&prepare_lock); |
137 | } | |
138 | ||
139 | static unsigned long clk_enable_lock(void) | |
a57aa185 | 140 | __acquires(enable_lock) |
eab89f69 MT |
141 | { |
142 | unsigned long flags; | |
533ddeb1 | 143 | |
a12aa8a6 DL |
144 | /* |
145 | * On UP systems, spin_trylock_irqsave() always returns true, even if | |
146 | * we already hold the lock. So, in that case, we rely only on | |
147 | * reference counting. | |
148 | */ | |
149 | if (!IS_ENABLED(CONFIG_SMP) || | |
150 | !spin_trylock_irqsave(&enable_lock, flags)) { | |
533ddeb1 MT |
151 | if (enable_owner == current) { |
152 | enable_refcnt++; | |
a57aa185 | 153 | __acquire(enable_lock); |
a12aa8a6 DL |
154 | if (!IS_ENABLED(CONFIG_SMP)) |
155 | local_save_flags(flags); | |
533ddeb1 MT |
156 | return flags; |
157 | } | |
158 | spin_lock_irqsave(&enable_lock, flags); | |
159 | } | |
160 | WARN_ON_ONCE(enable_owner != NULL); | |
161 | WARN_ON_ONCE(enable_refcnt != 0); | |
162 | enable_owner = current; | |
163 | enable_refcnt = 1; | |
eab89f69 MT |
164 | return flags; |
165 | } | |
166 | ||
167 | static void clk_enable_unlock(unsigned long flags) | |
a57aa185 | 168 | __releases(enable_lock) |
eab89f69 | 169 | { |
533ddeb1 MT |
170 | WARN_ON_ONCE(enable_owner != current); |
171 | WARN_ON_ONCE(enable_refcnt == 0); | |
172 | ||
a57aa185 SB |
173 | if (--enable_refcnt) { |
174 | __release(enable_lock); | |
533ddeb1 | 175 | return; |
a57aa185 | 176 | } |
533ddeb1 | 177 | enable_owner = NULL; |
eab89f69 MT |
178 | spin_unlock_irqrestore(&enable_lock, flags); |
179 | } | |
180 | ||
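/*
 * A sketch of the reentrant locking pattern the helpers above implement:
 * the prepare mutex for sleepable work, the enable spinlock for atomic
 * work. example_locked_op is hypothetical and only illustrates ordering.
 */
#if 0
static void example_locked_op(void)
{
	unsigned long flags;

	clk_prepare_lock();		/* may sleep; reentrant for the owning task */
	/* ... mutate the clk tree, call .prepare/.unprepare ops ... */
	clk_prepare_unlock();

	flags = clk_enable_lock();	/* spinlock; safe in atomic context */
	/* ... call .enable/.disable ops ... */
	clk_enable_unlock(flags);
}
#endif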
e55a839a JB |
181 | static bool clk_core_rate_is_protected(struct clk_core *core) |
182 | { | |
183 | return core->protect_count; | |
184 | } | |
185 | ||
4dff95dc SB |
186 | static bool clk_core_is_prepared(struct clk_core *core) |
187 | { | |
9a34b453 MS |
188 | bool ret = false; |
189 | ||
4dff95dc SB |
190 | /* |
191 | * .is_prepared is optional for clocks that can prepare; |
192 | * fall back to the software usage counter if it is missing |
193 | */ | |
194 | if (!core->ops->is_prepared) | |
195 | return core->prepare_count; | |
b2476490 | 196 | |
9a34b453 MS |
197 | if (!clk_pm_runtime_get(core)) { |
198 | ret = core->ops->is_prepared(core->hw); | |
199 | clk_pm_runtime_put(core); | |
200 | } | |
201 | ||
202 | return ret; | |
4dff95dc | 203 | } |
b2476490 | 204 | |
4dff95dc SB |
205 | static bool clk_core_is_enabled(struct clk_core *core) |
206 | { | |
9a34b453 MS |
207 | bool ret = false; |
208 | ||
4dff95dc SB |
209 | /* |
210 | * .is_enabled is only mandatory for clocks that gate; |
211 | * fall back to the software usage counter if .is_enabled is missing |
212 | */ | |
213 | if (!core->ops->is_enabled) | |
214 | return core->enable_count; | |
6b44c854 | 215 | |
9a34b453 MS |
216 | /* |
217 | * Check if clock controller's device is runtime active before | |
218 | * calling .is_enabled callback. If not, assume that clock is | |
219 | * disabled, because we might be called from atomic context, from | |
220 | * which pm_runtime_get() is not allowed. | |
221 | * This function is called mainly from clk_disable_unused_subtree, | |
222 | * which ensures proper runtime pm activation of controller before | |
223 | * taking enable spinlock, but the below check is needed if one tries | |
224 | * to call it from other places. | |
225 | */ | |
226 | if (core->dev) { | |
227 | pm_runtime_get_noresume(core->dev); | |
228 | if (!pm_runtime_active(core->dev)) { | |
229 | ret = false; | |
230 | goto done; | |
231 | } | |
232 | } | |
233 | ||
234 | ret = core->ops->is_enabled(core->hw); | |
235 | done: | |
756efe13 DA |
236 | if (core->dev) |
237 | pm_runtime_put(core->dev); | |
9a34b453 MS |
238 | |
239 | return ret; | |
4dff95dc | 240 | } |
6b44c854 | 241 | |
4dff95dc | 242 | /*** helper functions ***/ |
1af599df | 243 | |
b76281cb | 244 | const char *__clk_get_name(const struct clk *clk) |
1af599df | 245 | { |
4dff95dc | 246 | return !clk ? NULL : clk->core->name; |
1af599df | 247 | } |
4dff95dc | 248 | EXPORT_SYMBOL_GPL(__clk_get_name); |
1af599df | 249 | |
e7df6f6e | 250 | const char *clk_hw_get_name(const struct clk_hw *hw) |
1a9c069c SB |
251 | { |
252 | return hw->core->name; | |
253 | } | |
254 | EXPORT_SYMBOL_GPL(clk_hw_get_name); | |
255 | ||
4dff95dc SB |
256 | struct clk_hw *__clk_get_hw(struct clk *clk) |
257 | { | |
258 | return !clk ? NULL : clk->core->hw; | |
259 | } | |
260 | EXPORT_SYMBOL_GPL(__clk_get_hw); | |
1af599df | 261 | |
e7df6f6e | 262 | unsigned int clk_hw_get_num_parents(const struct clk_hw *hw) |
1a9c069c SB |
263 | { |
264 | return hw->core->num_parents; | |
265 | } | |
266 | EXPORT_SYMBOL_GPL(clk_hw_get_num_parents); | |
267 | ||
e7df6f6e | 268 | struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw) |
1a9c069c SB |
269 | { |
270 | return hw->core->parent ? hw->core->parent->hw : NULL; | |
271 | } | |
272 | EXPORT_SYMBOL_GPL(clk_hw_get_parent); | |
273 | ||
4dff95dc SB |
274 | static struct clk_core *__clk_lookup_subtree(const char *name, |
275 | struct clk_core *core) | |
bddca894 | 276 | { |
035a61c3 | 277 | struct clk_core *child; |
4dff95dc | 278 | struct clk_core *ret; |
bddca894 | 279 | |
4dff95dc SB |
280 | if (!strcmp(core->name, name)) |
281 | return core; | |
bddca894 | 282 | |
4dff95dc SB |
283 | hlist_for_each_entry(child, &core->children, child_node) { |
284 | ret = __clk_lookup_subtree(name, child); | |
285 | if (ret) | |
286 | return ret; | |
bddca894 PG |
287 | } |
288 | ||
4dff95dc | 289 | return NULL; |
bddca894 PG |
290 | } |
291 | ||
4dff95dc | 292 | static struct clk_core *clk_core_lookup(const char *name) |
bddca894 | 293 | { |
4dff95dc SB |
294 | struct clk_core *root_clk; |
295 | struct clk_core *ret; | |
bddca894 | 296 | |
4dff95dc SB |
297 | if (!name) |
298 | return NULL; | |
bddca894 | 299 | |
4dff95dc SB |
300 | /* search the 'proper' clk tree first */ |
301 | hlist_for_each_entry(root_clk, &clk_root_list, child_node) { | |
302 | ret = __clk_lookup_subtree(name, root_clk); | |
303 | if (ret) | |
304 | return ret; | |
bddca894 PG |
305 | } |
306 | ||
4dff95dc SB |
307 | /* if not found, then search the orphan tree */ |
308 | hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) { | |
309 | ret = __clk_lookup_subtree(name, root_clk); | |
310 | if (ret) | |
311 | return ret; | |
312 | } | |
bddca894 | 313 | |
4dff95dc | 314 | return NULL; |
bddca894 PG |
315 | } |
316 | ||
4dff95dc SB |
317 | static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core, |
318 | u8 index) | |
bddca894 | 319 | { |
4dff95dc SB |
320 | if (!core || index >= core->num_parents) |
321 | return NULL; | |
88cfbef2 MY |
322 | |
323 | if (!core->parents[index]) | |
324 | core->parents[index] = | |
325 | clk_core_lookup(core->parent_names[index]); | |
326 | ||
327 | return core->parents[index]; | |
bddca894 PG |
328 | } |
329 | ||
e7df6f6e SB |
330 | struct clk_hw * |
331 | clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index) | |
1a9c069c SB |
332 | { |
333 | struct clk_core *parent; | |
334 | ||
335 | parent = clk_core_get_parent_by_index(hw->core, index); | |
336 | ||
337 | return !parent ? NULL : parent->hw; | |
338 | } | |
339 | EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index); | |
340 | ||
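/*
 * A usage sketch showing how provider code can walk the possible parents
 * of a clk_hw with the accessors above. example_dump_parents is a
 * hypothetical helper, not part of clk.c.
 */
#if 0
static void example_dump_parents(const struct clk_hw *hw)
{
	unsigned int i;

	for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
		struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);

		/* a NULL return means that parent has not been registered yet */
		pr_info("%s: parent %u: %s\n", clk_hw_get_name(hw), i,
			parent ? clk_hw_get_name(parent) : "(not found)");
	}
}
#endif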
4dff95dc SB |
341 | unsigned int __clk_get_enable_count(struct clk *clk) |
342 | { | |
343 | return !clk ? 0 : clk->core->enable_count; | |
344 | } | |
b2476490 | 345 | |
4dff95dc SB |
346 | static unsigned long clk_core_get_rate_nolock(struct clk_core *core) |
347 | { | |
348 | unsigned long ret; | |
b2476490 | 349 | |
4dff95dc SB |
350 | if (!core) { |
351 | ret = 0; | |
352 | goto out; | |
353 | } | |
b2476490 | 354 | |
4dff95dc | 355 | ret = core->rate; |
b2476490 | 356 | |
47b0eeb3 | 357 | if (!core->num_parents) |
4dff95dc | 358 | goto out; |
c646cbf1 | 359 | |
4dff95dc SB |
360 | if (!core->parent) |
361 | ret = 0; | |
b2476490 | 362 | |
b2476490 MT |
363 | out: |
364 | return ret; | |
365 | } | |
366 | ||
e7df6f6e | 367 | unsigned long clk_hw_get_rate(const struct clk_hw *hw) |
1a9c069c SB |
368 | { |
369 | return clk_core_get_rate_nolock(hw->core); | |
370 | } | |
371 | EXPORT_SYMBOL_GPL(clk_hw_get_rate); | |
372 | ||
4dff95dc SB |
373 | static unsigned long __clk_get_accuracy(struct clk_core *core) |
374 | { | |
375 | if (!core) | |
376 | return 0; | |
b2476490 | 377 | |
4dff95dc | 378 | return core->accuracy; |
b2476490 MT |
379 | } |
380 | ||
4dff95dc | 381 | unsigned long __clk_get_flags(struct clk *clk) |
fcb0ee6a | 382 | { |
4dff95dc | 383 | return !clk ? 0 : clk->core->flags; |
fcb0ee6a | 384 | } |
4dff95dc | 385 | EXPORT_SYMBOL_GPL(__clk_get_flags); |
fcb0ee6a | 386 | |
e7df6f6e | 387 | unsigned long clk_hw_get_flags(const struct clk_hw *hw) |
1a9c069c SB |
388 | { |
389 | return hw->core->flags; | |
390 | } | |
391 | EXPORT_SYMBOL_GPL(clk_hw_get_flags); | |
392 | ||
e7df6f6e | 393 | bool clk_hw_is_prepared(const struct clk_hw *hw) |
1a9c069c SB |
394 | { |
395 | return clk_core_is_prepared(hw->core); | |
396 | } | |
397 | ||
e55a839a JB |
398 | bool clk_hw_rate_is_protected(const struct clk_hw *hw) |
399 | { | |
400 | return clk_core_rate_is_protected(hw->core); | |
401 | } | |
402 | ||
be68bf88 JE |
403 | bool clk_hw_is_enabled(const struct clk_hw *hw) |
404 | { | |
405 | return clk_core_is_enabled(hw->core); | |
406 | } | |
407 | ||
4dff95dc | 408 | bool __clk_is_enabled(struct clk *clk) |
b2476490 | 409 | { |
4dff95dc SB |
410 | if (!clk) |
411 | return false; | |
b2476490 | 412 | |
4dff95dc SB |
413 | return clk_core_is_enabled(clk->core); |
414 | } | |
415 | EXPORT_SYMBOL_GPL(__clk_is_enabled); | |
b2476490 | 416 | |
4dff95dc SB |
417 | static bool mux_is_better_rate(unsigned long rate, unsigned long now, |
418 | unsigned long best, unsigned long flags) | |
419 | { | |
420 | if (flags & CLK_MUX_ROUND_CLOSEST) | |
421 | return abs(now - rate) < abs(best - rate); | |
1af599df | 422 | |
4dff95dc SB |
423 | return now <= rate && now > best; |
424 | } | |
bddca894 | 425 | |
4ad69b80 JB |
426 | int clk_mux_determine_rate_flags(struct clk_hw *hw, |
427 | struct clk_rate_request *req, | |
428 | unsigned long flags) | |
4dff95dc SB |
429 | { |
430 | struct clk_core *core = hw->core, *parent, *best_parent = NULL; | |
0817b62c BB |
431 | int i, num_parents, ret; |
432 | unsigned long best = 0; | |
433 | struct clk_rate_request parent_req = *req; | |
b2476490 | 434 | |
4dff95dc SB |
435 | /* if NO_REPARENT flag set, pass through to current parent */ |
436 | if (core->flags & CLK_SET_RATE_NO_REPARENT) { | |
437 | parent = core->parent; | |
0817b62c BB |
438 | if (core->flags & CLK_SET_RATE_PARENT) { |
439 | ret = __clk_determine_rate(parent ? parent->hw : NULL, | |
440 | &parent_req); | |
441 | if (ret) | |
442 | return ret; | |
443 | ||
444 | best = parent_req.rate; | |
445 | } else if (parent) { | |
4dff95dc | 446 | best = clk_core_get_rate_nolock(parent); |
0817b62c | 447 | } else { |
4dff95dc | 448 | best = clk_core_get_rate_nolock(core); |
0817b62c BB |
449 | } |
450 | ||
4dff95dc SB |
451 | goto out; |
452 | } | |
b2476490 | 453 | |
4dff95dc SB |
454 | /* find the parent that can provide the fastest rate <= rate */ |
455 | num_parents = core->num_parents; | |
456 | for (i = 0; i < num_parents; i++) { | |
457 | parent = clk_core_get_parent_by_index(core, i); | |
458 | if (!parent) | |
459 | continue; | |
0817b62c BB |
460 | |
461 | if (core->flags & CLK_SET_RATE_PARENT) { | |
462 | parent_req = *req; | |
463 | ret = __clk_determine_rate(parent->hw, &parent_req); | |
464 | if (ret) | |
465 | continue; | |
466 | } else { | |
467 | parent_req.rate = clk_core_get_rate_nolock(parent); | |
468 | } | |
469 | ||
470 | if (mux_is_better_rate(req->rate, parent_req.rate, | |
471 | best, flags)) { | |
4dff95dc | 472 | best_parent = parent; |
0817b62c | 473 | best = parent_req.rate; |
4dff95dc SB |
474 | } |
475 | } | |
b2476490 | 476 | |
57d866e6 BB |
477 | if (!best_parent) |
478 | return -EINVAL; | |
479 | ||
4dff95dc SB |
480 | out: |
481 | if (best_parent) | |
0817b62c BB |
482 | req->best_parent_hw = best_parent->hw; |
483 | req->best_parent_rate = best; | |
484 | req->rate = best; | |
b2476490 | 485 | |
0817b62c | 486 | return 0; |
b33d212f | 487 | } |
4ad69b80 | 488 | EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags); |
4dff95dc SB |
489 | |
490 | struct clk *__clk_lookup(const char *name) | |
fcb0ee6a | 491 | { |
4dff95dc SB |
492 | struct clk_core *core = clk_core_lookup(name); |
493 | ||
494 | return !core ? NULL : core->hw->clk; | |
fcb0ee6a | 495 | } |
b2476490 | 496 | |
4dff95dc SB |
497 | static void clk_core_get_boundaries(struct clk_core *core, |
498 | unsigned long *min_rate, | |
499 | unsigned long *max_rate) | |
1c155b3d | 500 | { |
4dff95dc | 501 | struct clk *clk_user; |
1c155b3d | 502 | |
9783c0d9 SB |
503 | *min_rate = core->min_rate; |
504 | *max_rate = core->max_rate; | |
496eadf8 | 505 | |
4dff95dc SB |
506 | hlist_for_each_entry(clk_user, &core->clks, clks_node) |
507 | *min_rate = max(*min_rate, clk_user->min_rate); | |
1c155b3d | 508 | |
4dff95dc SB |
509 | hlist_for_each_entry(clk_user, &core->clks, clks_node) |
510 | *max_rate = min(*max_rate, clk_user->max_rate); | |
511 | } | |
1c155b3d | 512 | |
9783c0d9 SB |
513 | void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate, |
514 | unsigned long max_rate) | |
515 | { | |
516 | hw->core->min_rate = min_rate; | |
517 | hw->core->max_rate = max_rate; | |
518 | } | |
519 | EXPORT_SYMBOL_GPL(clk_hw_set_rate_range); | |
520 | ||
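/*
 * A usage sketch of clk_hw_set_rate_range(): a provider clamping one of
 * its own clocks after registration. The bounds and the function name
 * example_limit_pll are hypothetical.
 */
#if 0
static void example_limit_pll(struct clk_hw *hw)
{
	/* assumed hardware limit: the PLL is only stable between 200 MHz and 1.2 GHz */
	clk_hw_set_rate_range(hw, 200 * 1000 * 1000, 1200 * 1000 * 1000);
}
#endif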
4dff95dc SB |
521 | /* |
522 | * Helper for finding best parent to provide a given frequency. This can be used | |
523 | * directly as a determine_rate callback (e.g. for a mux), or from a more | |
524 | * complex clock that may combine a mux with other operations. | |
525 | */ | |
0817b62c BB |
526 | int __clk_mux_determine_rate(struct clk_hw *hw, |
527 | struct clk_rate_request *req) | |
4dff95dc | 528 | { |
0817b62c | 529 | return clk_mux_determine_rate_flags(hw, req, 0); |
1c155b3d | 530 | } |
4dff95dc | 531 | EXPORT_SYMBOL_GPL(__clk_mux_determine_rate); |
1c155b3d | 532 | |
0817b62c BB |
533 | int __clk_mux_determine_rate_closest(struct clk_hw *hw, |
534 | struct clk_rate_request *req) | |
b2476490 | 535 | { |
0817b62c | 536 | return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST); |
4dff95dc SB |
537 | } |
538 | EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest); | |
b2476490 | 539 | |
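/*
 * A sketch of how a mux-like provider typically delegates rate selection
 * to the helper above by using it directly as its .determine_rate op.
 * The example_mux_* callbacks are hypothetical.
 */
#if 0
static const struct clk_ops example_mux_ops = {
	.get_parent	= example_mux_get_parent,	/* hypothetical */
	.set_parent	= example_mux_set_parent,	/* hypothetical */
	.determine_rate	= __clk_mux_determine_rate,
};
#endif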
4dff95dc | 540 | /*** clk api ***/ |
496eadf8 | 541 | |
e55a839a JB |
542 | static void clk_core_rate_unprotect(struct clk_core *core) |
543 | { | |
544 | lockdep_assert_held(&prepare_lock); | |
545 | ||
546 | if (!core) | |
547 | return; | |
548 | ||
ab525dcc FE |
549 | if (WARN(core->protect_count == 0, |
550 | "%s already unprotected\n", core->name)) | |
e55a839a JB |
551 | return; |
552 | ||
553 | if (--core->protect_count > 0) | |
554 | return; | |
555 | ||
556 | clk_core_rate_unprotect(core->parent); | |
557 | } | |
558 | ||
559 | static int clk_core_rate_nuke_protect(struct clk_core *core) | |
560 | { | |
561 | int ret; | |
562 | ||
563 | lockdep_assert_held(&prepare_lock); | |
564 | ||
565 | if (!core) | |
566 | return -EINVAL; | |
567 | ||
568 | if (core->protect_count == 0) | |
569 | return 0; | |
570 | ||
571 | ret = core->protect_count; | |
572 | core->protect_count = 1; | |
573 | clk_core_rate_unprotect(core); | |
574 | ||
575 | return ret; | |
576 | } | |
577 | ||
55e9b8b7 JB |
578 | /** |
579 | * clk_rate_exclusive_put - release exclusivity over clock rate control | |
580 | * @clk: the clk over which the exclusivity is released | |
581 | * | |
582 | * clk_rate_exclusive_put() completes a critical section during which a clock | |
583 | * consumer cannot tolerate any other consumer making any operation on the | |
584 | * clock which could result in a rate change or rate glitch. Exclusive clocks | |
585 | * cannot have their rate changed, either directly or indirectly due to changes | |
586 | * further up the parent chain of clocks. As a result, clocks further up the |
587 | * parent chain are also placed under exclusive control of the calling consumer. |
588 | * | |
589 | * If exclusivity is claimed more than once on a clock, even by the same consumer, |
590 | * the rate effectively gets locked as exclusivity can't be preempted. | |
591 | * | |
592 | * Calls to clk_rate_exclusive_put() must be balanced with calls to | |
593 | * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return | |
594 | * error status. | |
595 | */ | |
596 | void clk_rate_exclusive_put(struct clk *clk) | |
597 | { | |
598 | if (!clk) | |
599 | return; | |
600 | ||
601 | clk_prepare_lock(); | |
602 | ||
603 | /* | |
604 | * if there is something wrong with this consumer protect count, stop | |
605 | * here before messing with the provider | |
606 | */ | |
607 | if (WARN_ON(clk->exclusive_count <= 0)) | |
608 | goto out; | |
609 | ||
610 | clk_core_rate_unprotect(clk->core); | |
611 | clk->exclusive_count--; | |
612 | out: | |
613 | clk_prepare_unlock(); | |
614 | } | |
615 | EXPORT_SYMBOL_GPL(clk_rate_exclusive_put); | |
616 | ||
e55a839a JB |
617 | static void clk_core_rate_protect(struct clk_core *core) |
618 | { | |
619 | lockdep_assert_held(&prepare_lock); | |
620 | ||
621 | if (!core) | |
622 | return; | |
623 | ||
624 | if (core->protect_count == 0) | |
625 | clk_core_rate_protect(core->parent); | |
626 | ||
627 | core->protect_count++; | |
628 | } | |
629 | ||
630 | static void clk_core_rate_restore_protect(struct clk_core *core, int count) | |
631 | { | |
632 | lockdep_assert_held(&prepare_lock); | |
633 | ||
634 | if (!core) | |
635 | return; | |
636 | ||
637 | if (count == 0) | |
638 | return; | |
639 | ||
640 | clk_core_rate_protect(core); | |
641 | core->protect_count = count; | |
642 | } | |
643 | ||
55e9b8b7 JB |
644 | /** |
645 | * clk_rate_exclusive_get - get exclusivity over the clk rate control | |
646 | * @clk: the clk over which exclusivity of rate control is requested |
647 | * | |
648 | * clk_rate_exclusive_get() begins a critical section during which a clock |
649 | * consumer cannot tolerate any other consumer making any operation on the | |
650 | * clock which could result in a rate change or rate glitch. Exclusive clocks | |
651 | * cannot have their rate changed, either directly or indirectly due to changes | |
652 | * further up the parent chain of clocks. As a result, clocks further up the |
653 | * parent chain are also placed under exclusive control of the calling consumer. |
654 | * | |
655 | * If exclusivity is claimed more than once on a clock, even by the same consumer, |
656 | * the rate effectively gets locked as exclusivity can't be preempted. | |
657 | * | |
658 | * Calls to clk_rate_exclusive_get() should be balanced with calls to | |
659 | * clk_rate_exclusive_put(). Calls to this function may sleep. | |
660 | * Returns 0 on success, a negative errno otherwise |
661 | */ | |
662 | int clk_rate_exclusive_get(struct clk *clk) | |
663 | { | |
664 | if (!clk) | |
665 | return 0; | |
666 | ||
667 | clk_prepare_lock(); | |
668 | clk_core_rate_protect(clk->core); | |
669 | clk->exclusive_count++; | |
670 | clk_prepare_unlock(); | |
671 | ||
672 | return 0; | |
673 | } | |
674 | EXPORT_SYMBOL_GPL(clk_rate_exclusive_get); | |
675 | ||
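/*
 * A usage sketch of the exclusive-rate API documented above: a consumer
 * holding the rate stable across a rate-sensitive window, with the get
 * and put balanced. example_sensitive_transfer is hypothetical.
 */
#if 0
static int example_sensitive_transfer(struct clk *clk)
{
	int ret;

	ret = clk_rate_exclusive_get(clk);	/* may sleep */
	if (ret)
		return ret;

	/* ... rate-sensitive work: no other consumer can change the rate here ... */

	clk_rate_exclusive_put(clk);		/* must balance the get */
	return 0;
}
#endif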
4dff95dc SB |
676 | static void clk_core_unprepare(struct clk_core *core) |
677 | { | |
a6334725 SB |
678 | lockdep_assert_held(&prepare_lock); |
679 | ||
4dff95dc SB |
680 | if (!core) |
681 | return; | |
b2476490 | 682 | |
ab525dcc FE |
683 | if (WARN(core->prepare_count == 0, |
684 | "%s already unprepared\n", core->name)) | |
4dff95dc | 685 | return; |
b2476490 | 686 | |
ab525dcc FE |
687 | if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL, |
688 | "Unpreparing critical %s\n", core->name)) | |
2e20fbf5 LJ |
689 | return; |
690 | ||
9461f7b3 JB |
691 | if (core->flags & CLK_SET_RATE_GATE) |
692 | clk_core_rate_unprotect(core); | |
693 | ||
4dff95dc SB |
694 | if (--core->prepare_count > 0) |
695 | return; | |
b2476490 | 696 | |
ab525dcc | 697 | WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name); |
b2476490 | 698 | |
4dff95dc | 699 | trace_clk_unprepare(core); |
b2476490 | 700 | |
4dff95dc SB |
701 | if (core->ops->unprepare) |
702 | core->ops->unprepare(core->hw); | |
703 | ||
9a34b453 MS |
704 | clk_pm_runtime_put(core); |
705 | ||
4dff95dc SB |
706 | trace_clk_unprepare_complete(core); |
707 | clk_core_unprepare(core->parent); | |
b2476490 MT |
708 | } |
709 | ||
a6adc30b DA |
710 | static void clk_core_unprepare_lock(struct clk_core *core) |
711 | { | |
712 | clk_prepare_lock(); | |
713 | clk_core_unprepare(core); | |
714 | clk_prepare_unlock(); | |
715 | } | |
716 | ||
4dff95dc SB |
717 | /** |
718 | * clk_unprepare - undo preparation of a clock source | |
719 | * @clk: the clk being unprepared | |
720 | * | |
721 | * clk_unprepare may sleep, which differentiates it from clk_disable. In a | |
722 | * simple case, clk_unprepare can be used instead of clk_disable to gate a clk | |
723 | * if the operation may sleep. One example is a clk which is accessed over | |
724 | * I2C. In the complex case a clk gate operation may require a fast and a slow |
725 | * part. It is for this reason that clk_unprepare and clk_disable are not mutually |
726 | * exclusive. In fact clk_disable must be called before clk_unprepare. | |
727 | */ | |
728 | void clk_unprepare(struct clk *clk) | |
1e435256 | 729 | { |
4dff95dc SB |
730 | if (IS_ERR_OR_NULL(clk)) |
731 | return; | |
732 | ||
a6adc30b | 733 | clk_core_unprepare_lock(clk->core); |
1e435256 | 734 | } |
4dff95dc | 735 | EXPORT_SYMBOL_GPL(clk_unprepare); |
1e435256 | 736 | |
4dff95dc | 737 | static int clk_core_prepare(struct clk_core *core) |
b2476490 | 738 | { |
4dff95dc | 739 | int ret = 0; |
b2476490 | 740 | |
a6334725 SB |
741 | lockdep_assert_held(&prepare_lock); |
742 | ||
4dff95dc | 743 | if (!core) |
1e435256 | 744 | return 0; |
1e435256 | 745 | |
4dff95dc | 746 | if (core->prepare_count == 0) { |
9a34b453 | 747 | ret = clk_pm_runtime_get(core); |
4dff95dc SB |
748 | if (ret) |
749 | return ret; | |
b2476490 | 750 | |
9a34b453 MS |
751 | ret = clk_core_prepare(core->parent); |
752 | if (ret) | |
753 | goto runtime_put; | |
754 | ||
4dff95dc | 755 | trace_clk_prepare(core); |
b2476490 | 756 | |
4dff95dc SB |
757 | if (core->ops->prepare) |
758 | ret = core->ops->prepare(core->hw); | |
b2476490 | 759 | |
4dff95dc | 760 | trace_clk_prepare_complete(core); |
1c155b3d | 761 | |
9a34b453 MS |
762 | if (ret) |
763 | goto unprepare; | |
4dff95dc | 764 | } |
1c155b3d | 765 | |
4dff95dc | 766 | core->prepare_count++; |
b2476490 | 767 | |
9461f7b3 JB |
768 | /* |
769 | * CLK_SET_RATE_GATE is a special case of clock protection. |
770 | * Instead of a consumer claiming exclusive rate control, it is | |
771 | * actually the provider which prevents any consumer from making any | |
772 | * operation which could result in a rate change or rate glitch while | |
773 | * the clock is prepared. | |
774 | */ | |
775 | if (core->flags & CLK_SET_RATE_GATE) | |
776 | clk_core_rate_protect(core); | |
777 | ||
b2476490 | 778 | return 0; |
9a34b453 MS |
779 | unprepare: |
780 | clk_core_unprepare(core->parent); | |
781 | runtime_put: | |
782 | clk_pm_runtime_put(core); | |
783 | return ret; | |
b2476490 | 784 | } |
b2476490 | 785 | |
a6adc30b DA |
786 | static int clk_core_prepare_lock(struct clk_core *core) |
787 | { | |
788 | int ret; | |
789 | ||
790 | clk_prepare_lock(); | |
791 | ret = clk_core_prepare(core); | |
792 | clk_prepare_unlock(); | |
793 | ||
794 | return ret; | |
795 | } | |
796 | ||
4dff95dc SB |
797 | /** |
798 | * clk_prepare - prepare a clock source | |
799 | * @clk: the clk being prepared | |
800 | * | |
801 | * clk_prepare may sleep, which differentiates it from clk_enable. In a simple | |
802 | * case, clk_prepare can be used instead of clk_enable to ungate a clk if the | |
803 | * operation may sleep. One example is a clk which is accessed over I2C. In |
804 | * the complex case a clk ungate operation may require a fast and a slow part. | |
805 | * It is for this reason that clk_prepare and clk_enable are not mutually |
806 | * exclusive. In fact clk_prepare must be called before clk_enable. |
807 | * Returns 0 on success, a negative errno otherwise. |
808 | */ | |
809 | int clk_prepare(struct clk *clk) | |
b2476490 | 810 | { |
4dff95dc SB |
811 | if (!clk) |
812 | return 0; | |
b2476490 | 813 | |
a6adc30b | 814 | return clk_core_prepare_lock(clk->core); |
b2476490 | 815 | } |
4dff95dc | 816 | EXPORT_SYMBOL_GPL(clk_prepare); |
b2476490 | 817 | |
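/*
 * A usage sketch of clk_prepare()/clk_unprepare() for a clock that may
 * sleep, e.g. one controlled over I2C, as described above. example_probe
 * is a hypothetical consumer function.
 */
#if 0
static int example_probe(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);		/* may sleep; not callable from atomic context */
	if (ret)
		return ret;

	/* ... use the clock; later, again from sleepable context ... */

	clk_unprepare(clk);		/* balances the clk_prepare() above */
	return 0;
}
#endif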
4dff95dc | 818 | static void clk_core_disable(struct clk_core *core) |
b2476490 | 819 | { |
a6334725 SB |
820 | lockdep_assert_held(&enable_lock); |
821 | ||
4dff95dc SB |
822 | if (!core) |
823 | return; | |
035a61c3 | 824 | |
ab525dcc | 825 | if (WARN(core->enable_count == 0, "%s already disabled\n", core->name)) |
4dff95dc | 826 | return; |
b2476490 | 827 | |
ab525dcc FE |
828 | if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL, |
829 | "Disabling critical %s\n", core->name)) | |
2e20fbf5 LJ |
830 | return; |
831 | ||
4dff95dc SB |
832 | if (--core->enable_count > 0) |
833 | return; | |
035a61c3 | 834 | |
2f87a6ea | 835 | trace_clk_disable_rcuidle(core); |
035a61c3 | 836 | |
4dff95dc SB |
837 | if (core->ops->disable) |
838 | core->ops->disable(core->hw); | |
035a61c3 | 839 | |
2f87a6ea | 840 | trace_clk_disable_complete_rcuidle(core); |
035a61c3 | 841 | |
4dff95dc | 842 | clk_core_disable(core->parent); |
035a61c3 | 843 | } |
7ef3dcc8 | 844 | |
a6adc30b DA |
845 | static void clk_core_disable_lock(struct clk_core *core) |
846 | { | |
847 | unsigned long flags; | |
848 | ||
849 | flags = clk_enable_lock(); | |
850 | clk_core_disable(core); | |
851 | clk_enable_unlock(flags); | |
852 | } | |
853 | ||
4dff95dc SB |
854 | /** |
855 | * clk_disable - gate a clock | |
856 | * @clk: the clk being gated | |
857 | * | |
858 | * clk_disable must not sleep, which differentiates it from clk_unprepare. In | |
859 | * a simple case, clk_disable can be used instead of clk_unprepare to gate a | |
860 | * clk if the operation is fast and will never sleep. One example is a | |
861 | * SoC-internal clk which is controlled via simple register writes. In the | |
862 | * complex case a clk gate operation may require a fast and a slow part. It is |
863 | * for this reason that clk_unprepare and clk_disable are not mutually exclusive. |
864 | * In fact clk_disable must be called before clk_unprepare. | |
865 | */ | |
866 | void clk_disable(struct clk *clk) | |
b2476490 | 867 | { |
4dff95dc SB |
868 | if (IS_ERR_OR_NULL(clk)) |
869 | return; | |
870 | ||
a6adc30b | 871 | clk_core_disable_lock(clk->core); |
b2476490 | 872 | } |
4dff95dc | 873 | EXPORT_SYMBOL_GPL(clk_disable); |
b2476490 | 874 | |
4dff95dc | 875 | static int clk_core_enable(struct clk_core *core) |
b2476490 | 876 | { |
4dff95dc | 877 | int ret = 0; |
b2476490 | 878 | |
a6334725 SB |
879 | lockdep_assert_held(&enable_lock); |
880 | ||
4dff95dc SB |
881 | if (!core) |
882 | return 0; | |
b2476490 | 883 | |
ab525dcc FE |
884 | if (WARN(core->prepare_count == 0, |
885 | "Enabling unprepared %s\n", core->name)) | |
4dff95dc | 886 | return -ESHUTDOWN; |
b2476490 | 887 | |
4dff95dc SB |
888 | if (core->enable_count == 0) { |
889 | ret = clk_core_enable(core->parent); | |
b2476490 | 890 | |
4dff95dc SB |
891 | if (ret) |
892 | return ret; | |
b2476490 | 893 | |
f17a0dd1 | 894 | trace_clk_enable_rcuidle(core); |
035a61c3 | 895 | |
4dff95dc SB |
896 | if (core->ops->enable) |
897 | ret = core->ops->enable(core->hw); | |
035a61c3 | 898 | |
f17a0dd1 | 899 | trace_clk_enable_complete_rcuidle(core); |
4dff95dc SB |
900 | |
901 | if (ret) { | |
902 | clk_core_disable(core->parent); | |
903 | return ret; | |
904 | } | |
905 | } | |
906 | ||
907 | core->enable_count++; | |
908 | return 0; | |
035a61c3 | 909 | } |
b2476490 | 910 | |
a6adc30b DA |
911 | static int clk_core_enable_lock(struct clk_core *core) |
912 | { | |
913 | unsigned long flags; | |
914 | int ret; | |
915 | ||
916 | flags = clk_enable_lock(); | |
917 | ret = clk_core_enable(core); | |
918 | clk_enable_unlock(flags); | |
919 | ||
920 | return ret; | |
921 | } | |
922 | ||
43536548 K |
923 | /** |
924 | * clk_gate_restore_context - restore context for poweroff | |
925 | * @hw: the clk_hw pointer of clock whose state is to be restored | |
926 | * | |
927 | * Enables or disables the gate clock based on its enable_count. This is |
928 | * used in cases where the clock context (register state) has been lost, |
929 | * for example across a power transition, and the gate hardware needs to |
930 | * be brought back in line with the software enable count. This helps |
931 | * restore the state of gate clocks. |
932 | */ | |
933 | void clk_gate_restore_context(struct clk_hw *hw) | |
934 | { | |
9be76627 SB |
935 | struct clk_core *core = hw->core; |
936 | ||
937 | if (core->enable_count) | |
938 | core->ops->enable(hw); | |
43536548 | 939 | else |
9be76627 | 940 | core->ops->disable(hw); |
43536548 K |
941 | } |
942 | EXPORT_SYMBOL_GPL(clk_gate_restore_context); | |
943 | ||
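/*
 * A sketch of how a gate provider can wire the helper above into its
 * clk_ops so that the gate state is reapplied after a context loss.
 * The example_gate_* callbacks are hypothetical.
 */
#if 0
static const struct clk_ops example_gate_ops = {
	.enable		 = example_gate_enable,		/* hypothetical */
	.disable	 = example_gate_disable,	/* hypothetical */
	.restore_context = clk_gate_restore_context,
};
#endif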
9be76627 | 944 | static int clk_core_save_context(struct clk_core *core) |
8b95d1ce RD |
945 | { |
946 | struct clk_core *child; | |
947 | int ret = 0; | |
948 | ||
9be76627 SB |
949 | hlist_for_each_entry(child, &core->children, child_node) { |
950 | ret = clk_core_save_context(child); | |
8b95d1ce RD |
951 | if (ret < 0) |
952 | return ret; | |
953 | } | |
954 | ||
9be76627 SB |
955 | if (core->ops && core->ops->save_context) |
956 | ret = core->ops->save_context(core->hw); | |
8b95d1ce RD |
957 | |
958 | return ret; | |
959 | } | |
960 | ||
9be76627 | 961 | static void clk_core_restore_context(struct clk_core *core) |
8b95d1ce RD |
962 | { |
963 | struct clk_core *child; | |
964 | ||
9be76627 SB |
965 | if (core->ops && core->ops->restore_context) |
966 | core->ops->restore_context(core->hw); | |
8b95d1ce | 967 | |
9be76627 SB |
968 | hlist_for_each_entry(child, &core->children, child_node) |
969 | clk_core_restore_context(child); | |
8b95d1ce RD |
970 | } |
971 | ||
972 | /** | |
973 | * clk_save_context - save clock context for poweroff | |
974 | * | |
975 | * Saves the context of the clock registers for power states in which the |
976 | * contents of the registers will be lost. Occurs deep within the suspend | |
977 | * code. Returns 0 on success. | |
978 | */ | |
979 | int clk_save_context(void) | |
980 | { | |
981 | struct clk_core *clk; | |
982 | int ret; | |
983 | ||
984 | hlist_for_each_entry(clk, &clk_root_list, child_node) { | |
9be76627 | 985 | ret = clk_core_save_context(clk); |
8b95d1ce RD |
986 | if (ret < 0) |
987 | return ret; | |
988 | } | |
989 | ||
990 | hlist_for_each_entry(clk, &clk_orphan_list, child_node) { | |
9be76627 | 991 | ret = clk_core_save_context(clk); |
8b95d1ce RD |
992 | if (ret < 0) |
993 | return ret; | |
994 | } | |
995 | ||
996 | return 0; | |
997 | } | |
998 | EXPORT_SYMBOL_GPL(clk_save_context); | |
999 | ||
1000 | /** | |
1001 | * clk_restore_context - restore clock context after poweroff | |
1002 | * | |
1003 | * Restore the saved clock context upon resume. | |
1004 | * | |
1005 | */ | |
1006 | void clk_restore_context(void) | |
1007 | { | |
9be76627 | 1008 | struct clk_core *core; |
8b95d1ce | 1009 | |
9be76627 SB |
1010 | hlist_for_each_entry(core, &clk_root_list, child_node) |
1011 | clk_core_restore_context(core); | |
8b95d1ce | 1012 | |
9be76627 SB |
1013 | hlist_for_each_entry(core, &clk_orphan_list, child_node) |
1014 | clk_core_restore_context(core); | |
8b95d1ce RD |
1015 | } |
1016 | EXPORT_SYMBOL_GPL(clk_restore_context); | |
1017 | ||
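/*
 * A usage sketch of clk_save_context()/clk_restore_context(): platform
 * suspend/resume hooks bracketing a power state that loses clock register
 * contents. The example_*_noirq names are hypothetical.
 */
#if 0
static int example_suspend_noirq(struct device *dev)
{
	return clk_save_context();	/* snapshot registers the power-off will lose */
}

static int example_resume_noirq(struct device *dev)
{
	clk_restore_context();		/* re-program the saved state after power-on */
	return 0;
}
#endif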
4dff95dc SB |
1018 | /** |
1019 | * clk_enable - ungate a clock | |
1020 | * @clk: the clk being ungated | |
1021 | * | |
1022 | * clk_enable must not sleep, which differentiates it from clk_prepare. In a | |
1023 | * simple case, clk_enable can be used instead of clk_prepare to ungate a clk | |
1024 | * if the operation will never sleep. One example is a SoC-internal clk which | |
1025 | * is controlled via simple register writes. In the complex case a clk ungate | |
1026 | * operation may require a fast and a slow part. It is for this reason that |
1027 | * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare |
1028 | * must be called before clk_enable. Returns 0 on success, a negative errno |
1029 | * otherwise. |
1030 | */ | |
1031 | int clk_enable(struct clk *clk) | |
5279fc40 | 1032 | { |
4dff95dc | 1033 | if (!clk) |
5279fc40 BB |
1034 | return 0; |
1035 | ||
a6adc30b DA |
1036 | return clk_core_enable_lock(clk->core); |
1037 | } | |
1038 | EXPORT_SYMBOL_GPL(clk_enable); | |
1039 | ||
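/*
 * A usage sketch of the clk_enable()/clk_disable() pair documented above:
 * gating a fast, register-controlled clock around an atomic section. The
 * clock must already have been prepared from sleepable context.
 * example_isr_work is hypothetical.
 */
#if 0
static void example_isr_work(struct clk *clk)
{
	if (clk_enable(clk))		/* never sleeps */
		return;

	/* ... touch the hardware while the clk is running ... */

	clk_disable(clk);		/* balances clk_enable(); also atomic-safe */
}
#endif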
1040 | static int clk_core_prepare_enable(struct clk_core *core) | |
1041 | { | |
1042 | int ret; | |
1043 | ||
1044 | ret = clk_core_prepare_lock(core); | |
1045 | if (ret) | |
1046 | return ret; | |
1047 | ||
1048 | ret = clk_core_enable_lock(core); | |
1049 | if (ret) | |
1050 | clk_core_unprepare_lock(core); | |
5279fc40 | 1051 | |
4dff95dc | 1052 | return ret; |
b2476490 | 1053 | } |
a6adc30b DA |
1054 | |
1055 | static void clk_core_disable_unprepare(struct clk_core *core) | |
1056 | { | |
1057 | clk_core_disable_lock(core); | |
1058 | clk_core_unprepare_lock(core); | |
1059 | } | |
b2476490 | 1060 | |
7ec986ef DA |
1061 | static void clk_unprepare_unused_subtree(struct clk_core *core) |
1062 | { | |
1063 | struct clk_core *child; | |
1064 | ||
1065 | lockdep_assert_held(&prepare_lock); | |
1066 | ||
1067 | hlist_for_each_entry(child, &core->children, child_node) | |
1068 | clk_unprepare_unused_subtree(child); | |
1069 | ||
1070 | if (core->prepare_count) | |
1071 | return; | |
1072 | ||
1073 | if (core->flags & CLK_IGNORE_UNUSED) | |
1074 | return; | |
1075 | ||
9a34b453 MS |
1076 | if (clk_pm_runtime_get(core)) |
1077 | return; | |
1078 | ||
7ec986ef DA |
1079 | if (clk_core_is_prepared(core)) { |
1080 | trace_clk_unprepare(core); | |
1081 | if (core->ops->unprepare_unused) | |
1082 | core->ops->unprepare_unused(core->hw); | |
1083 | else if (core->ops->unprepare) | |
1084 | core->ops->unprepare(core->hw); | |
1085 | trace_clk_unprepare_complete(core); | |
1086 | } | |
9a34b453 MS |
1087 | |
1088 | clk_pm_runtime_put(core); | |
7ec986ef DA |
1089 | } |
1090 | ||
1091 | static void clk_disable_unused_subtree(struct clk_core *core) | |
1092 | { | |
1093 | struct clk_core *child; | |
1094 | unsigned long flags; | |
1095 | ||
1096 | lockdep_assert_held(&prepare_lock); | |
1097 | ||
1098 | hlist_for_each_entry(child, &core->children, child_node) | |
1099 | clk_disable_unused_subtree(child); | |
1100 | ||
a4b3518d DA |
1101 | if (core->flags & CLK_OPS_PARENT_ENABLE) |
1102 | clk_core_prepare_enable(core->parent); | |
1103 | ||
9a34b453 MS |
1104 | if (clk_pm_runtime_get(core)) |
1105 | goto unprepare_out; | |
1106 | ||
7ec986ef DA |
1107 | flags = clk_enable_lock(); |
1108 | ||
1109 | if (core->enable_count) | |
1110 | goto unlock_out; | |
1111 | ||
1112 | if (core->flags & CLK_IGNORE_UNUSED) | |
1113 | goto unlock_out; | |
1114 | ||
1115 | /* | |
1116 | * some gate clocks have special needs during the disable-unused | |
1117 | * sequence. call .disable_unused if available, otherwise fall | |
1118 | * back to .disable | |
1119 | */ | |
1120 | if (clk_core_is_enabled(core)) { | |
1121 | trace_clk_disable(core); | |
1122 | if (core->ops->disable_unused) | |
1123 | core->ops->disable_unused(core->hw); | |
1124 | else if (core->ops->disable) | |
1125 | core->ops->disable(core->hw); | |
1126 | trace_clk_disable_complete(core); | |
1127 | } | |
1128 | ||
1129 | unlock_out: | |
1130 | clk_enable_unlock(flags); | |
9a34b453 MS |
1131 | clk_pm_runtime_put(core); |
1132 | unprepare_out: | |
a4b3518d DA |
1133 | if (core->flags & CLK_OPS_PARENT_ENABLE) |
1134 | clk_core_disable_unprepare(core->parent); | |
7ec986ef DA |
1135 | } |
1136 | ||
1137 | static bool clk_ignore_unused; | |
1138 | static int __init clk_ignore_unused_setup(char *__unused) | |
1139 | { | |
1140 | clk_ignore_unused = true; | |
1141 | return 1; | |
1142 | } | |
1143 | __setup("clk_ignore_unused", clk_ignore_unused_setup); | |
1144 | ||
1145 | static int clk_disable_unused(void) | |
1146 | { | |
1147 | struct clk_core *core; | |
1148 | ||
1149 | if (clk_ignore_unused) { | |
1150 | pr_warn("clk: Not disabling unused clocks\n"); | |
1151 | return 0; | |
1152 | } | |
1153 | ||
1154 | clk_prepare_lock(); | |
1155 | ||
1156 | hlist_for_each_entry(core, &clk_root_list, child_node) | |
1157 | clk_disable_unused_subtree(core); | |
1158 | ||
1159 | hlist_for_each_entry(core, &clk_orphan_list, child_node) | |
1160 | clk_disable_unused_subtree(core); | |
1161 | ||
1162 | hlist_for_each_entry(core, &clk_root_list, child_node) | |
1163 | clk_unprepare_unused_subtree(core); | |
1164 | ||
1165 | hlist_for_each_entry(core, &clk_orphan_list, child_node) | |
1166 | clk_unprepare_unused_subtree(core); | |
1167 | ||
1168 | clk_prepare_unlock(); | |
1169 | ||
1170 | return 0; | |
1171 | } | |
1172 | late_initcall_sync(clk_disable_unused); | |
1173 | ||
0f6cc2b8 JB |
1174 | static int clk_core_determine_round_nolock(struct clk_core *core, |
1175 | struct clk_rate_request *req) | |
3d6ee287 | 1176 | { |
0817b62c | 1177 | long rate; |
4dff95dc SB |
1178 | |
1179 | lockdep_assert_held(&prepare_lock); | |
3d6ee287 | 1180 | |
d6968fca | 1181 | if (!core) |
4dff95dc | 1182 | return 0; |
3d6ee287 | 1183 | |
55e9b8b7 JB |
1184 | /* |
1185 | * At this point, core protection will be disabled: |
1186 | * - if the provider is not protected at all, or |
1187 | * - if the calling consumer is the only one which has exclusivity |
1188 | * over the provider | |
1189 | */ | |
e55a839a JB |
1190 | if (clk_core_rate_is_protected(core)) { |
1191 | req->rate = core->rate; | |
1192 | } else if (core->ops->determine_rate) { | |
0817b62c BB |
1193 | return core->ops->determine_rate(core->hw, req); |
1194 | } else if (core->ops->round_rate) { | |
1195 | rate = core->ops->round_rate(core->hw, req->rate, | |
1196 | &req->best_parent_rate); | |
1197 | if (rate < 0) | |
1198 | return rate; | |
1199 | ||
1200 | req->rate = rate; | |
0817b62c | 1201 | } else { |
0f6cc2b8 | 1202 | return -EINVAL; |
0817b62c BB |
1203 | } |
1204 | ||
1205 | return 0; | |
3d6ee287 UH |
1206 | } |
1207 | ||
0f6cc2b8 JB |
1208 | static void clk_core_init_rate_req(struct clk_core * const core, |
1209 | struct clk_rate_request *req) | |
1210 | { | |
1211 | struct clk_core *parent; | |
1212 | ||
1213 | if (WARN_ON(!core || !req)) | |
1214 | return; | |
1215 | ||
1216 | parent = core->parent; | |
1217 | if (parent) { | |
1218 | req->best_parent_hw = parent->hw; | |
1219 | req->best_parent_rate = parent->rate; | |
1220 | } else { | |
1221 | req->best_parent_hw = NULL; | |
1222 | req->best_parent_rate = 0; | |
0817b62c | 1223 | } |
0f6cc2b8 | 1224 | } |
0817b62c | 1225 | |
0f6cc2b8 JB |
1226 | static bool clk_core_can_round(struct clk_core * const core) |
1227 | { | |
1228 | if (core->ops->determine_rate || core->ops->round_rate) | |
1229 | return true; | |
1230 | ||
1231 | return false; | |
1232 | } | |
1233 | ||
1234 | static int clk_core_round_rate_nolock(struct clk_core *core, | |
1235 | struct clk_rate_request *req) | |
1236 | { | |
1237 | lockdep_assert_held(&prepare_lock); | |
1238 | ||
04bf9ab3 JB |
1239 | if (!core) { |
1240 | req->rate = 0; | |
0f6cc2b8 | 1241 | return 0; |
04bf9ab3 | 1242 | } |
0817b62c | 1243 | |
0f6cc2b8 JB |
1244 | clk_core_init_rate_req(core, req); |
1245 | ||
1246 | if (clk_core_can_round(core)) | |
1247 | return clk_core_determine_round_nolock(core, req); | |
1248 | else if (core->flags & CLK_SET_RATE_PARENT) | |
1249 | return clk_core_round_rate_nolock(core->parent, req); | |
1250 | ||
1251 | req->rate = core->rate; | |
0817b62c | 1252 | return 0; |
3d6ee287 UH |
1253 | } |
1254 | ||
4dff95dc SB |
1255 | /** |
1256 | * __clk_determine_rate - get the closest rate actually supported by a clock | |
1257 | * @hw: determine the rate of this clock | |
2d5b520c | 1258 | * @req: target rate request |
4dff95dc | 1259 | * |
6e5ab41b | 1260 | * Useful for clk_ops such as .set_rate and .determine_rate. |
4dff95dc | 1261 | */ |
0817b62c | 1262 | int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) |
035a61c3 | 1263 | { |
0817b62c BB |
1264 | if (!hw) { |
1265 | req->rate = 0; | |
4dff95dc | 1266 | return 0; |
0817b62c | 1267 | } |
035a61c3 | 1268 | |
0817b62c | 1269 | return clk_core_round_rate_nolock(hw->core, req); |
035a61c3 | 1270 | } |
4dff95dc | 1271 | EXPORT_SYMBOL_GPL(__clk_determine_rate); |
035a61c3 | 1272 | |
1a9c069c SB |
1273 | unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate) |
1274 | { | |
1275 | int ret; | |
1276 | struct clk_rate_request req; | |
1277 | ||
1278 | clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate); | |
1279 | req.rate = rate; | |
1280 | ||
1281 | ret = clk_core_round_rate_nolock(hw->core, &req); | |
1282 | if (ret) | |
1283 | return 0; | |
1284 | ||
1285 | return req.rate; | |
1286 | } | |
1287 | EXPORT_SYMBOL_GPL(clk_hw_round_rate); | |
1288 | ||
4dff95dc SB |
1289 | /** |
1290 | * clk_round_rate - round the given rate for a clk | |
1291 | * @clk: the clk for which we are rounding a rate | |
1292 | * @rate: the rate which is to be rounded | |
1293 | * | |
1294 | * Takes in a rate as input and rounds it to a rate that the clk can actually | |
1295 | * use, which is then returned. If the clk doesn't support the round_rate operation |
1296 | * then the parent rate is returned. | |
1297 | */ | |
1298 | long clk_round_rate(struct clk *clk, unsigned long rate) | |
035a61c3 | 1299 | { |
fc4a05d4 SB |
1300 | struct clk_rate_request req; |
1301 | int ret; | |
4dff95dc | 1302 | |
035a61c3 | 1303 | if (!clk) |
4dff95dc | 1304 | return 0; |
035a61c3 | 1305 | |
4dff95dc | 1306 | clk_prepare_lock(); |
fc4a05d4 | 1307 | |
55e9b8b7 JB |
1308 | if (clk->exclusive_count) |
1309 | clk_core_rate_unprotect(clk->core); | |
1310 | ||
fc4a05d4 SB |
1311 | clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate); |
1312 | req.rate = rate; | |
1313 | ||
1314 | ret = clk_core_round_rate_nolock(clk->core, &req); | |
55e9b8b7 JB |
1315 | |
1316 | if (clk->exclusive_count) | |
1317 | clk_core_rate_protect(clk->core); | |
1318 | ||
4dff95dc SB |
1319 | clk_prepare_unlock(); |
1320 | ||
fc4a05d4 SB |
1321 | if (ret) |
1322 | return ret; | |
1323 | ||
1324 | return req.rate; | |
035a61c3 | 1325 | } |
4dff95dc | 1326 | EXPORT_SYMBOL_GPL(clk_round_rate); |
b2476490 | 1327 | |
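/*
 * A usage sketch of clk_round_rate(): asking what rate the clk would
 * actually give before committing to it. No hardware state is changed by
 * the call. example_pick_rate and its 5% acceptance policy are hypothetical.
 */
#if 0
static long example_pick_rate(struct clk *clk, unsigned long wanted)
{
	long actual = clk_round_rate(clk, wanted);

	if (actual <= 0)
		return actual;

	/* assumed policy: accept only if within 5% of the requested rate */
	if (abs(actual - (long)wanted) > wanted / 20)
		return -ERANGE;

	return actual;
}
#endif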
4dff95dc SB |
1328 | /** |
1329 | * __clk_notify - call clk notifier chain | |
1330 | * @core: clk that is changing rate | |
1331 | * @msg: clk notifier type (see include/linux/clk.h) | |
1332 | * @old_rate: old clk rate | |
1333 | * @new_rate: new clk rate | |
1334 | * | |
1335 | * Triggers a notifier call chain on the clk rate-change notification | |
1336 | * for 'clk'. Passes a pointer to the struct clk and the previous | |
1337 | * and current rates to the notifier callback. Intended to be called by | |
1338 | * internal clock code only. Returns NOTIFY_DONE from the last driver | |
1339 | * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if | |
1340 | * a driver returns that. | |
1341 | */ | |
1342 | static int __clk_notify(struct clk_core *core, unsigned long msg, | |
1343 | unsigned long old_rate, unsigned long new_rate) | |
b2476490 | 1344 | { |
4dff95dc SB |
1345 | struct clk_notifier *cn; |
1346 | struct clk_notifier_data cnd; | |
1347 | int ret = NOTIFY_DONE; | |
b2476490 | 1348 | |
4dff95dc SB |
1349 | cnd.old_rate = old_rate; |
1350 | cnd.new_rate = new_rate; | |
b2476490 | 1351 | |
4dff95dc SB |
1352 | list_for_each_entry(cn, &clk_notifier_list, node) { |
1353 | if (cn->clk->core == core) { | |
1354 | cnd.clk = cn->clk; | |
1355 | ret = srcu_notifier_call_chain(&cn->notifier_head, msg, | |
1356 | &cnd); | |
17c34c56 PDS |
1357 | if (ret & NOTIFY_STOP_MASK) |
1358 | return ret; | |
4dff95dc | 1359 | } |
b2476490 MT |
1360 | } |
1361 | ||
4dff95dc | 1362 | return ret; |
b2476490 MT |
1363 | } |
1364 | ||
4dff95dc SB |
1365 | /** |
1366 | * __clk_recalc_accuracies | |
1367 | * @core: first clk in the subtree | |
1368 | * | |
1369 | * Walks the subtree of clks starting with clk and recalculates accuracies as | |
1370 | * it goes. Note that if a clk does not implement the .recalc_accuracy | |
6e5ab41b | 1371 | * callback then it is assumed that the clock will take on the accuracy of its |
4dff95dc | 1372 | * parent. |
4dff95dc SB |
1373 | */ |
1374 | static void __clk_recalc_accuracies(struct clk_core *core) | |
b2476490 | 1375 | { |
4dff95dc SB |
1376 | unsigned long parent_accuracy = 0; |
1377 | struct clk_core *child; | |
b2476490 | 1378 | |
4dff95dc | 1379 | lockdep_assert_held(&prepare_lock); |
b2476490 | 1380 | |
4dff95dc SB |
1381 | if (core->parent) |
1382 | parent_accuracy = core->parent->accuracy; | |
b2476490 | 1383 | |
4dff95dc SB |
1384 | if (core->ops->recalc_accuracy) |
1385 | core->accuracy = core->ops->recalc_accuracy(core->hw, | |
1386 | parent_accuracy); | |
1387 | else | |
1388 | core->accuracy = parent_accuracy; | |
b2476490 | 1389 | |
4dff95dc SB |
1390 | hlist_for_each_entry(child, &core->children, child_node) |
1391 | __clk_recalc_accuracies(child); | |
b2476490 MT |
1392 | } |
1393 | ||
4dff95dc | 1394 | static long clk_core_get_accuracy(struct clk_core *core) |
e366fdd7 | 1395 | { |
4dff95dc | 1396 | unsigned long accuracy; |
15a02c1f | 1397 | |
4dff95dc SB |
1398 | clk_prepare_lock(); |
1399 | if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE)) | |
1400 | __clk_recalc_accuracies(core); | |
15a02c1f | 1401 | |
4dff95dc SB |
1402 | accuracy = __clk_get_accuracy(core); |
1403 | clk_prepare_unlock(); | |
e366fdd7 | 1404 | |
4dff95dc | 1405 | return accuracy; |
e366fdd7 | 1406 | } |
15a02c1f | 1407 | |
4dff95dc SB |
1408 | /** |
1409 | * clk_get_accuracy - return the accuracy of clk | |
1410 | * @clk: the clk whose accuracy is being returned | |
1411 | * | |
1412 | * Simply returns the cached accuracy of the clk, unless | |
1413 | * CLK_GET_ACCURACY_NOCACHE flag is set, which means the accuracy will be |
1414 | * recalculated. |
1415 | * If clk is NULL then returns 0. | |
1416 | */ | |
1417 | long clk_get_accuracy(struct clk *clk) | |
035a61c3 | 1418 | { |
4dff95dc SB |
1419 | if (!clk) |
1420 | return 0; | |
035a61c3 | 1421 | |
4dff95dc | 1422 | return clk_core_get_accuracy(clk->core); |
035a61c3 | 1423 | } |
4dff95dc | 1424 | EXPORT_SYMBOL_GPL(clk_get_accuracy); |
035a61c3 | 1425 | |
4dff95dc SB |
1426 | static unsigned long clk_recalc(struct clk_core *core, |
1427 | unsigned long parent_rate) | |
1c8e6004 | 1428 | { |
9a34b453 MS |
1429 | unsigned long rate = parent_rate; |
1430 | ||
1431 | if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) { | |
1432 | rate = core->ops->recalc_rate(core->hw, parent_rate); | |
1433 | clk_pm_runtime_put(core); | |
1434 | } | |
1435 | return rate; | |
1c8e6004 TV |
1436 | } |
1437 | ||
4dff95dc SB |
1438 | /** |
1439 | * __clk_recalc_rates | |
1440 | * @core: first clk in the subtree | |
1441 | * @msg: notification type (see include/linux/clk.h) | |
1442 | * | |
1443 | * Walks the subtree of clks starting with clk and recalculates rates as it | |
1444 | * goes. Note that if a clk does not implement the .recalc_rate callback then | |
1445 | * it is assumed that the clock will take on the rate of its parent. | |
1446 | * | |
1447 | * clk_recalc_rates also propagates the POST_RATE_CHANGE notification, | |
1448 | * if necessary. | |
15a02c1f | 1449 | */ |
4dff95dc | 1450 | static void __clk_recalc_rates(struct clk_core *core, unsigned long msg) |
15a02c1f | 1451 | { |
4dff95dc SB |
1452 | unsigned long old_rate; |
1453 | unsigned long parent_rate = 0; | |
1454 | struct clk_core *child; | |
e366fdd7 | 1455 | |
4dff95dc | 1456 | lockdep_assert_held(&prepare_lock); |
15a02c1f | 1457 | |
4dff95dc | 1458 | old_rate = core->rate; |
b2476490 | 1459 | |
4dff95dc SB |
1460 | if (core->parent) |
1461 | parent_rate = core->parent->rate; | |
b2476490 | 1462 | |
4dff95dc | 1463 | core->rate = clk_recalc(core, parent_rate); |
b2476490 | 1464 | |
4dff95dc SB |
1465 | /* |
1466 | * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE | |
1467 | * & ABORT_RATE_CHANGE notifiers | |
1468 | */ | |
1469 | if (core->notifier_count && msg) | |
1470 | __clk_notify(core, msg, old_rate, core->rate); | |
b2476490 | 1471 | |
4dff95dc SB |
1472 | hlist_for_each_entry(child, &core->children, child_node) |
1473 | __clk_recalc_rates(child, msg); | |
1474 | } | |
b2476490 | 1475 | |
4dff95dc SB |
1476 | static unsigned long clk_core_get_rate(struct clk_core *core) |
1477 | { | |
1478 | unsigned long rate; | |
dfc202ea | 1479 | |
4dff95dc | 1480 | clk_prepare_lock(); |
b2476490 | 1481 | |
4dff95dc SB |
1482 | if (core && (core->flags & CLK_GET_RATE_NOCACHE)) |
1483 | __clk_recalc_rates(core, 0); | |
1484 | ||
1485 | rate = clk_core_get_rate_nolock(core); | |
1486 | clk_prepare_unlock(); | |
1487 | ||
1488 | return rate; | |
b2476490 MT |
1489 | } |
1490 | ||
1491 | /** | |
4dff95dc SB |
1492 | * clk_get_rate - return the rate of clk |
1493 | * @clk: the clk whose rate is being returned | |
b2476490 | 1494 | * |
4dff95dc SB |
1495 | * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag |
1496 | * is set, which means a recalc_rate will be issued. | |
1497 | * If clk is NULL then returns 0. | |
b2476490 | 1498 | */ |
4dff95dc | 1499 | unsigned long clk_get_rate(struct clk *clk) |
b2476490 | 1500 | { |
4dff95dc SB |
1501 | if (!clk) |
1502 | return 0; | |
63589e92 | 1503 | |
4dff95dc | 1504 | return clk_core_get_rate(clk->core); |
b2476490 | 1505 | } |
4dff95dc | 1506 | EXPORT_SYMBOL_GPL(clk_get_rate); |
b2476490 | 1507 | |
4dff95dc SB |
1508 | static int clk_fetch_parent_index(struct clk_core *core, |
1509 | struct clk_core *parent) | |
b2476490 | 1510 | { |
4dff95dc | 1511 | int i; |
b2476490 | 1512 | |
508f884a MY |
1513 | if (!parent) |
1514 | return -EINVAL; | |
1515 | ||
470b5e2f MY |
1516 | for (i = 0; i < core->num_parents; i++) |
1517 | if (clk_core_get_parent_by_index(core, i) == parent) | |
4dff95dc | 1518 | return i; |
b2476490 | 1519 | |
4dff95dc | 1520 | return -EINVAL; |
b2476490 MT |
1521 | } |
1522 | ||
e6500344 HS |
1523 | /* |
1524 | * Update the orphan status of @core and all its children. | |
1525 | */ | |
1526 | static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan) | |
1527 | { | |
1528 | struct clk_core *child; | |
1529 | ||
1530 | core->orphan = is_orphan; | |
1531 | ||
1532 | hlist_for_each_entry(child, &core->children, child_node) | |
1533 | clk_core_update_orphan_status(child, is_orphan); | |
1534 | } | |
1535 | ||
4dff95dc | 1536 | static void clk_reparent(struct clk_core *core, struct clk_core *new_parent) |
b2476490 | 1537 | { |
e6500344 HS |
1538 | bool was_orphan = core->orphan; |
1539 | ||
4dff95dc | 1540 | hlist_del(&core->child_node); |
035a61c3 | 1541 | |
4dff95dc | 1542 | if (new_parent) { |
e6500344 HS |
1543 | bool becomes_orphan = new_parent->orphan; |
1544 | ||
4dff95dc SB |
1545 | /* avoid duplicate POST_RATE_CHANGE notifications */ |
1546 | if (new_parent->new_child == core) | |
1547 | new_parent->new_child = NULL; | |
b2476490 | 1548 | |
4dff95dc | 1549 | hlist_add_head(&core->child_node, &new_parent->children); |
e6500344 HS |
1550 | |
1551 | if (was_orphan != becomes_orphan) | |
1552 | clk_core_update_orphan_status(core, becomes_orphan); | |
4dff95dc SB |
1553 | } else { |
1554 | hlist_add_head(&core->child_node, &clk_orphan_list); | |
e6500344 HS |
1555 | if (!was_orphan) |
1556 | clk_core_update_orphan_status(core, true); | |
4dff95dc | 1557 | } |
dfc202ea | 1558 | |
4dff95dc | 1559 | core->parent = new_parent; |
035a61c3 TV |
1560 | } |
1561 | ||
4dff95dc SB |
1562 | static struct clk_core *__clk_set_parent_before(struct clk_core *core, |
1563 | struct clk_core *parent) | |
b2476490 MT |
1564 | { |
1565 | unsigned long flags; | |
4dff95dc | 1566 | struct clk_core *old_parent = core->parent; |
b2476490 | 1567 | |
4dff95dc | 1568 | /* |
fc8726a2 DA |
1569 | * 1. enable parents for CLK_OPS_PARENT_ENABLE clock |
1570 | * | |
1571 | * 2. Migrate prepare state between parents and prevent race with | |
4dff95dc SB |
1572 | * clk_enable(). |
1573 | * | |
1574 | * If the clock is not prepared, then a race with | |
1575 | * clk_enable/disable() is impossible since we already have the | |
1576 | * prepare lock (future calls to clk_enable() need to be preceded by | |
1577 | * a clk_prepare()). | |
1578 | * | |
1579 | * If the clock is prepared, migrate the prepared state to the new | |
1580 | * parent and also protect against a race with clk_enable() by | |
1581 | * forcing the clock and the new parent on. This ensures that all | |
1582 | * future calls to clk_enable() are practically NOPs with respect to | |
1583 | * hardware and software states. | |
1584 | * | |
1585 | * See also: Comment for clk_set_parent() below. | |
1586 | */ | |
fc8726a2 DA |
1587 | |
1588 | /* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */ | |
1589 | if (core->flags & CLK_OPS_PARENT_ENABLE) { | |
1590 | clk_core_prepare_enable(old_parent); | |
1591 | clk_core_prepare_enable(parent); | |
1592 | } | |
1593 | ||
1594 | /* migrate prepare count if > 0 */ | |
4dff95dc | 1595 | if (core->prepare_count) { |
fc8726a2 DA |
1596 | clk_core_prepare_enable(parent); |
1597 | clk_core_enable_lock(core); | |
4dff95dc | 1598 | } |
63589e92 | 1599 | |
4dff95dc | 1600 | /* update the clk tree topology */ |
eab89f69 | 1601 | flags = clk_enable_lock(); |
4dff95dc | 1602 | clk_reparent(core, parent); |
eab89f69 | 1603 | clk_enable_unlock(flags); |
4dff95dc SB |
1604 | |
1605 | return old_parent; | |
b2476490 | 1606 | } |
b2476490 | 1607 | |
4dff95dc SB |
1608 | static void __clk_set_parent_after(struct clk_core *core, |
1609 | struct clk_core *parent, | |
1610 | struct clk_core *old_parent) | |
b2476490 | 1611 | { |
4dff95dc SB |
1612 | /* |
1613 | * Finish the migration of prepare state and undo the changes done | |
1614 | * for preventing a race with clk_enable(). | |
1615 | */ | |
1616 | if (core->prepare_count) { | |
fc8726a2 DA |
1617 | clk_core_disable_lock(core); |
1618 | clk_core_disable_unprepare(old_parent); | |
1619 | } | |
1620 | ||
1621 | /* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */ | |
1622 | if (core->flags & CLK_OPS_PARENT_ENABLE) { | |
1623 | clk_core_disable_unprepare(parent); | |
1624 | clk_core_disable_unprepare(old_parent); | |
4dff95dc SB |
1625 | } |
1626 | } | |
b2476490 | 1627 | |
4dff95dc SB |
1628 | static int __clk_set_parent(struct clk_core *core, struct clk_core *parent, |
1629 | u8 p_index) | |
1630 | { | |
1631 | unsigned long flags; | |
1632 | int ret = 0; | |
1633 | struct clk_core *old_parent; | |
b2476490 | 1634 | |
4dff95dc | 1635 | old_parent = __clk_set_parent_before(core, parent); |
b2476490 | 1636 | |
4dff95dc | 1637 | trace_clk_set_parent(core, parent); |
b2476490 | 1638 | |
4dff95dc SB |
1639 | /* change clock input source */ |
1640 | if (parent && core->ops->set_parent) | |
1641 | ret = core->ops->set_parent(core->hw, p_index); | |
dfc202ea | 1642 | |
4dff95dc | 1643 | trace_clk_set_parent_complete(core, parent); |
dfc202ea | 1644 | |
4dff95dc SB |
1645 | if (ret) { |
1646 | flags = clk_enable_lock(); | |
1647 | clk_reparent(core, old_parent); | |
1648 | clk_enable_unlock(flags); | |
c660b2eb | 1649 | __clk_set_parent_after(core, old_parent, parent); |
dfc202ea | 1650 | |
4dff95dc | 1651 | return ret; |
b2476490 MT |
1652 | } |
1653 | ||
4dff95dc SB |
1654 | __clk_set_parent_after(core, parent, old_parent); |
1655 | ||
b2476490 MT |
1656 | return 0; |
1657 | } | |
1658 | ||
1659 | /** | |
4dff95dc SB |
1660 | * __clk_speculate_rates - recursively speculate rates down a clk subtree | |
1661 | * @core: first clk in the subtree | |
1662 | * @parent_rate: the "future" rate of clk's parent | |
b2476490 | 1663 | * |
4dff95dc SB |
1664 | * Walks the subtree of clks starting with clk, speculating rates as it |
1665 | * goes and firing off PRE_RATE_CHANGE notifications as necessary. | |
1666 | * | |
1667 | * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending | |
1668 | * pre-rate change notifications and returns early if no clks in the | |
1669 | * subtree have subscribed to the notifications. Note that if a clk does not | |
1670 | * implement the .recalc_rate callback then it is assumed that the clock will | |
1671 | * take on the rate of its parent. | |
b2476490 | 1672 | */ |
4dff95dc SB |
1673 | static int __clk_speculate_rates(struct clk_core *core, |
1674 | unsigned long parent_rate) | |
b2476490 | 1675 | { |
4dff95dc SB |
1676 | struct clk_core *child; |
1677 | unsigned long new_rate; | |
1678 | int ret = NOTIFY_DONE; | |
b2476490 | 1679 | |
4dff95dc | 1680 | lockdep_assert_held(&prepare_lock); |
864e160a | 1681 | |
4dff95dc SB |
1682 | new_rate = clk_recalc(core, parent_rate); |
1683 | ||
1684 | /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */ | |
1685 | if (core->notifier_count) | |
1686 | ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate); | |
1687 | ||
1688 | if (ret & NOTIFY_STOP_MASK) { | |
1689 | pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n", | |
1690 | __func__, core->name, ret); | |
1691 | goto out; | |
1692 | } | |
1693 | ||
1694 | hlist_for_each_entry(child, &core->children, child_node) { | |
1695 | ret = __clk_speculate_rates(child, new_rate); | |
1696 | if (ret & NOTIFY_STOP_MASK) | |
1697 | break; | |
1698 | } | |
b2476490 | 1699 | |
4dff95dc | 1700 | out: |
b2476490 MT |
1701 | return ret; |
1702 | } | |
b2476490 | 1703 | |
4dff95dc SB |
1704 | static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate, |
1705 | struct clk_core *new_parent, u8 p_index) | |
b2476490 | 1706 | { |
4dff95dc | 1707 | struct clk_core *child; |
b2476490 | 1708 | |
4dff95dc SB |
1709 | core->new_rate = new_rate; |
1710 | core->new_parent = new_parent; | |
1711 | core->new_parent_index = p_index; | |
1712 | /* include clk in new parent's PRE_RATE_CHANGE notifications */ | |
1713 | core->new_child = NULL; | |
1714 | if (new_parent && new_parent != core->parent) | |
1715 | new_parent->new_child = core; | |
496eadf8 | 1716 | |
4dff95dc SB |
1717 | hlist_for_each_entry(child, &core->children, child_node) { |
1718 | child->new_rate = clk_recalc(child, new_rate); | |
1719 | clk_calc_subtree(child, child->new_rate, NULL, 0); | |
1720 | } | |
1721 | } | |
b2476490 | 1722 | |
4dff95dc SB |
1723 | /* |
1724 | * calculate the new rates returning the topmost clock that has to be | |
1725 | * changed. | |
1726 | */ | |
1727 | static struct clk_core *clk_calc_new_rates(struct clk_core *core, | |
1728 | unsigned long rate) | |
1729 | { | |
1730 | struct clk_core *top = core; | |
1731 | struct clk_core *old_parent, *parent; | |
4dff95dc SB |
1732 | unsigned long best_parent_rate = 0; |
1733 | unsigned long new_rate; | |
1734 | unsigned long min_rate; | |
1735 | unsigned long max_rate; | |
1736 | int p_index = 0; | |
1737 | long ret; | |
1738 | ||
1739 | /* sanity */ | |
1740 | if (IS_ERR_OR_NULL(core)) | |
1741 | return NULL; | |
1742 | ||
1743 | /* save parent rate, if it exists */ | |
1744 | parent = old_parent = core->parent; | |
71472c0c | 1745 | if (parent) |
4dff95dc | 1746 | best_parent_rate = parent->rate; |
71472c0c | 1747 | |
4dff95dc SB |
1748 | clk_core_get_boundaries(core, &min_rate, &max_rate); |
1749 | ||
1750 | /* find the closest rate and parent clk/rate */ | |
0f6cc2b8 | 1751 | if (clk_core_can_round(core)) { |
0817b62c BB |
1752 | struct clk_rate_request req; |
1753 | ||
1754 | req.rate = rate; | |
1755 | req.min_rate = min_rate; | |
1756 | req.max_rate = max_rate; | |
0817b62c | 1757 | |
0f6cc2b8 JB |
1758 | clk_core_init_rate_req(core, &req); |
1759 | ||
1760 | ret = clk_core_determine_round_nolock(core, &req); | |
4dff95dc SB |
1761 | if (ret < 0) |
1762 | return NULL; | |
1c8e6004 | 1763 | |
0817b62c BB |
1764 | best_parent_rate = req.best_parent_rate; |
1765 | new_rate = req.rate; | |
1766 | parent = req.best_parent_hw ? req.best_parent_hw->core : NULL; | |
035a61c3 | 1767 | |
4dff95dc SB |
1768 | if (new_rate < min_rate || new_rate > max_rate) |
1769 | return NULL; | |
1770 | } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) { | |
1771 | /* pass-through clock without adjustable parent */ | |
1772 | core->new_rate = core->rate; | |
1773 | return NULL; | |
1774 | } else { | |
1775 | /* pass-through clock with adjustable parent */ | |
1776 | top = clk_calc_new_rates(parent, rate); | |
1777 | new_rate = parent->new_rate; | |
1778 | goto out; | |
1779 | } | |
1c8e6004 | 1780 | |
4dff95dc SB |
1781 | /* some clocks must be gated to change parent */ |
1782 | if (parent != old_parent && | |
1783 | (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) { | |
1784 | pr_debug("%s: %s not gated but wants to reparent\n", | |
1785 | __func__, core->name); | |
1786 | return NULL; | |
1787 | } | |
b2476490 | 1788 | |
4dff95dc SB |
1789 | /* try finding the new parent index */ |
1790 | if (parent && core->num_parents > 1) { | |
1791 | p_index = clk_fetch_parent_index(core, parent); | |
1792 | if (p_index < 0) { | |
1793 | pr_debug("%s: clk %s can not be parent of clk %s\n", | |
1794 | __func__, parent->name, core->name); | |
1795 | return NULL; | |
1796 | } | |
1797 | } | |
b2476490 | 1798 | |
4dff95dc SB |
1799 | if ((core->flags & CLK_SET_RATE_PARENT) && parent && |
1800 | best_parent_rate != parent->rate) | |
1801 | top = clk_calc_new_rates(parent, best_parent_rate); | |
035a61c3 | 1802 | |
4dff95dc SB |
1803 | out: |
1804 | clk_calc_subtree(core, new_rate, parent, p_index); | |
b2476490 | 1805 | |
4dff95dc | 1806 | return top; |
b2476490 | 1807 | } |
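/*
 * Worked example (hypothetical clocks, for illustration only): take a
 * divide-by-2 clk "div" with CLK_SET_RATE_PARENT whose parent PLL currently
 * runs at 300 MHz. A request for 100 MHz on "div" can make its rate
 * determination ask for a 200 MHz parent rate; since that differs from the
 * PLL's current rate, clk_calc_new_rates() recurses upwards and returns the
 * PLL as the topmost clock to change, while clk_calc_subtree() records
 * 100 MHz in div->new_rate for the later clk_change_rate() walk.
 */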
b2476490 | 1808 | |
4dff95dc SB |
1809 | /* |
1810 | * Notify about rate changes in a subtree. Always walk down the whole tree | |
1811 | * so that in case of an error we can walk down the whole tree again and | |
1812 | * abort the change. | |
b2476490 | 1813 | */ |
4dff95dc SB |
1814 | static struct clk_core *clk_propagate_rate_change(struct clk_core *core, |
1815 | unsigned long event) | |
b2476490 | 1816 | { |
4dff95dc | 1817 | struct clk_core *child, *tmp_clk, *fail_clk = NULL; |
b2476490 MT |
1818 | int ret = NOTIFY_DONE; |
1819 | ||
4dff95dc SB |
1820 | if (core->rate == core->new_rate) |
1821 | return NULL; | |
b2476490 | 1822 | |
4dff95dc SB |
1823 | if (core->notifier_count) { |
1824 | ret = __clk_notify(core, event, core->rate, core->new_rate); | |
1825 | if (ret & NOTIFY_STOP_MASK) | |
1826 | fail_clk = core; | |
b2476490 MT |
1827 | } |
1828 | ||
4dff95dc SB |
1829 | hlist_for_each_entry(child, &core->children, child_node) { |
1830 | /* Skip children who will be reparented to another clock */ | |
1831 | if (child->new_parent && child->new_parent != core) | |
1832 | continue; | |
1833 | tmp_clk = clk_propagate_rate_change(child, event); | |
1834 | if (tmp_clk) | |
1835 | fail_clk = tmp_clk; | |
1836 | } | |
5279fc40 | 1837 | |
4dff95dc SB |
1838 | /* handle the new child who might not be in core->children yet */ |
1839 | if (core->new_child) { | |
1840 | tmp_clk = clk_propagate_rate_change(core->new_child, event); | |
1841 | if (tmp_clk) | |
1842 | fail_clk = tmp_clk; | |
1843 | } | |
5279fc40 | 1844 | |
4dff95dc | 1845 | return fail_clk; |
5279fc40 BB |
1846 | } |
1847 | ||
4dff95dc SB |
1848 | /* |
1849 | * walk down a subtree and set the new rates notifying the rate | |
1850 | * change on the way | |
1851 | */ | |
1852 | static void clk_change_rate(struct clk_core *core) | |
035a61c3 | 1853 | { |
4dff95dc SB |
1854 | struct clk_core *child; |
1855 | struct hlist_node *tmp; | |
1856 | unsigned long old_rate; | |
1857 | unsigned long best_parent_rate = 0; | |
1858 | bool skip_set_rate = false; | |
1859 | struct clk_core *old_parent; | |
fc8726a2 | 1860 | struct clk_core *parent = NULL; |
035a61c3 | 1861 | |
4dff95dc | 1862 | old_rate = core->rate; |
035a61c3 | 1863 | |
fc8726a2 DA |
1864 | if (core->new_parent) { |
1865 | parent = core->new_parent; | |
4dff95dc | 1866 | best_parent_rate = core->new_parent->rate; |
fc8726a2 DA |
1867 | } else if (core->parent) { |
1868 | parent = core->parent; | |
4dff95dc | 1869 | best_parent_rate = core->parent->rate; |
fc8726a2 | 1870 | } |
035a61c3 | 1871 | |
588fb54b MS |
1872 | if (clk_pm_runtime_get(core)) |
1873 | return; | |
1874 | ||
2eb8c710 HS |
1875 | if (core->flags & CLK_SET_RATE_UNGATE) { |
1876 | unsigned long flags; | |
1877 | ||
1878 | clk_core_prepare(core); | |
1879 | flags = clk_enable_lock(); | |
1880 | clk_core_enable(core); | |
1881 | clk_enable_unlock(flags); | |
1882 | } | |
1883 | ||
4dff95dc SB |
1884 | if (core->new_parent && core->new_parent != core->parent) { |
1885 | old_parent = __clk_set_parent_before(core, core->new_parent); | |
1886 | trace_clk_set_parent(core, core->new_parent); | |
5279fc40 | 1887 | |
4dff95dc SB |
1888 | if (core->ops->set_rate_and_parent) { |
1889 | skip_set_rate = true; | |
1890 | core->ops->set_rate_and_parent(core->hw, core->new_rate, | |
1891 | best_parent_rate, | |
1892 | core->new_parent_index); | |
1893 | } else if (core->ops->set_parent) { | |
1894 | core->ops->set_parent(core->hw, core->new_parent_index); | |
1895 | } | |
5279fc40 | 1896 | |
4dff95dc SB |
1897 | trace_clk_set_parent_complete(core, core->new_parent); |
1898 | __clk_set_parent_after(core, core->new_parent, old_parent); | |
1899 | } | |
8f2c2db1 | 1900 | |
fc8726a2 DA |
1901 | if (core->flags & CLK_OPS_PARENT_ENABLE) |
1902 | clk_core_prepare_enable(parent); | |
1903 | ||
4dff95dc | 1904 | trace_clk_set_rate(core, core->new_rate); |
b2476490 | 1905 | |
4dff95dc SB |
1906 | if (!skip_set_rate && core->ops->set_rate) |
1907 | core->ops->set_rate(core->hw, core->new_rate, best_parent_rate); | |
496eadf8 | 1908 | |
4dff95dc | 1909 | trace_clk_set_rate_complete(core, core->new_rate); |
b2476490 | 1910 | |
4dff95dc | 1911 | core->rate = clk_recalc(core, best_parent_rate); |
b2476490 | 1912 | |
2eb8c710 HS |
1913 | if (core->flags & CLK_SET_RATE_UNGATE) { |
1914 | unsigned long flags; | |
1915 | ||
1916 | flags = clk_enable_lock(); | |
1917 | clk_core_disable(core); | |
1918 | clk_enable_unlock(flags); | |
1919 | clk_core_unprepare(core); | |
1920 | } | |
1921 | ||
fc8726a2 DA |
1922 | if (core->flags & CLK_OPS_PARENT_ENABLE) |
1923 | clk_core_disable_unprepare(parent); | |
1924 | ||
4dff95dc SB |
1925 | if (core->notifier_count && old_rate != core->rate) |
1926 | __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate); | |
b2476490 | 1927 | |
85e88fab MT |
1928 | if (core->flags & CLK_RECALC_NEW_RATES) |
1929 | (void)clk_calc_new_rates(core, core->new_rate); | |
d8d91987 | 1930 | |
b2476490 | 1931 | /* |
4dff95dc SB |
1932 | * Use safe iteration, as change_rate can actually swap parents |
1933 | * for certain clock types. | |
b2476490 | 1934 | */ |
4dff95dc SB |
1935 | hlist_for_each_entry_safe(child, tmp, &core->children, child_node) { |
1936 | /* Skip children who will be reparented to another clock */ | |
1937 | if (child->new_parent && child->new_parent != core) | |
1938 | continue; | |
1939 | clk_change_rate(child); | |
1940 | } | |
b2476490 | 1941 | |
4dff95dc SB |
1942 | /* handle the new child who might not be in core->children yet */ |
1943 | if (core->new_child) | |
1944 | clk_change_rate(core->new_child); | |
588fb54b MS |
1945 | |
1946 | clk_pm_runtime_put(core); | |
b2476490 MT |
1947 | } |
1948 | ||
ca5e089a JB |
1949 | static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core, |
1950 | unsigned long req_rate) | |
1951 | { | |
e55a839a | 1952 | int ret, cnt; |
ca5e089a JB |
1953 | struct clk_rate_request req; |
1954 | ||
1955 | lockdep_assert_held(&prepare_lock); | |
1956 | ||
1957 | if (!core) | |
1958 | return 0; | |
1959 | ||
e55a839a JB |
1960 | /* simulate what the rate would be if it could be freely set */ |
1961 | cnt = clk_core_rate_nuke_protect(core); | |
1962 | if (cnt < 0) | |
1963 | return cnt; | |
1964 | ||
ca5e089a JB |
1965 | clk_core_get_boundaries(core, &req.min_rate, &req.max_rate); |
1966 | req.rate = req_rate; | |
1967 | ||
1968 | ret = clk_core_round_rate_nolock(core, &req); | |
1969 | ||
e55a839a JB |
1970 | /* restore the protection */ |
1971 | clk_core_rate_restore_protect(core, cnt); | |
1972 | ||
ca5e089a | 1973 | return ret ? 0 : req.rate; |
b2476490 MT |
1974 | } |
1975 | ||
4dff95dc SB |
1976 | static int clk_core_set_rate_nolock(struct clk_core *core, |
1977 | unsigned long req_rate) | |
a093bde2 | 1978 | { |
4dff95dc | 1979 | struct clk_core *top, *fail_clk; |
ca5e089a | 1980 | unsigned long rate; |
9a34b453 | 1981 | int ret = 0; |
a093bde2 | 1982 | |
4dff95dc SB |
1983 | if (!core) |
1984 | return 0; | |
a093bde2 | 1985 | |
ca5e089a JB |
1986 | rate = clk_core_req_round_rate_nolock(core, req_rate); |
1987 | ||
4dff95dc SB |
1988 | /* bail early if nothing to do */ |
1989 | if (rate == clk_core_get_rate_nolock(core)) | |
1990 | return 0; | |
a093bde2 | 1991 | |
e55a839a JB |
1992 | /* fail on a direct rate set of a protected provider */ |
1993 | if (clk_core_rate_is_protected(core)) | |
1994 | return -EBUSY; | |
1995 | ||
4dff95dc | 1996 | /* calculate new rates and get the topmost changed clock */ |
ca5e089a | 1997 | top = clk_calc_new_rates(core, req_rate); |
4dff95dc SB |
1998 | if (!top) |
1999 | return -EINVAL; | |
2000 | ||
9a34b453 MS |
2001 | ret = clk_pm_runtime_get(core); |
2002 | if (ret) | |
2003 | return ret; | |
2004 | ||
4dff95dc SB |
2005 | /* notify that we are about to change rates */ |
2006 | fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE); | |
2007 | if (fail_clk) { | |
2008 | pr_debug("%s: failed to set %s rate\n", __func__, | |
2009 | fail_clk->name); | |
2010 | clk_propagate_rate_change(top, ABORT_RATE_CHANGE); | |
9a34b453 MS |
2011 | ret = -EBUSY; |
2012 | goto err; | |
4dff95dc SB |
2013 | } |
2014 | ||
2015 | /* change the rates */ | |
2016 | clk_change_rate(top); | |
2017 | ||
2018 | core->req_rate = req_rate; | |
9a34b453 MS |
2019 | err: |
2020 | clk_pm_runtime_put(core); | |
4dff95dc | 2021 | |
9a34b453 | 2022 | return ret; |
a093bde2 | 2023 | } |
035a61c3 TV |
2024 | |
2025 | /** | |
4dff95dc SB |
2026 | * clk_set_rate - specify a new rate for clk |
2027 | * @clk: the clk whose rate is being changed | |
2028 | * @rate: the new rate for clk | |
035a61c3 | 2029 | * |
4dff95dc SB |
2030 | * In the simplest case clk_set_rate will only adjust the rate of clk. |
2031 | * | |
2032 | * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to | |
2033 | * propagate up to clk's parent; whether or not this happens depends on the | |
2034 | * outcome of clk's .round_rate implementation. If *parent_rate is unchanged | |
2035 | * after calling .round_rate then upstream parent propagation is ignored. If | |
2036 | * *parent_rate comes back with a new rate for clk's parent then we propagate | |
2037 | * up to clk's parent and set its rate. Upward propagation will continue | |
2038 | * until either a clk does not support the CLK_SET_RATE_PARENT flag or | |
2039 | * .round_rate stops requesting changes to clk's parent_rate. | |
2040 | * | |
2041 | * Rate changes are accomplished via tree traversal that also recalculates the | |
2042 | * rates for the clocks and fires off POST_RATE_CHANGE notifiers. | |
2043 | * | |
2044 | * Returns 0 on success, -EERROR otherwise. | |
035a61c3 | 2045 | */ |
4dff95dc | 2046 | int clk_set_rate(struct clk *clk, unsigned long rate) |
035a61c3 | 2047 | { |
4dff95dc SB |
2048 | int ret; |
2049 | ||
035a61c3 TV |
2050 | if (!clk) |
2051 | return 0; | |
2052 | ||
4dff95dc SB |
2053 | /* prevent racing with updates to the clock topology */ |
2054 | clk_prepare_lock(); | |
da0f0b2c | 2055 | |
55e9b8b7 JB |
2056 | if (clk->exclusive_count) |
2057 | clk_core_rate_unprotect(clk->core); | |
2058 | ||
4dff95dc | 2059 | ret = clk_core_set_rate_nolock(clk->core, rate); |
da0f0b2c | 2060 | |
55e9b8b7 JB |
2061 | if (clk->exclusive_count) |
2062 | clk_core_rate_protect(clk->core); | |
2063 | ||
4dff95dc | 2064 | clk_prepare_unlock(); |
4935b22c | 2065 | |
4dff95dc | 2066 | return ret; |
4935b22c | 2067 | } |
4dff95dc | 2068 | EXPORT_SYMBOL_GPL(clk_set_rate); |
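/*
 * Illustrative consumer-side sketch (not part of this file): a typical
 * driver sequence around clk_set_rate(). The "uart" clock name and the
 * 48 MHz figure are hypothetical.
 *
 *	struct clk *clk = devm_clk_get(dev, "uart");
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	ret = clk_prepare_enable(clk);
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_set_rate(clk, 48000000);
 *	if (ret)
 *		dev_warn(dev, "failed to set uart clk rate: %d\n", ret);
 */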
4935b22c | 2069 | |
55e9b8b7 JB |
2070 | /** |
2071 | * clk_set_rate_exclusive - specify a new rate and get exclusive control | |
2072 | * @clk: the clk whose rate is being changed | |
2073 | * @rate: the new rate for clk | |
2074 | * | |
2075 | * This is a combination of clk_set_rate() and clk_rate_exclusive_get() | |
2076 | * within a critical section | |
2077 | * | |
2078 | * This can be used initially to ensure that at least 1 consumer is | |
2079 | * satisfied when several consumers are competing for exclusivity over the | |
2080 | * same clock provider. | |
2081 | * | |
2082 | * The exclusivity is not applied if setting the rate failed. | |
2083 | * | |
2084 | * Calls to clk_rate_exclusive_get() should be balanced with calls to | |
2085 | * clk_rate_exclusive_put(). | |
2086 | * | |
2087 | * Returns 0 on success, -EERROR otherwise. | |
2088 | */ | |
2089 | int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) | |
2090 | { | |
2091 | int ret; | |
2092 | ||
2093 | if (!clk) | |
2094 | return 0; | |
2095 | ||
2096 | /* prevent racing with updates to the clock topology */ | |
2097 | clk_prepare_lock(); | |
2098 | ||
2099 | /* | |
2100 | * The temporary protection removal is not done here, on purpose: | |
2101 | * this function is meant to be used instead of clk_rate_protect(), | |
2102 | * i.e. before the consumer code path protects the clock provider | |
2103 | */ | |
2104 | ||
2105 | ret = clk_core_set_rate_nolock(clk->core, rate); | |
2106 | if (!ret) { | |
2107 | clk_core_rate_protect(clk->core); | |
2108 | clk->exclusive_count++; | |
2109 | } | |
2110 | ||
2111 | clk_prepare_unlock(); | |
2112 | ||
2113 | return ret; | |
2114 | } | |
2115 | EXPORT_SYMBOL_GPL(clk_set_rate_exclusive); | |
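/*
 * Illustrative sketch (not part of this file): pinning a rate and releasing
 * the exclusive claim again with clk_rate_exclusive_put(). The "ref" clock
 * handle and the 19.2 MHz rate are hypothetical.
 *
 *	ret = clk_set_rate_exclusive(ref, 19200000);
 *	if (ret)
 *		return ret;
 *
 *	... window during which other consumers cannot change the rate ...
 *
 *	clk_rate_exclusive_put(ref);
 */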
2116 | ||
4dff95dc SB |
2117 | /** |
2118 | * clk_set_rate_range - set a rate range for a clock source | |
2119 | * @clk: clock source | |
2120 | * @min: desired minimum clock rate in Hz, inclusive | |
2121 | * @max: desired maximum clock rate in Hz, inclusive | |
2122 | * | |
2123 | * Returns success (0) or negative errno. | |
2124 | */ | |
2125 | int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) | |
4935b22c | 2126 | { |
4dff95dc | 2127 | int ret = 0; |
6562fbcf | 2128 | unsigned long old_min, old_max, rate; |
4935b22c | 2129 | |
4dff95dc SB |
2130 | if (!clk) |
2131 | return 0; | |
903efc55 | 2132 | |
4dff95dc SB |
2133 | if (min > max) { |
2134 | pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n", | |
2135 | __func__, clk->core->name, clk->dev_id, clk->con_id, | |
2136 | min, max); | |
2137 | return -EINVAL; | |
903efc55 | 2138 | } |
4935b22c | 2139 | |
4dff95dc | 2140 | clk_prepare_lock(); |
4935b22c | 2141 | |
55e9b8b7 JB |
2142 | if (clk->exclusive_count) |
2143 | clk_core_rate_unprotect(clk->core); | |
2144 | ||
6562fbcf JB |
2145 | /* Save the current values in case we need to rollback the change */ |
2146 | old_min = clk->min_rate; | |
2147 | old_max = clk->max_rate; | |
2148 | clk->min_rate = min; | |
2149 | clk->max_rate = max; | |
2150 | ||
2151 | rate = clk_core_get_rate_nolock(clk->core); | |
2152 | if (rate < min || rate > max) { | |
2153 | /* | |
2154 | * FIXME: | |
2155 | * We are in a bit of trouble here: the current rate is outside | |
2156 | * the requested range. We are going to try to request the appropriate | |
2157 | * range boundary but there is a catch. It may fail for the | |
2158 | * usual reason (clock broken, clock protected, etc) but also | |
2159 | * because: | |
2160 | * - round_rate() was not favorable and fell on the wrong | |
2161 | * side of the boundary | |
2162 | * - the determine_rate() callback does not really check for | |
2163 | * this corner case when determining the rate | |
2164 | */ | |
2165 | ||
2166 | if (rate < min) | |
2167 | rate = min; | |
2168 | else | |
2169 | rate = max; | |
2170 | ||
2171 | ret = clk_core_set_rate_nolock(clk->core, rate); | |
2172 | if (ret) { | |
2173 | /* rollback the changes */ | |
2174 | clk->min_rate = old_min; | |
2175 | clk->max_rate = old_max; | |
2176 | } | |
4935b22c JH |
2177 | } |
2178 | ||
55e9b8b7 JB |
2179 | if (clk->exclusive_count) |
2180 | clk_core_rate_protect(clk->core); | |
2181 | ||
4dff95dc | 2182 | clk_prepare_unlock(); |
4935b22c | 2183 | |
4dff95dc | 2184 | return ret; |
3fa2252b | 2185 | } |
4dff95dc | 2186 | EXPORT_SYMBOL_GPL(clk_set_rate_range); |
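/*
 * Illustrative sketch (not part of this file): constraining a clock to a
 * band instead of one exact frequency; the clock name and the limits are
 * hypothetical.
 *
 *	ret = clk_set_rate_range(pixel_clk, 25000000, 165000000);
 *	if (ret)
 *		dev_err(dev, "cannot constrain pixel clock: %d\n", ret);
 */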
3fa2252b | 2187 | |
4dff95dc SB |
2188 | /** |
2189 | * clk_set_min_rate - set a minimum clock rate for a clock source | |
2190 | * @clk: clock source | |
2191 | * @rate: desired minimum clock rate in Hz, inclusive | |
2192 | * | |
2193 | * Returns success (0) or negative errno. | |
2194 | */ | |
2195 | int clk_set_min_rate(struct clk *clk, unsigned long rate) | |
3fa2252b | 2196 | { |
4dff95dc SB |
2197 | if (!clk) |
2198 | return 0; | |
2199 | ||
2200 | return clk_set_rate_range(clk, rate, clk->max_rate); | |
3fa2252b | 2201 | } |
4dff95dc | 2202 | EXPORT_SYMBOL_GPL(clk_set_min_rate); |
3fa2252b | 2203 | |
4dff95dc SB |
2204 | /** |
2205 | * clk_set_max_rate - set a maximum clock rate for a clock source | |
2206 | * @clk: clock source | |
2207 | * @rate: desired maximum clock rate in Hz, inclusive | |
2208 | * | |
2209 | * Returns success (0) or negative errno. | |
2210 | */ | |
2211 | int clk_set_max_rate(struct clk *clk, unsigned long rate) | |
3fa2252b | 2212 | { |
4dff95dc SB |
2213 | if (!clk) |
2214 | return 0; | |
4935b22c | 2215 | |
4dff95dc | 2216 | return clk_set_rate_range(clk, clk->min_rate, rate); |
4935b22c | 2217 | } |
4dff95dc | 2218 | EXPORT_SYMBOL_GPL(clk_set_max_rate); |
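/*
 * Illustrative sketch: the two helpers above are thin wrappers around
 * clk_set_rate_range() that leave the other boundary untouched, e.g. a
 * (hypothetical) thermal driver capping a CPU clock while keeping the
 * previously requested minimum:
 *
 *	ret = clk_set_max_rate(cpu_clk, 1200000000);
 */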
4935b22c | 2219 | |
b2476490 | 2220 | /** |
4dff95dc SB |
2221 | * clk_get_parent - return the parent of a clk |
2222 | * @clk: the clk whose parent gets returned | |
b2476490 | 2223 | * |
4dff95dc | 2224 | * Simply returns clk->parent. Returns NULL if clk is NULL. |
b2476490 | 2225 | */ |
4dff95dc | 2226 | struct clk *clk_get_parent(struct clk *clk) |
b2476490 | 2227 | { |
4dff95dc | 2228 | struct clk *parent; |
b2476490 | 2229 | |
fc4a05d4 SB |
2230 | if (!clk) |
2231 | return NULL; | |
2232 | ||
4dff95dc | 2233 | clk_prepare_lock(); |
fc4a05d4 SB |
2234 | /* TODO: Create a per-user clk and change callers to call clk_put */ |
2235 | parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk; | |
4dff95dc | 2236 | clk_prepare_unlock(); |
496eadf8 | 2237 | |
4dff95dc SB |
2238 | return parent; |
2239 | } | |
2240 | EXPORT_SYMBOL_GPL(clk_get_parent); | |
b2476490 | 2241 | |
4dff95dc SB |
2242 | static struct clk_core *__clk_init_parent(struct clk_core *core) |
2243 | { | |
5146e0b0 | 2244 | u8 index = 0; |
4dff95dc | 2245 | |
2430a94d | 2246 | if (core->num_parents > 1 && core->ops->get_parent) |
5146e0b0 | 2247 | index = core->ops->get_parent(core->hw); |
b2476490 | 2248 | |
5146e0b0 | 2249 | return clk_core_get_parent_by_index(core, index); |
b2476490 MT |
2250 | } |
2251 | ||
4dff95dc SB |
2252 | static void clk_core_reparent(struct clk_core *core, |
2253 | struct clk_core *new_parent) | |
b2476490 | 2254 | { |
4dff95dc SB |
2255 | clk_reparent(core, new_parent); |
2256 | __clk_recalc_accuracies(core); | |
2257 | __clk_recalc_rates(core, POST_RATE_CHANGE); | |
b2476490 MT |
2258 | } |
2259 | ||
42c86547 TV |
2260 | void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent) |
2261 | { | |
2262 | if (!hw) | |
2263 | return; | |
2264 | ||
2265 | clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core); | |
2266 | } | |
2267 | ||
4dff95dc SB |
2268 | /** |
2269 | * clk_has_parent - check if a clock is a possible parent for another | |
2270 | * @clk: clock source | |
2271 | * @parent: parent clock source | |
2272 | * | |
2273 | * This function can be used in drivers that need to check that a clock can be | |
2274 | * the parent of another without actually changing the parent. | |
2275 | * | |
2276 | * Returns true if @parent is a possible parent for @clk, false otherwise. | |
b2476490 | 2277 | */ |
4dff95dc | 2278 | bool clk_has_parent(struct clk *clk, struct clk *parent) |
b2476490 | 2279 | { |
4dff95dc | 2280 | struct clk_core *core, *parent_core; |
b2476490 | 2281 | |
4dff95dc SB |
2282 | /* NULL clocks should be nops, so return success if either is NULL. */ |
2283 | if (!clk || !parent) | |
2284 | return true; | |
7452b219 | 2285 | |
4dff95dc SB |
2286 | core = clk->core; |
2287 | parent_core = parent->core; | |
71472c0c | 2288 | |
4dff95dc SB |
2289 | /* Optimize for the case where the parent is already the parent. */ |
2290 | if (core->parent == parent_core) | |
2291 | return true; | |
1c8e6004 | 2292 | |
d6347445 YX |
2293 | return match_string(core->parent_names, core->num_parents, |
2294 | parent_core->name) >= 0; | |
4dff95dc SB |
2295 | } |
2296 | EXPORT_SYMBOL_GPL(clk_has_parent); | |
03bc10ab | 2297 | |
91baa9ff JB |
2298 | static int clk_core_set_parent_nolock(struct clk_core *core, |
2299 | struct clk_core *parent) | |
4dff95dc SB |
2300 | { |
2301 | int ret = 0; | |
2302 | int p_index = 0; | |
2303 | unsigned long p_rate = 0; | |
2304 | ||
91baa9ff JB |
2305 | lockdep_assert_held(&prepare_lock); |
2306 | ||
4dff95dc SB |
2307 | if (!core) |
2308 | return 0; | |
2309 | ||
4dff95dc | 2310 | if (core->parent == parent) |
91baa9ff | 2311 | return 0; |
4dff95dc SB |
2312 | |
2313 | /* verify ops for multi-parent clks */ | |
91baa9ff JB |
2314 | if (core->num_parents > 1 && !core->ops->set_parent) |
2315 | return -EPERM; | |
7452b219 | 2316 | |
4dff95dc | 2317 | /* check that we are allowed to re-parent if the clock is in use */ |
91baa9ff JB |
2318 | if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) |
2319 | return -EBUSY; | |
b2476490 | 2320 | |
e55a839a JB |
2321 | if (clk_core_rate_is_protected(core)) |
2322 | return -EBUSY; | |
b2476490 | 2323 | |
71472c0c | 2324 | /* try finding the new parent index */ |
4dff95dc | 2325 | if (parent) { |
d6968fca | 2326 | p_index = clk_fetch_parent_index(core, parent); |
f1c8b2ed | 2327 | if (p_index < 0) { |
71472c0c | 2328 | pr_debug("%s: clk %s can not be parent of clk %s\n", |
4dff95dc | 2329 | __func__, parent->name, core->name); |
91baa9ff | 2330 | return p_index; |
71472c0c | 2331 | } |
e8f0e68e | 2332 | p_rate = parent->rate; |
b2476490 MT |
2333 | } |
2334 | ||
9a34b453 MS |
2335 | ret = clk_pm_runtime_get(core); |
2336 | if (ret) | |
91baa9ff | 2337 | return ret; |
9a34b453 | 2338 | |
4dff95dc SB |
2339 | /* propagate PRE_RATE_CHANGE notifications */ |
2340 | ret = __clk_speculate_rates(core, p_rate); | |
b2476490 | 2341 | |
4dff95dc SB |
2342 | /* abort if a driver objects */ |
2343 | if (ret & NOTIFY_STOP_MASK) | |
9a34b453 | 2344 | goto runtime_put; |
b2476490 | 2345 | |
4dff95dc SB |
2346 | /* do the re-parent */ |
2347 | ret = __clk_set_parent(core, parent, p_index); | |
b2476490 | 2348 | |
4dff95dc SB |
2349 | /* propagate rate and accuracy recalculation accordingly */ | |
2350 | if (ret) { | |
2351 | __clk_recalc_rates(core, ABORT_RATE_CHANGE); | |
2352 | } else { | |
2353 | __clk_recalc_rates(core, POST_RATE_CHANGE); | |
2354 | __clk_recalc_accuracies(core); | |
b2476490 MT |
2355 | } |
2356 | ||
9a34b453 MS |
2357 | runtime_put: |
2358 | clk_pm_runtime_put(core); | |
71472c0c | 2359 | |
4dff95dc SB |
2360 | return ret; |
2361 | } | |
b2476490 | 2362 | |
4dff95dc SB |
2363 | /** |
2364 | * clk_set_parent - switch the parent of a mux clk | |
2365 | * @clk: the mux clk whose input we are switching | |
2366 | * @parent: the new input to clk | |
2367 | * | |
2368 | * Re-parent clk to use parent as its new input source. If clk is in | |
2369 | * prepared state, the clk will get enabled for the duration of this call. If | |
2370 | * that's not acceptable for a specific clk (e.g. the consumer can't handle | |
2371 | * that, the reparenting is glitchy in hardware, etc), use the | |
2372 | * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared. | |
2373 | * | |
2374 | * After successfully changing clk's parent clk_set_parent will update the | |
2375 | * clk topology, sysfs topology and propagate rate recalculation via | |
2376 | * __clk_recalc_rates. | |
2377 | * | |
2378 | * Returns 0 on success, -EERROR otherwise. | |
2379 | */ | |
2380 | int clk_set_parent(struct clk *clk, struct clk *parent) | |
2381 | { | |
91baa9ff JB |
2382 | int ret; |
2383 | ||
4dff95dc SB |
2384 | if (!clk) |
2385 | return 0; | |
2386 | ||
91baa9ff | 2387 | clk_prepare_lock(); |
55e9b8b7 JB |
2388 | |
2389 | if (clk->exclusive_count) | |
2390 | clk_core_rate_unprotect(clk->core); | |
2391 | ||
91baa9ff JB |
2392 | ret = clk_core_set_parent_nolock(clk->core, |
2393 | parent ? parent->core : NULL); | |
55e9b8b7 JB |
2394 | |
2395 | if (clk->exclusive_count) | |
2396 | clk_core_rate_protect(clk->core); | |
2397 | ||
91baa9ff JB |
2398 | clk_prepare_unlock(); |
2399 | ||
2400 | return ret; | |
b2476490 | 2401 | } |
4dff95dc | 2402 | EXPORT_SYMBOL_GPL(clk_set_parent); |
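/*
 * Illustrative sketch (not part of this file): switching a mux input after
 * checking the candidate with clk_has_parent(); the clock names are
 * hypothetical.
 *
 *	if (clk_has_parent(mux_clk, pll_clk)) {
 *		ret = clk_set_parent(mux_clk, pll_clk);
 *		if (ret)
 *			dev_err(dev, "failed to reparent mux: %d\n", ret);
 *	}
 */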
b2476490 | 2403 | |
9e4d04ad JB |
2404 | static int clk_core_set_phase_nolock(struct clk_core *core, int degrees) |
2405 | { | |
2406 | int ret = -EINVAL; | |
2407 | ||
2408 | lockdep_assert_held(&prepare_lock); | |
2409 | ||
2410 | if (!core) | |
2411 | return 0; | |
2412 | ||
e55a839a JB |
2413 | if (clk_core_rate_is_protected(core)) |
2414 | return -EBUSY; | |
2415 | ||
9e4d04ad JB |
2416 | trace_clk_set_phase(core, degrees); |
2417 | ||
7f95beea | 2418 | if (core->ops->set_phase) { |
9e4d04ad | 2419 | ret = core->ops->set_phase(core->hw, degrees); |
7f95beea SL |
2420 | if (!ret) |
2421 | core->phase = degrees; | |
2422 | } | |
9e4d04ad JB |
2423 | |
2424 | trace_clk_set_phase_complete(core, degrees); | |
2425 | ||
2426 | return ret; | |
2427 | } | |
2428 | ||
4dff95dc SB |
2429 | /** |
2430 | * clk_set_phase - adjust the phase shift of a clock signal | |
2431 | * @clk: clock signal source | |
2432 | * @degrees: number of degrees the signal is shifted | |
2433 | * | |
2434 | * Shifts the phase of a clock signal by the specified | |
2435 | * degrees. Returns 0 on success, -EERROR otherwise. | |
2436 | * | |
2437 | * This function makes no distinction about the input or reference | |
2438 | * signal that we adjust the clock signal phase against. For example, with | |
2439 | * phase-locked-loop clock signal generators we may shift phase with | |
2440 | * respect to the feedback clock signal input, but for other cases the | |
2441 | * clock phase may be shifted with respect to some other, unspecified | |
2442 | * signal. | |
2443 | * | |
2444 | * Additionally the concept of phase shift does not propagate through | |
2445 | * the clock tree hierarchy, which sets it apart from clock rates and | |
2446 | * clock accuracy. A parent clock phase attribute does not have an | |
2447 | * impact on the phase attribute of a child clock. | |
b2476490 | 2448 | */ |
4dff95dc | 2449 | int clk_set_phase(struct clk *clk, int degrees) |
b2476490 | 2450 | { |
9e4d04ad | 2451 | int ret; |
b2476490 | 2452 | |
4dff95dc SB |
2453 | if (!clk) |
2454 | return 0; | |
b2476490 | 2455 | |
4dff95dc SB |
2456 | /* sanity check degrees */ |
2457 | degrees %= 360; | |
2458 | if (degrees < 0) | |
2459 | degrees += 360; | |
bf47b4fd | 2460 | |
4dff95dc | 2461 | clk_prepare_lock(); |
3fa2252b | 2462 | |
55e9b8b7 JB |
2463 | if (clk->exclusive_count) |
2464 | clk_core_rate_unprotect(clk->core); | |
3fa2252b | 2465 | |
9e4d04ad | 2466 | ret = clk_core_set_phase_nolock(clk->core, degrees); |
3fa2252b | 2467 | |
55e9b8b7 JB |
2468 | if (clk->exclusive_count) |
2469 | clk_core_rate_protect(clk->core); | |
b2476490 | 2470 | |
4dff95dc | 2471 | clk_prepare_unlock(); |
dfc202ea | 2472 | |
4dff95dc SB |
2473 | return ret; |
2474 | } | |
2475 | EXPORT_SYMBOL_GPL(clk_set_phase); | |
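/*
 * Illustrative sketch (not part of this file): shifting a sample clock by a
 * quarter period, e.g. during a (hypothetical) MMC tuning procedure, and
 * reading the result back with clk_get_phase().
 *
 *	ret = clk_set_phase(sample_clk, 90);
 *	if (!ret)
 *		dev_dbg(dev, "sample clk phase: %d\n", clk_get_phase(sample_clk));
 */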
b2476490 | 2476 | |
4dff95dc SB |
2477 | static int clk_core_get_phase(struct clk_core *core) |
2478 | { | |
2479 | int ret; | |
b2476490 | 2480 | |
4dff95dc | 2481 | clk_prepare_lock(); |
1f9c63e8 SL |
2482 | /* Always try to update cached phase if possible */ |
2483 | if (core->ops->get_phase) | |
2484 | core->phase = core->ops->get_phase(core->hw); | |
4dff95dc SB |
2485 | ret = core->phase; |
2486 | clk_prepare_unlock(); | |
71472c0c | 2487 | |
4dff95dc | 2488 | return ret; |
b2476490 MT |
2489 | } |
2490 | ||
4dff95dc SB |
2491 | /** |
2492 | * clk_get_phase - return the phase shift of a clock signal | |
2493 | * @clk: clock signal source | |
2494 | * | |
2495 | * Returns the phase shift of a clock node in degrees, otherwise returns | |
2496 | * -EERROR. | |
2497 | */ | |
2498 | int clk_get_phase(struct clk *clk) | |
1c8e6004 | 2499 | { |
4dff95dc | 2500 | if (!clk) |
1c8e6004 TV |
2501 | return 0; |
2502 | ||
4dff95dc SB |
2503 | return clk_core_get_phase(clk->core); |
2504 | } | |
2505 | EXPORT_SYMBOL_GPL(clk_get_phase); | |
1c8e6004 | 2506 | |
9fba738a JB |
2507 | static void clk_core_reset_duty_cycle_nolock(struct clk_core *core) |
2508 | { | |
2509 | /* Assume a default value of 50% */ | |
2510 | core->duty.num = 1; | |
2511 | core->duty.den = 2; | |
2512 | } | |
2513 | ||
2514 | static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core); | |
2515 | ||
2516 | static int clk_core_update_duty_cycle_nolock(struct clk_core *core) | |
2517 | { | |
2518 | struct clk_duty *duty = &core->duty; | |
2519 | int ret = 0; | |
2520 | ||
2521 | if (!core->ops->get_duty_cycle) | |
2522 | return clk_core_update_duty_cycle_parent_nolock(core); | |
2523 | ||
2524 | ret = core->ops->get_duty_cycle(core->hw, duty); | |
2525 | if (ret) | |
2526 | goto reset; | |
2527 | ||
2528 | /* Don't trust the clock provider too much */ | |
2529 | if (duty->den == 0 || duty->num > duty->den) { | |
2530 | ret = -EINVAL; | |
2531 | goto reset; | |
2532 | } | |
2533 | ||
2534 | return 0; | |
2535 | ||
2536 | reset: | |
2537 | clk_core_reset_duty_cycle_nolock(core); | |
2538 | return ret; | |
2539 | } | |
2540 | ||
2541 | static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core) | |
2542 | { | |
2543 | int ret = 0; | |
2544 | ||
2545 | if (core->parent && | |
2546 | core->flags & CLK_DUTY_CYCLE_PARENT) { | |
2547 | ret = clk_core_update_duty_cycle_nolock(core->parent); | |
2548 | memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); | |
2549 | } else { | |
2550 | clk_core_reset_duty_cycle_nolock(core); | |
2551 | } | |
2552 | ||
2553 | return ret; | |
2554 | } | |
2555 | ||
2556 | static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, | |
2557 | struct clk_duty *duty); | |
2558 | ||
2559 | static int clk_core_set_duty_cycle_nolock(struct clk_core *core, | |
2560 | struct clk_duty *duty) | |
2561 | { | |
2562 | int ret; | |
2563 | ||
2564 | lockdep_assert_held(&prepare_lock); | |
2565 | ||
2566 | if (clk_core_rate_is_protected(core)) | |
2567 | return -EBUSY; | |
2568 | ||
2569 | trace_clk_set_duty_cycle(core, duty); | |
2570 | ||
2571 | if (!core->ops->set_duty_cycle) | |
2572 | return clk_core_set_duty_cycle_parent_nolock(core, duty); | |
2573 | ||
2574 | ret = core->ops->set_duty_cycle(core->hw, duty); | |
2575 | if (!ret) | |
2576 | memcpy(&core->duty, duty, sizeof(*duty)); | |
2577 | ||
2578 | trace_clk_set_duty_cycle_complete(core, duty); | |
2579 | ||
2580 | return ret; | |
2581 | } | |
2582 | ||
2583 | static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core, | |
2584 | struct clk_duty *duty) | |
2585 | { | |
2586 | int ret = 0; | |
2587 | ||
2588 | if (core->parent && | |
2589 | core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) { | |
2590 | ret = clk_core_set_duty_cycle_nolock(core->parent, duty); | |
2591 | memcpy(&core->duty, &core->parent->duty, sizeof(core->duty)); | |
2592 | } | |
2593 | ||
2594 | return ret; | |
2595 | } | |
2596 | ||
2597 | /** | |
2598 | * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal | |
2599 | * @clk: clock signal source | |
2600 | * @num: numerator of the duty cycle ratio to be applied | |
2601 | * @den: denominator of the duty cycle ratio to be applied | |
2602 | * | |
2603 | * Apply the duty cycle ratio if the ratio is valid and the clock can | |
2604 | * perform this operation | |
2605 | * | |
2606 | * Returns (0) on success, a negative errno otherwise. | |
2607 | */ | |
2608 | int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den) | |
2609 | { | |
2610 | int ret; | |
2611 | struct clk_duty duty; | |
2612 | ||
2613 | if (!clk) | |
2614 | return 0; | |
2615 | ||
2616 | /* sanity check the ratio */ | |
2617 | if (den == 0 || num > den) | |
2618 | return -EINVAL; | |
2619 | ||
2620 | duty.num = num; | |
2621 | duty.den = den; | |
2622 | ||
2623 | clk_prepare_lock(); | |
2624 | ||
2625 | if (clk->exclusive_count) | |
2626 | clk_core_rate_unprotect(clk->core); | |
2627 | ||
2628 | ret = clk_core_set_duty_cycle_nolock(clk->core, &duty); | |
2629 | ||
2630 | if (clk->exclusive_count) | |
2631 | clk_core_rate_protect(clk->core); | |
2632 | ||
2633 | clk_prepare_unlock(); | |
2634 | ||
2635 | return ret; | |
2636 | } | |
2637 | EXPORT_SYMBOL_GPL(clk_set_duty_cycle); | |
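/*
 * Illustrative sketch (not part of this file): requesting a 25% duty cycle
 * and reading it back as a percentage with clk_get_scaled_duty_cycle(); the
 * "pwm_clk" handle is hypothetical.
 *
 *	ret = clk_set_duty_cycle(pwm_clk, 1, 4);
 *	if (!ret)
 *		dev_dbg(dev, "duty: %d%%\n", clk_get_scaled_duty_cycle(pwm_clk, 100));
 */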
2638 | ||
2639 | static int clk_core_get_scaled_duty_cycle(struct clk_core *core, | |
2640 | unsigned int scale) | |
2641 | { | |
2642 | struct clk_duty *duty = &core->duty; | |
2643 | int ret; | |
2644 | ||
2645 | clk_prepare_lock(); | |
2646 | ||
2647 | ret = clk_core_update_duty_cycle_nolock(core); | |
2648 | if (!ret) | |
2649 | ret = mult_frac(scale, duty->num, duty->den); | |
2650 | ||
2651 | clk_prepare_unlock(); | |
2652 | ||
2653 | return ret; | |
2654 | } | |
2655 | ||
2656 | /** | |
2657 | * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal | |
2658 | * @clk: clock signal source | |
2659 | * @scale: scaling factor to be applied to represent the ratio as an integer | |
2660 | * | |
2661 | * Returns the duty cycle ratio of a clock node multiplied by the provided | |
2662 | * scaling factor, or negative errno on error. | |
2663 | */ | |
2664 | int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale) | |
2665 | { | |
2666 | if (!clk) | |
2667 | return 0; | |
2668 | ||
2669 | return clk_core_get_scaled_duty_cycle(clk->core, scale); | |
2670 | } | |
2671 | EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle); | |
2672 | ||
4dff95dc SB |
2673 | /** |
2674 | * clk_is_match - check if two clk's point to the same hardware clock | |
2675 | * @p: clk compared against q | |
2676 | * @q: clk compared against p | |
2677 | * | |
2678 | * Returns true if the two struct clk pointers both point to the same hardware | |
2679 | * clock node. Put differently, returns true if struct clk *p and struct clk *q | |
2680 | * share the same struct clk_core object. | |
2681 | * | |
2682 | * Returns false otherwise. Note that two NULL clks are treated as matching. | |
2683 | */ | |
2684 | bool clk_is_match(const struct clk *p, const struct clk *q) | |
2685 | { | |
2686 | /* trivial case: identical struct clk's or both NULL */ | |
2687 | if (p == q) | |
2688 | return true; | |
1c8e6004 | 2689 | |
3fe003f9 | 2690 | /* true if clk->core pointers match. Avoid dereferencing garbage */ |
4dff95dc SB |
2691 | if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q)) |
2692 | if (p->core == q->core) | |
2693 | return true; | |
1c8e6004 | 2694 | |
4dff95dc SB |
2695 | return false; |
2696 | } | |
2697 | EXPORT_SYMBOL_GPL(clk_is_match); | |
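/*
 * Illustrative sketch (not part of this file): because clk_get() may hand
 * out distinct struct clk instances for the same hardware clock, comparing
 * the pointers directly is not reliable; clk_is_match() is the safe test.
 *
 *	if (clk_is_match(clk_a, clk_b))
 *		... both handles refer to the same clk_core ...
 */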
1c8e6004 | 2698 | |
4dff95dc | 2699 | /*** debugfs support ***/ |
1c8e6004 | 2700 | |
4dff95dc SB |
2701 | #ifdef CONFIG_DEBUG_FS |
2702 | #include <linux/debugfs.h> | |
1c8e6004 | 2703 | |
4dff95dc SB |
2704 | static struct dentry *rootdir; |
2705 | static int inited = 0; | |
2706 | static DEFINE_MUTEX(clk_debug_lock); | |
2707 | static HLIST_HEAD(clk_debug_list); | |
1c8e6004 | 2708 | |
4dff95dc SB |
2709 | static struct hlist_head *all_lists[] = { |
2710 | &clk_root_list, | |
2711 | &clk_orphan_list, | |
2712 | NULL, | |
2713 | }; | |
2714 | ||
2715 | static struct hlist_head *orphan_list[] = { | |
2716 | &clk_orphan_list, | |
2717 | NULL, | |
2718 | }; | |
2719 | ||
2720 | static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, | |
2721 | int level) | |
b2476490 | 2722 | { |
4dff95dc SB |
2723 | if (!c) |
2724 | return; | |
b2476490 | 2725 | |
9fba738a | 2726 | seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n", |
4dff95dc SB |
2727 | level * 3 + 1, "", |
2728 | 30 - level * 3, c->name, | |
e55a839a JB |
2729 | c->enable_count, c->prepare_count, c->protect_count, |
2730 | clk_core_get_rate(c), clk_core_get_accuracy(c), | |
9fba738a JB |
2731 | clk_core_get_phase(c), |
2732 | clk_core_get_scaled_duty_cycle(c, 100000)); | |
4dff95dc | 2733 | } |
89ac8d7a | 2734 | |
4dff95dc SB |
2735 | static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, |
2736 | int level) | |
2737 | { | |
2738 | struct clk_core *child; | |
b2476490 | 2739 | |
4dff95dc SB |
2740 | if (!c) |
2741 | return; | |
b2476490 | 2742 | |
4dff95dc | 2743 | clk_summary_show_one(s, c, level); |
0e1c0301 | 2744 | |
4dff95dc SB |
2745 | hlist_for_each_entry(child, &c->children, child_node) |
2746 | clk_summary_show_subtree(s, child, level + 1); | |
1c8e6004 | 2747 | } |
b2476490 | 2748 | |
4dff95dc | 2749 | static int clk_summary_show(struct seq_file *s, void *data) |
1c8e6004 | 2750 | { |
4dff95dc SB |
2751 | struct clk_core *c; |
2752 | struct hlist_head **lists = (struct hlist_head **)s->private; | |
1c8e6004 | 2753 | |
9fba738a JB |
2754 | seq_puts(s, " enable prepare protect duty\n"); |
2755 | seq_puts(s, " clock count count count rate accuracy phase cycle\n"); | |
2756 | seq_puts(s, "---------------------------------------------------------------------------------------------\n"); | |
b2476490 | 2757 | |
1c8e6004 TV |
2758 | clk_prepare_lock(); |
2759 | ||
4dff95dc SB |
2760 | for (; *lists; lists++) |
2761 | hlist_for_each_entry(c, *lists, child_node) | |
2762 | clk_summary_show_subtree(s, c, 0); | |
b2476490 | 2763 | |
eab89f69 | 2764 | clk_prepare_unlock(); |
b2476490 | 2765 | |
4dff95dc | 2766 | return 0; |
b2476490 | 2767 | } |
fec0ef3f | 2768 | DEFINE_SHOW_ATTRIBUTE(clk_summary); |
b2476490 | 2769 | |
4dff95dc SB |
2770 | static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) |
2771 | { | |
2772 | if (!c) | |
2773 | return; | |
b2476490 | 2774 | |
7cb81136 | 2775 | /* This should be JSON format, i.e. elements separated with a comma */ |
4dff95dc SB |
2776 | seq_printf(s, "\"%s\": { ", c->name); |
2777 | seq_printf(s, "\"enable_count\": %d,", c->enable_count); | |
2778 | seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); | |
e55a839a | 2779 | seq_printf(s, "\"protect_count\": %d,", c->protect_count); |
7cb81136 SW |
2780 | seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c)); |
2781 | seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c)); | |
4dff95dc | 2782 | seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c));
9fba738a JB |
2783 | seq_printf(s, "\"duty_cycle\": %u", |
2784 | clk_core_get_scaled_duty_cycle(c, 100000)); | |
b2476490 | 2785 | } |
b2476490 | 2786 | |
4dff95dc | 2787 | static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) |
b2476490 | 2788 | { |
4dff95dc | 2789 | struct clk_core *child; |
b2476490 | 2790 | |
4dff95dc SB |
2791 | if (!c) |
2792 | return; | |
b2476490 | 2793 | |
4dff95dc | 2794 | clk_dump_one(s, c, level); |
b2476490 | 2795 | |
4dff95dc | 2796 | hlist_for_each_entry(child, &c->children, child_node) { |
4d327586 | 2797 | seq_putc(s, ','); |
4dff95dc | 2798 | clk_dump_subtree(s, child, level + 1); |
b2476490 MT |
2799 | } |
2800 | ||
4d327586 | 2801 | seq_putc(s, '}'); |
b2476490 MT |
2802 | } |
2803 | ||
fec0ef3f | 2804 | static int clk_dump_show(struct seq_file *s, void *data) |
4e88f3de | 2805 | { |
4dff95dc SB |
2806 | struct clk_core *c; |
2807 | bool first_node = true; | |
2808 | struct hlist_head **lists = (struct hlist_head **)s->private; | |
4e88f3de | 2809 | |
4d327586 | 2810 | seq_putc(s, '{'); |
4dff95dc | 2811 | clk_prepare_lock(); |
035a61c3 | 2812 | |
4dff95dc SB |
2813 | for (; *lists; lists++) { |
2814 | hlist_for_each_entry(c, *lists, child_node) { | |
2815 | if (!first_node) | |
4d327586 | 2816 | seq_putc(s, ','); |
4dff95dc SB |
2817 | first_node = false; |
2818 | clk_dump_subtree(s, c, 0); | |
2819 | } | |
2820 | } | |
4e88f3de | 2821 | |
4dff95dc | 2822 | clk_prepare_unlock(); |
4e88f3de | 2823 | |
70e9f4dd | 2824 | seq_puts(s, "}\n"); |
4dff95dc | 2825 | return 0; |
4e88f3de | 2826 | } |
fec0ef3f | 2827 | DEFINE_SHOW_ATTRIBUTE(clk_dump); |
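/*
 * For reference, the dump above yields one JSON object per clock, read from
 * <debugfs>/clk/clk_dump (clk_orphan_dump for orphans); roughly, with
 * hypothetical values:
 *
 *	{"pll1": { "enable_count": 1,"prepare_count": 1,"protect_count": 0,
 *	"rate": 600000000,"accuracy": 0,"phase": 0,"duty_cycle": 50000,
 *	"pll1_div2": { ... }}}
 */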
89ac8d7a | 2828 | |
a6059ab9 GU |
2829 | static const struct { |
2830 | unsigned long flag; | |
2831 | const char *name; | |
2832 | } clk_flags[] = { | |
40dd71c7 | 2833 | #define ENTRY(f) { f, #f } |
a6059ab9 GU |
2834 | ENTRY(CLK_SET_RATE_GATE), |
2835 | ENTRY(CLK_SET_PARENT_GATE), | |
2836 | ENTRY(CLK_SET_RATE_PARENT), | |
2837 | ENTRY(CLK_IGNORE_UNUSED), | |
2838 | ENTRY(CLK_IS_BASIC), | |
2839 | ENTRY(CLK_GET_RATE_NOCACHE), | |
2840 | ENTRY(CLK_SET_RATE_NO_REPARENT), | |
2841 | ENTRY(CLK_GET_ACCURACY_NOCACHE), | |
2842 | ENTRY(CLK_RECALC_NEW_RATES), | |
2843 | ENTRY(CLK_SET_RATE_UNGATE), | |
2844 | ENTRY(CLK_IS_CRITICAL), | |
2845 | ENTRY(CLK_OPS_PARENT_ENABLE), | |
9fba738a | 2846 | ENTRY(CLK_DUTY_CYCLE_PARENT), |
a6059ab9 GU |
2847 | #undef ENTRY |
2848 | }; | |
2849 | ||
fec0ef3f | 2850 | static int clk_flags_show(struct seq_file *s, void *data) |
a6059ab9 GU |
2851 | { |
2852 | struct clk_core *core = s->private; | |
2853 | unsigned long flags = core->flags; | |
2854 | unsigned int i; | |
2855 | ||
2856 | for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) { | |
2857 | if (flags & clk_flags[i].flag) { | |
2858 | seq_printf(s, "%s\n", clk_flags[i].name); | |
2859 | flags &= ~clk_flags[i].flag; | |
2860 | } | |
2861 | } | |
2862 | if (flags) { | |
2863 | /* Unknown flags */ | |
2864 | seq_printf(s, "0x%lx\n", flags); | |
2865 | } | |
2866 | ||
2867 | return 0; | |
2868 | } | |
fec0ef3f | 2869 | DEFINE_SHOW_ATTRIBUTE(clk_flags); |
a6059ab9 | 2870 | |
fec0ef3f | 2871 | static int possible_parents_show(struct seq_file *s, void *data) |
92031575 PDS |
2872 | { |
2873 | struct clk_core *core = s->private; | |
2874 | int i; | |
2875 | ||
2876 | for (i = 0; i < core->num_parents - 1; i++) | |
2877 | seq_printf(s, "%s ", core->parent_names[i]); | |
2878 | ||
2879 | seq_printf(s, "%s\n", core->parent_names[i]); | |
2880 | ||
2881 | return 0; | |
2882 | } | |
fec0ef3f | 2883 | DEFINE_SHOW_ATTRIBUTE(possible_parents); |
92031575 | 2884 | |
9fba738a JB |
2885 | static int clk_duty_cycle_show(struct seq_file *s, void *data) |
2886 | { | |
2887 | struct clk_core *core = s->private; | |
2888 | struct clk_duty *duty = &core->duty; | |
2889 | ||
2890 | seq_printf(s, "%u/%u\n", duty->num, duty->den); | |
2891 | ||
2892 | return 0; | |
2893 | } | |
2894 | DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle); | |
2895 | ||
8a26bbbb | 2896 | static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) |
4dff95dc | 2897 | { |
8a26bbbb | 2898 | struct dentry *root; |
b61c43c0 | 2899 | |
8a26bbbb GKH |
2900 | if (!core || !pdentry) |
2901 | return; | |
b2476490 | 2902 | |
8a26bbbb GKH |
2903 | root = debugfs_create_dir(core->name, pdentry); |
2904 | core->dentry = root; | |
92031575 | 2905 | |
8a26bbbb GKH |
2906 | debugfs_create_ulong("clk_rate", 0444, root, &core->rate); |
2907 | debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy); | |
2908 | debugfs_create_u32("clk_phase", 0444, root, &core->phase); | |
2909 | debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops); | |
2910 | debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count); | |
2911 | debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count); | |
2912 | debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count); | |
2913 | debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count); | |
9fba738a JB |
2914 | debugfs_create_file("clk_duty_cycle", 0444, root, core, |
2915 | &clk_duty_cycle_fops); | |
b2476490 | 2916 | |
8a26bbbb GKH |
2917 | if (core->num_parents > 1) |
2918 | debugfs_create_file("clk_possible_parents", 0444, root, core, | |
2919 | &possible_parents_fops); | |
b2476490 | 2920 | |
8a26bbbb GKH |
2921 | if (core->ops->debug_init) |
2922 | core->ops->debug_init(core->hw, core->dentry); | |
b2476490 | 2923 | } |
035a61c3 TV |
2924 | |
2925 | /** | |
6e5ab41b SB |
2926 | * clk_debug_register - add a clk node to the debugfs clk directory |
2927 | * @core: the clk being added to the debugfs clk directory | |
035a61c3 | 2928 | * |
6e5ab41b SB |
2929 | * Dynamically adds a clk to the debugfs clk directory if debugfs has been |
2930 | * initialized. Otherwise it bails out early since the debugfs clk directory | |
4dff95dc | 2931 | * will be created lazily by clk_debug_init as part of a late_initcall. |
035a61c3 | 2932 | */ |
8a26bbbb | 2933 | static void clk_debug_register(struct clk_core *core) |
035a61c3 | 2934 | { |
4dff95dc SB |
2935 | mutex_lock(&clk_debug_lock); |
2936 | hlist_add_head(&core->debug_node, &clk_debug_list); | |
db3188fa | 2937 | if (inited) |
8a26bbbb | 2938 | clk_debug_create_one(core, rootdir); |
4dff95dc | 2939 | mutex_unlock(&clk_debug_lock); |
035a61c3 | 2940 | } |
b2476490 | 2941 | |
4dff95dc | 2942 | /** |
6e5ab41b SB |
2943 | * clk_debug_unregister - remove a clk node from the debugfs clk directory |
2944 | * @core: the clk being removed from the debugfs clk directory | |
e59c5371 | 2945 | * |
6e5ab41b SB |
2946 | * Dynamically removes a clk and all its child nodes from the |
2947 | * debugfs clk directory if clk->dentry points to debugfs created by | |
706d5c73 | 2948 | * clk_debug_register in __clk_core_init. |
e59c5371 | 2949 | */ |
4dff95dc | 2950 | static void clk_debug_unregister(struct clk_core *core) |
e59c5371 | 2951 | { |
4dff95dc SB |
2952 | mutex_lock(&clk_debug_lock); |
2953 | hlist_del_init(&core->debug_node); | |
2954 | debugfs_remove_recursive(core->dentry); | |
2955 | core->dentry = NULL; | |
2956 | mutex_unlock(&clk_debug_lock); | |
2957 | } | |
e59c5371 | 2958 | |
4dff95dc | 2959 | /** |
6e5ab41b | 2960 | * clk_debug_init - lazily populate the debugfs clk directory |
4dff95dc | 2961 | * |
6e5ab41b SB |
2962 | * clks are often initialized very early during boot before memory can be |
2963 | * dynamically allocated and well before debugfs is set up. This function | |
2964 | * populates the debugfs clk directory once at boot-time when we know that | |
2965 | * debugfs is set up. It should only be called once at boot-time; all other clks | |
2966 | * added dynamically will be registered with clk_debug_register. | |
4dff95dc SB |
2967 | */ |
2968 | static int __init clk_debug_init(void) | |
2969 | { | |
2970 | struct clk_core *core; | |
dfc202ea | 2971 | |
4dff95dc | 2972 | rootdir = debugfs_create_dir("clk", NULL); |
e59c5371 | 2973 | |
8a26bbbb GKH |
2974 | debugfs_create_file("clk_summary", 0444, rootdir, &all_lists, |
2975 | &clk_summary_fops); | |
2976 | debugfs_create_file("clk_dump", 0444, rootdir, &all_lists, | |
2977 | &clk_dump_fops); | |
2978 | debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list, | |
2979 | &clk_summary_fops); | |
2980 | debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list, | |
2981 | &clk_dump_fops); | |
e59c5371 | 2982 | |
4dff95dc SB |
2983 | mutex_lock(&clk_debug_lock); |
2984 | hlist_for_each_entry(core, &clk_debug_list, debug_node) | |
2985 | clk_debug_create_one(core, rootdir); | |
e59c5371 | 2986 | |
4dff95dc SB |
2987 | inited = 1; |
2988 | mutex_unlock(&clk_debug_lock); | |
e59c5371 | 2989 | |
4dff95dc SB |
2990 | return 0; |
2991 | } | |
2992 | late_initcall(clk_debug_init); | |
2993 | #else | |
8a26bbbb | 2994 | static inline void clk_debug_register(struct clk_core *core) { } |
4dff95dc SB |
2995 | static inline void clk_debug_reparent(struct clk_core *core, |
2996 | struct clk_core *new_parent) | |
035a61c3 | 2997 | { |
035a61c3 | 2998 | } |
4dff95dc | 2999 | static inline void clk_debug_unregister(struct clk_core *core) |
3d3801ef | 3000 | { |
3d3801ef | 3001 | } |
4dff95dc | 3002 | #endif |
3d3801ef | 3003 | |
b2476490 | 3004 | /** |
be45ebf2 | 3005 | * __clk_core_init - initialize the data structures in a struct clk_core |
d35c80c2 | 3006 | * @core: clk_core being initialized |
b2476490 | 3007 | * |
035a61c3 | 3008 | * Initializes the lists in struct clk_core, queries the hardware for the |
b2476490 | 3009 | * parent and rate and sets them both. |
b2476490 | 3010 | */ |
be45ebf2 | 3011 | static int __clk_core_init(struct clk_core *core) |
b2476490 | 3012 | { |
9a34b453 | 3013 | int i, ret; |
035a61c3 | 3014 | struct clk_core *orphan; |
b67bfe0d | 3015 | struct hlist_node *tmp2; |
1c8e6004 | 3016 | unsigned long rate; |
b2476490 | 3017 | |
d35c80c2 | 3018 | if (!core) |
d1302a36 | 3019 | return -EINVAL; |
b2476490 | 3020 | |
eab89f69 | 3021 | clk_prepare_lock(); |
b2476490 | 3022 | |
9a34b453 MS |
3023 | ret = clk_pm_runtime_get(core); |
3024 | if (ret) | |
3025 | goto unlock; | |
3026 | ||
b2476490 | 3027 | /* check to see if a clock with this name is already registered */ |
d6968fca | 3028 | if (clk_core_lookup(core->name)) { |
d1302a36 | 3029 | pr_debug("%s: clk %s already initialized\n", |
d6968fca | 3030 | __func__, core->name); |
d1302a36 | 3031 | ret = -EEXIST; |
b2476490 | 3032 | goto out; |
d1302a36 | 3033 | } |
b2476490 | 3034 | |
5fb94e9c | 3035 | /* check that clk_ops are sane. See Documentation/driver-api/clk.rst */ |
d6968fca SB |
3036 | if (core->ops->set_rate && |
3037 | !((core->ops->round_rate || core->ops->determine_rate) && | |
3038 | core->ops->recalc_rate)) { | |
c44fccb5 MY |
3039 | pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n", |
3040 | __func__, core->name); | |
d1302a36 | 3041 | ret = -EINVAL; |
d4d7e3dd MT |
3042 | goto out; |
3043 | } | |
3044 | ||
d6968fca | 3045 | if (core->ops->set_parent && !core->ops->get_parent) { |
c44fccb5 MY |
3046 | pr_err("%s: %s must implement .get_parent & .set_parent\n", |
3047 | __func__, core->name); | |
d1302a36 | 3048 | ret = -EINVAL; |
d4d7e3dd MT |
3049 | goto out; |
3050 | } | |
3051 | ||
3c8e77dd MY |
3052 | if (core->num_parents > 1 && !core->ops->get_parent) { |
3053 | pr_err("%s: %s must implement .get_parent as it has multi parents\n", | |
3054 | __func__, core->name); | |
3055 | ret = -EINVAL; | |
3056 | goto out; | |
3057 | } | |
3058 | ||
d6968fca SB |
3059 | if (core->ops->set_rate_and_parent && |
3060 | !(core->ops->set_parent && core->ops->set_rate)) { | |
c44fccb5 | 3061 | pr_err("%s: %s must implement .set_parent & .set_rate\n", |
d6968fca | 3062 | __func__, core->name); |
3fa2252b SB |
3063 | ret = -EINVAL; |
3064 | goto out; | |
3065 | } | |
3066 | ||
b2476490 | 3067 | /* throw a WARN if any entries in parent_names are NULL */ |
d6968fca SB |
3068 | for (i = 0; i < core->num_parents; i++) |
3069 | WARN(!core->parent_names[i], | |
b2476490 | 3070 | "%s: invalid NULL in %s's .parent_names\n", |
d6968fca | 3071 | __func__, core->name); |
b2476490 | 3072 | |
d6968fca | 3073 | core->parent = __clk_init_parent(core); |
b2476490 MT |
3074 | |
3075 | /* | |
706d5c73 SB |
3076 | * Populate core->parent if parent has already been clk_core_init'd. If |
3077 | * parent has not yet been clk_core_init'd then place clk in the orphan | |
47b0eeb3 | 3078 | * list. If clk doesn't have any parents then place it in the root |
b2476490 MT |
3079 | * clk list. |
3080 | * | |
3081 | * Every time a new clk is clk_init'd then we walk the list of orphan | |
3082 | * clocks and re-parent any that are children of the clock currently | |
3083 | * being clk_init'd. | |
3084 | */ | |
e6500344 | 3085 | if (core->parent) { |
d6968fca SB |
3086 | hlist_add_head(&core->child_node, |
3087 | &core->parent->children); | |
e6500344 | 3088 | core->orphan = core->parent->orphan; |
47b0eeb3 | 3089 | } else if (!core->num_parents) { |
d6968fca | 3090 | hlist_add_head(&core->child_node, &clk_root_list); |
e6500344 HS |
3091 | core->orphan = false; |
3092 | } else { | |
d6968fca | 3093 | hlist_add_head(&core->child_node, &clk_orphan_list); |
e6500344 HS |
3094 | core->orphan = true; |
3095 | } | |
b2476490 | 3096 | |
541debae JB |
3097 | /* |
3098 | * optional platform-specific magic | |
3099 | * | |
3100 | * The .init callback is not used by any of the basic clock types, but | |
3101 | * exists for weird hardware that must perform initialization magic. | |
3102 | * Please consider other ways of solving initialization problems before | |
3103 | * using this callback, as its use is discouraged. | |
3104 | */ | |
3105 | if (core->ops->init) | |
3106 | core->ops->init(core->hw); | |
3107 | ||
5279fc40 BB |
3108 | /* |
3109 | * Set clk's accuracy. The preferred method is to use | |
3110 | * .recalc_accuracy. For simple clocks and lazy developers the default | |
3111 | * fallback is to use the parent's accuracy. If a clock doesn't have a | |
3112 | * parent (or is orphaned) then accuracy is set to zero (perfect | |
3113 | * clock). | |
3114 | */ | |
d6968fca SB |
3115 | if (core->ops->recalc_accuracy) |
3116 | core->accuracy = core->ops->recalc_accuracy(core->hw, | |
3117 | __clk_get_accuracy(core->parent)); | |
3118 | else if (core->parent) | |
3119 | core->accuracy = core->parent->accuracy; | |
5279fc40 | 3120 | else |
d6968fca | 3121 | core->accuracy = 0; |
5279fc40 | 3122 | |
9824cf73 MR |
3123 | /* |
3124 | * Set clk's phase. | |
3125 | * Since a phase is by definition relative to its parent, just | |
3126 | * query the current clock phase, or just assume it's in phase. | |
3127 | */ | |
d6968fca SB |
3128 | if (core->ops->get_phase) |
3129 | core->phase = core->ops->get_phase(core->hw); | |
9824cf73 | 3130 | else |
d6968fca | 3131 | core->phase = 0; |
9824cf73 | 3132 | |
9fba738a JB |
3133 | /* |
3134 | * Set clk's duty cycle. | |
3135 | */ | |
3136 | clk_core_update_duty_cycle_nolock(core); | |
3137 | ||
b2476490 MT |
3138 | /* |
3139 | * Set clk's rate. The preferred method is to use .recalc_rate. For | |
3140 | * simple clocks and lazy developers the default fallback is to use the | |
3141 | * parent's rate. If a clock doesn't have a parent (or is orphaned) | |
3142 | * then rate is set to zero. | |
3143 | */ | |
d6968fca SB |
3144 | if (core->ops->recalc_rate) |
3145 | rate = core->ops->recalc_rate(core->hw, | |
3146 | clk_core_get_rate_nolock(core->parent)); | |
3147 | else if (core->parent) | |
3148 | rate = core->parent->rate; | |
b2476490 | 3149 | else |
1c8e6004 | 3150 | rate = 0; |
d6968fca | 3151 | core->rate = core->req_rate = rate; |
b2476490 | 3152 | |
99652a46 JB |
3153 | /* |
3154 | * Enable CLK_IS_CRITICAL clocks so newly added critical clocks | |
3155 | * don't get accidentally disabled when walking the orphan tree and | |
3156 | * reparenting clocks | |
3157 | */ | |
3158 | if (core->flags & CLK_IS_CRITICAL) { | |
3159 | unsigned long flags; | |
3160 | ||
3161 | clk_core_prepare(core); | |
3162 | ||
3163 | flags = clk_enable_lock(); | |
3164 | clk_core_enable(core); | |
3165 | clk_enable_unlock(flags); | |
3166 | } | |
3167 | ||
b2476490 | 3168 | /* |
0e8f6e49 MY |
3169 | * walk the list of orphan clocks and reparent any that have newly |
3170 | * found a parent. |
b2476490 | 3171 | */ |
b67bfe0d | 3172 | hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { |
0e8f6e49 | 3173 | struct clk_core *parent = __clk_init_parent(orphan); |
1f61e5f1 | 3174 | |
904e6ead | 3175 | /* |
99652a46 JB |
3176 | * We need to use __clk_set_parent_before() and _after() to |
3177 | * properly migrate any prepare/enable count of the orphan |
3178 | * clock. This is important for CLK_IS_CRITICAL clocks, which | |
3179 | * are enabled during init but might not have a parent yet. | |
904e6ead MT |
3180 | */ |
3181 | if (parent) { | |
f8f8f1d0 | 3182 | /* update the clk tree topology */ |
99652a46 JB |
3183 | __clk_set_parent_before(orphan, parent); |
3184 | __clk_set_parent_after(orphan, parent, NULL); | |
904e6ead MT |
3185 | __clk_recalc_accuracies(orphan); |
3186 | __clk_recalc_rates(orphan, 0); | |
3187 | } | |
0e8f6e49 | 3188 | } |
b2476490 | 3189 | |
d6968fca | 3190 | kref_init(&core->ref); |
b2476490 | 3191 | out: |
9a34b453 MS |
3192 | clk_pm_runtime_put(core); |
3193 | unlock: | |
eab89f69 | 3194 | clk_prepare_unlock(); |
b2476490 | 3195 | |
89f7e9de | 3196 | if (!ret) |
d6968fca | 3197 | clk_debug_register(core); |
89f7e9de | 3198 | |
d1302a36 | 3199 | return ret; |
b2476490 MT |
3200 | } |
3201 | ||
035a61c3 TV |
3202 | struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id, |
3203 | const char *con_id) | |
0197b3ea | 3204 | { |
0197b3ea SK |
3205 | struct clk *clk; |
3206 | ||
035a61c3 | 3207 | /* This is to allow this function to be chained to others */ |
c1de1357 | 3208 | if (IS_ERR_OR_NULL(hw)) |
8a23133c | 3209 | return ERR_CAST(hw); |
0197b3ea | 3210 | |
035a61c3 TV |
3211 | clk = kzalloc(sizeof(*clk), GFP_KERNEL); |
3212 | if (!clk) | |
3213 | return ERR_PTR(-ENOMEM); | |
3214 | ||
3215 | clk->core = hw->core; | |
3216 | clk->dev_id = dev_id; | |
253160a8 | 3217 | clk->con_id = kstrdup_const(con_id, GFP_KERNEL); |
1c8e6004 TV |
3218 | clk->max_rate = ULONG_MAX; |
3219 | ||
3220 | clk_prepare_lock(); | |
50595f8b | 3221 | hlist_add_head(&clk->clks_node, &hw->core->clks); |
1c8e6004 | 3222 | clk_prepare_unlock(); |
0197b3ea SK |
3223 | |
3224 | return clk; | |
3225 | } | |
035a61c3 | 3226 | |
365f7a89 | 3227 | /* keep in sync with __clk_put */ |
73e0e496 | 3228 | void __clk_free_clk(struct clk *clk) |
1c8e6004 TV |
3229 | { |
3230 | clk_prepare_lock(); | |
50595f8b | 3231 | hlist_del(&clk->clks_node); |
1c8e6004 TV |
3232 | clk_prepare_unlock(); |
3233 | ||
253160a8 | 3234 | kfree_const(clk->con_id); |
1c8e6004 TV |
3235 | kfree(clk); |
3236 | } | |
0197b3ea | 3237 | |
293ba3b4 SB |
3238 | /** |
3239 | * clk_register - allocate a new clock, register it and return an opaque cookie | |
3240 | * @dev: device that is registering this clock | |
3241 | * @hw: link to hardware-specific clock data | |
3242 | * | |
3243 | * clk_register is the primary interface for populating the clock tree with new | |
3244 | * clock nodes. It returns a pointer to the newly allocated struct clk which | |
a59a5163 | 3245 | * cannot be dereferenced by driver code but may be used in conjunction with the |
293ba3b4 SB |
3246 | * rest of the clock API. In the event of an error clk_register will return an |
3247 | * error code; drivers must test for an error code after calling clk_register. | |
3248 | */ | |
3249 | struct clk *clk_register(struct device *dev, struct clk_hw *hw) | |
b2476490 | 3250 | { |
d1302a36 | 3251 | int i, ret; |
d6968fca | 3252 | struct clk_core *core; |
293ba3b4 | 3253 | |
d6968fca SB |
3254 | core = kzalloc(sizeof(*core), GFP_KERNEL); |
3255 | if (!core) { | |
293ba3b4 SB |
3256 | ret = -ENOMEM; |
3257 | goto fail_out; | |
3258 | } | |
b2476490 | 3259 | |
d6968fca SB |
3260 | core->name = kstrdup_const(hw->init->name, GFP_KERNEL); |
3261 | if (!core->name) { | |
0197b3ea SK |
3262 | ret = -ENOMEM; |
3263 | goto fail_name; | |
3264 | } | |
29fd2a34 JB |
3265 | |
3266 | if (WARN_ON(!hw->init->ops)) { | |
3267 | ret = -EINVAL; | |
3268 | goto fail_ops; | |
3269 | } | |
d6968fca | 3270 | core->ops = hw->init->ops; |
29fd2a34 | 3271 | |
9a34b453 MS |
3272 | if (dev && pm_runtime_enabled(dev)) |
3273 | core->dev = dev; | |
ac2df527 | 3274 | if (dev && dev->driver) |
d6968fca SB |
3275 | core->owner = dev->driver->owner; |
3276 | core->hw = hw; | |
3277 | core->flags = hw->init->flags; | |
3278 | core->num_parents = hw->init->num_parents; | |
9783c0d9 SB |
3279 | core->min_rate = 0; |
3280 | core->max_rate = ULONG_MAX; | |
d6968fca | 3281 | hw->core = core; |
b2476490 | 3282 | |
d1302a36 | 3283 | /* allocate local copy in case parent_names is __initdata */ |
d6968fca | 3284 | core->parent_names = kcalloc(core->num_parents, sizeof(char *), |
96a7ed90 | 3285 | GFP_KERNEL); |
d1302a36 | 3286 | |
d6968fca | 3287 | if (!core->parent_names) { |
d1302a36 MT |
3288 | ret = -ENOMEM; |
3289 | goto fail_parent_names; | |
3290 | } | |
3291 | ||
3292 | ||
3293 | /* copy each string name in case parent_names is __initdata */ | |
d6968fca SB |
3294 | for (i = 0; i < core->num_parents; i++) { |
3295 | core->parent_names[i] = kstrdup_const(hw->init->parent_names[i], | |
0197b3ea | 3296 | GFP_KERNEL); |
d6968fca | 3297 | if (!core->parent_names[i]) { |
d1302a36 MT |
3298 | ret = -ENOMEM; |
3299 | goto fail_parent_names_copy; | |
3300 | } | |
3301 | } | |
3302 | ||
176d1169 MY |
3303 | /* avoid unnecessary string look-ups of clk_core's possible parents. */ |
3304 | core->parents = kcalloc(core->num_parents, sizeof(*core->parents), | |
3305 | GFP_KERNEL); | |
3306 | if (!core->parents) { | |
3307 | ret = -ENOMEM; | |
3308 | goto fail_parents; | |
3309 | } |
3310 | ||
d6968fca | 3311 | INIT_HLIST_HEAD(&core->clks); |
1c8e6004 | 3312 | |
035a61c3 TV |
3313 | hw->clk = __clk_create_clk(hw, NULL, NULL); |
3314 | if (IS_ERR(hw->clk)) { | |
035a61c3 | 3315 | ret = PTR_ERR(hw->clk); |
176d1169 | 3316 | goto fail_parents; |
035a61c3 TV |
3317 | } |
3318 | ||
be45ebf2 | 3319 | ret = __clk_core_init(core); |
d1302a36 | 3320 | if (!ret) |
035a61c3 | 3321 | return hw->clk; |
b2476490 | 3322 | |
1c8e6004 | 3323 | __clk_free_clk(hw->clk); |
035a61c3 | 3324 | hw->clk = NULL; |
b2476490 | 3325 | |
176d1169 MY |
3326 | fail_parents: |
3327 | kfree(core->parents); | |
d1302a36 MT |
3328 | fail_parent_names_copy: |
3329 | while (--i >= 0) | |
d6968fca SB |
3330 | kfree_const(core->parent_names[i]); |
3331 | kfree(core->parent_names); | |
d1302a36 | 3332 | fail_parent_names: |
29fd2a34 | 3333 | fail_ops: |
d6968fca | 3334 | kfree_const(core->name); |
0197b3ea | 3335 | fail_name: |
d6968fca | 3336 | kfree(core); |
d1302a36 MT |
3337 | fail_out: |
3338 | return ERR_PTR(ret); | |
b2476490 MT |
3339 | } |
3340 | EXPORT_SYMBOL_GPL(clk_register); | |
3341 | ||
4143804c SB |
3342 | /** |
3343 | * clk_hw_register - register a clk_hw and return an error code | |
3344 | * @dev: device that is registering this clock | |
3345 | * @hw: link to hardware-specific clock data | |
3346 | * | |
3347 | * clk_hw_register is the primary interface for populating the clock tree with | |
3348 | * new clock nodes. It returns an integer equal to zero indicating success or | |
3349 | * less than zero indicating failure. Drivers must test for an error code after | |
3350 | * calling clk_hw_register(). | |
3351 | */ | |
3352 | int clk_hw_register(struct device *dev, struct clk_hw *hw) | |
3353 | { | |
3354 | return PTR_ERR_OR_ZERO(clk_register(dev, hw)); | |
3355 | } | |
3356 | EXPORT_SYMBOL_GPL(clk_hw_register); | |
3357 | ||
6e5ab41b | 3358 | /* Free memory allocated for a clock. */ |
fcb0ee6a SN |
3359 | static void __clk_release(struct kref *ref) |
3360 | { | |
d6968fca SB |
3361 | struct clk_core *core = container_of(ref, struct clk_core, ref); |
3362 | int i = core->num_parents; | |
fcb0ee6a | 3363 | |
496eadf8 KK |
3364 | lockdep_assert_held(&prepare_lock); |
3365 | ||
d6968fca | 3366 | kfree(core->parents); |
fcb0ee6a | 3367 | while (--i >= 0) |
d6968fca | 3368 | kfree_const(core->parent_names[i]); |
fcb0ee6a | 3369 | |
d6968fca SB |
3370 | kfree(core->parent_names); |
3371 | kfree_const(core->name); | |
3372 | kfree(core); | |
fcb0ee6a SN |
3373 | } |
3374 | ||
3375 | /* | |
3376 | * Empty clk_ops for unregistered clocks. These are used temporarily | |
3377 | * after clk_unregister() was called on a clock and until the last clock |
3378 | * consumer calls clk_put() and the struct clk object is freed. | |
3379 | */ | |
3380 | static int clk_nodrv_prepare_enable(struct clk_hw *hw) | |
3381 | { | |
3382 | return -ENXIO; | |
3383 | } | |
3384 | ||
3385 | static void clk_nodrv_disable_unprepare(struct clk_hw *hw) | |
3386 | { | |
3387 | WARN_ON_ONCE(1); | |
3388 | } | |
3389 | ||
3390 | static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate, | |
3391 | unsigned long parent_rate) | |
3392 | { | |
3393 | return -ENXIO; | |
3394 | } | |
3395 | ||
3396 | static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index) | |
3397 | { | |
3398 | return -ENXIO; | |
3399 | } | |
3400 | ||
3401 | static const struct clk_ops clk_nodrv_ops = { | |
3402 | .enable = clk_nodrv_prepare_enable, | |
3403 | .disable = clk_nodrv_disable_unprepare, | |
3404 | .prepare = clk_nodrv_prepare_enable, | |
3405 | .unprepare = clk_nodrv_disable_unprepare, | |
3406 | .set_rate = clk_nodrv_set_rate, | |
3407 | .set_parent = clk_nodrv_set_parent, | |
3408 | }; | |
3409 | ||
1df5c939 MB |
3410 | /** |
3411 | * clk_unregister - unregister a currently registered clock | |
3412 | * @clk: clock to unregister | |
1df5c939 | 3413 | */ |
fcb0ee6a SN |
3414 | void clk_unregister(struct clk *clk) |
3415 | { | |
3416 | unsigned long flags; | |
3417 | ||
6314b679 SB |
3418 | if (!clk || WARN_ON_ONCE(IS_ERR(clk))) |
3419 | return; | |
3420 | ||
035a61c3 | 3421 | clk_debug_unregister(clk->core); |
fcb0ee6a SN |
3422 | |
3423 | clk_prepare_lock(); | |
3424 | ||
035a61c3 TV |
3425 | if (clk->core->ops == &clk_nodrv_ops) { |
3426 | pr_err("%s: unregistered clock: %s\n", __func__, | |
3427 | clk->core->name); | |
4106a3d9 | 3428 | goto unlock; |
fcb0ee6a SN |
3429 | } |
3430 | /* | |
3431 | * Assign empty clock ops for consumers that might still hold | |
3432 | * a reference to this clock. | |
3433 | */ | |
3434 | flags = clk_enable_lock(); | |
035a61c3 | 3435 | clk->core->ops = &clk_nodrv_ops; |
fcb0ee6a SN |
3436 | clk_enable_unlock(flags); |
3437 | ||
035a61c3 TV |
3438 | if (!hlist_empty(&clk->core->children)) { |
3439 | struct clk_core *child; | |
874f224c | 3440 | struct hlist_node *t; |
fcb0ee6a SN |
3441 | |
3442 | /* Reparent all children to the orphan list. */ | |
035a61c3 TV |
3443 | hlist_for_each_entry_safe(child, t, &clk->core->children, |
3444 | child_node) | |
91baa9ff | 3445 | clk_core_set_parent_nolock(child, NULL); |
fcb0ee6a SN |
3446 | } |
3447 | ||
035a61c3 | 3448 | hlist_del_init(&clk->core->child_node); |
fcb0ee6a | 3449 | |
035a61c3 | 3450 | if (clk->core->prepare_count) |
fcb0ee6a | 3451 | pr_warn("%s: unregistering prepared clock: %s\n", |
035a61c3 | 3452 | __func__, clk->core->name); |
e55a839a JB |
3453 | |
3454 | if (clk->core->protect_count) | |
3455 | pr_warn("%s: unregistering protected clock: %s\n", | |
3456 | __func__, clk->core->name); | |
3457 | ||
035a61c3 | 3458 | kref_put(&clk->core->ref, __clk_release); |
4106a3d9 | 3459 | unlock: |
fcb0ee6a SN |
3460 | clk_prepare_unlock(); |
3461 | } | |
1df5c939 MB |
3462 | EXPORT_SYMBOL_GPL(clk_unregister); |
3463 | ||
4143804c SB |
3464 | /** |
3465 | * clk_hw_unregister - unregister a currently registered clk_hw | |
3466 | * @hw: hardware-specific clock data to unregister | |
3467 | */ | |
3468 | void clk_hw_unregister(struct clk_hw *hw) | |
3469 | { | |
3470 | clk_unregister(hw->clk); | |
3471 | } | |
3472 | EXPORT_SYMBOL_GPL(clk_hw_unregister); | |
3473 | ||
46c8773a SB |
3474 | static void devm_clk_release(struct device *dev, void *res) |
3475 | { | |
293ba3b4 | 3476 | clk_unregister(*(struct clk **)res); |
46c8773a SB |
3477 | } |
3478 | ||
4143804c SB |
3479 | static void devm_clk_hw_release(struct device *dev, void *res) |
3480 | { | |
3481 | clk_hw_unregister(*(struct clk_hw **)res); | |
3482 | } | |
3483 | ||
46c8773a SB |
3484 | /** |
3485 | * devm_clk_register - resource managed clk_register() | |
3486 | * @dev: device that is registering this clock | |
3487 | * @hw: link to hardware-specific clock data | |
3488 | * | |
3489 | * Managed clk_register(). Clocks returned from this function are | |
3490 | * automatically clk_unregister()ed on driver detach. See clk_register() for | |
3491 | * more information. | |
3492 | */ | |
3493 | struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) | |
3494 | { | |
3495 | struct clk *clk; | |
293ba3b4 | 3496 | struct clk **clkp; |
46c8773a | 3497 | |
293ba3b4 SB |
3498 | clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL); |
3499 | if (!clkp) | |
46c8773a SB |
3500 | return ERR_PTR(-ENOMEM); |
3501 | ||
293ba3b4 SB |
3502 | clk = clk_register(dev, hw); |
3503 | if (!IS_ERR(clk)) { | |
3504 | *clkp = clk; | |
3505 | devres_add(dev, clkp); | |
46c8773a | 3506 | } else { |
293ba3b4 | 3507 | devres_free(clkp); |
46c8773a SB |
3508 | } |
3509 | ||
3510 | return clk; | |
3511 | } | |
3512 | EXPORT_SYMBOL_GPL(devm_clk_register); | |
3513 | ||
4143804c SB |
3514 | /** |
3515 | * devm_clk_hw_register - resource managed clk_hw_register() | |
3516 | * @dev: device that is registering this clock | |
3517 | * @hw: link to hardware-specific clock data | |
3518 | * | |
c47265ad | 3519 | * Managed clk_hw_register(). Clocks registered by this function are |
4143804c SB |
3520 | * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register() |
3521 | * for more information. | |
3522 | */ | |
3523 | int devm_clk_hw_register(struct device *dev, struct clk_hw *hw) | |
3524 | { | |
3525 | struct clk_hw **hwp; | |
3526 | int ret; | |
3527 | ||
3528 | hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL); | |
3529 | if (!hwp) | |
3530 | return -ENOMEM; | |
3531 | ||
3532 | ret = clk_hw_register(dev, hw); | |
3533 | if (!ret) { | |
3534 | *hwp = hw; | |
3535 | devres_add(dev, hwp); | |
3536 | } else { | |
3537 | devres_free(hwp); | |
3538 | } | |
3539 | ||
3540 | return ret; | |
3541 | } | |
3542 | EXPORT_SYMBOL_GPL(devm_clk_hw_register); | |
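/*
 * Illustrative sketch (editorial addition, not part of this file): the
 * managed registration pattern as used from a probe routine. The "bar"
 * names are hypothetical and hw->init is assumed to have been filled in
 * by the caller.
 */
static int bar_register_managed(struct device *dev, struct clk_hw *hw)
{
	int ret;

	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ret;

	/* expose the clock to DT consumers; undone automatically on detach */
	return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
}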
3543 | ||
46c8773a SB |
3544 | static int devm_clk_match(struct device *dev, void *res, void *data) |
3545 | { | |
3546 | struct clk *c = res; | |
3547 | if (WARN_ON(!c)) | |
3548 | return 0; | |
3549 | return c == data; | |
3550 | } | |
3551 | ||
4143804c SB |
3552 | static int devm_clk_hw_match(struct device *dev, void *res, void *data) |
3553 | { | |
3554 | struct clk_hw *hw = res; | |
3555 | ||
3556 | if (WARN_ON(!hw)) | |
3557 | return 0; | |
3558 | return hw == data; | |
3559 | } | |
3560 | ||
46c8773a SB |
3561 | /** |
3562 | * devm_clk_unregister - resource managed clk_unregister() | |
3563 | * @clk: clock to unregister | |
3564 | * | |
3565 | * Deallocate a clock allocated with devm_clk_register(). Normally | |
3566 | * this function will not need to be called and the resource management | |
3567 | * code will ensure that the resource is freed. | |
3568 | */ | |
3569 | void devm_clk_unregister(struct device *dev, struct clk *clk) | |
3570 | { | |
3571 | WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk)); | |
3572 | } | |
3573 | EXPORT_SYMBOL_GPL(devm_clk_unregister); | |
3574 | ||
4143804c SB |
3575 | /** |
3576 | * devm_clk_hw_unregister - resource managed clk_hw_unregister() | |
3577 | * @dev: device that is unregistering the hardware-specific clock data | |
3578 | * @hw: link to hardware-specific clock data | |
3579 | * | |
3580 | * Unregister a clk_hw registered with devm_clk_hw_register(). Normally | |
3581 | * this function will not need to be called and the resource management | |
3582 | * code will ensure that the resource is freed. | |
3583 | */ | |
3584 | void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw) | |
3585 | { | |
3586 | WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match, | |
3587 | hw)); | |
3588 | } | |
3589 | EXPORT_SYMBOL_GPL(devm_clk_hw_unregister); | |
3590 | ||
ac2df527 SN |
3591 | /* |
3592 | * clkdev helpers | |
3593 | */ | |
3594 | int __clk_get(struct clk *clk) | |
3595 | { | |
035a61c3 TV |
3596 | struct clk_core *core = !clk ? NULL : clk->core; |
3597 | ||
3598 | if (core) { | |
3599 | if (!try_module_get(core->owner)) | |
00efcb1c | 3600 | return 0; |
ac2df527 | 3601 | |
035a61c3 | 3602 | kref_get(&core->ref); |
00efcb1c | 3603 | } |
ac2df527 SN |
3604 | return 1; |
3605 | } | |
3606 | ||
365f7a89 | 3607 | /* keep in sync with __clk_free_clk */ |
ac2df527 SN |
3608 | void __clk_put(struct clk *clk) |
3609 | { | |
10cdfe54 TV |
3610 | struct module *owner; |
3611 | ||
00efcb1c | 3612 | if (!clk || WARN_ON_ONCE(IS_ERR(clk))) |
ac2df527 SN |
3613 | return; |
3614 | ||
fcb0ee6a | 3615 | clk_prepare_lock(); |
1c8e6004 | 3616 | |
55e9b8b7 JB |
3617 | /* |
3618 | * Before calling clk_put, all calls to clk_rate_exclusive_get() from a | |
3619 | * given user should be balanced with calls to clk_rate_exclusive_put() | |
3620 | * and by that same consumer | |
3621 | */ | |
3622 | if (WARN_ON(clk->exclusive_count)) { | |
3623 | /* We voiced our concern, let's sanitize the situation */ | |
3624 | clk->core->protect_count -= (clk->exclusive_count - 1); | |
3625 | clk_core_rate_unprotect(clk->core); | |
3626 | clk->exclusive_count = 0; | |
3627 | } | |
3628 | ||
50595f8b | 3629 | hlist_del(&clk->clks_node); |
ec02ace8 TV |
3630 | if (clk->min_rate > clk->core->req_rate || |
3631 | clk->max_rate < clk->core->req_rate) | |
3632 | clk_core_set_rate_nolock(clk->core, clk->core->req_rate); | |
3633 | ||
1c8e6004 TV |
3634 | owner = clk->core->owner; |
3635 | kref_put(&clk->core->ref, __clk_release); | |
3636 | ||
fcb0ee6a SN |
3637 | clk_prepare_unlock(); |
3638 | ||
10cdfe54 | 3639 | module_put(owner); |
035a61c3 | 3640 | |
365f7a89 | 3641 | kfree_const(clk->con_id); |
035a61c3 | 3642 | kfree(clk); |
ac2df527 SN |
3643 | } |
3644 | ||
b2476490 MT |
3645 | /*** clk rate change notifiers ***/ |
3646 | ||
3647 | /** | |
3648 | * clk_notifier_register - add a clk rate change notifier | |
3649 | * @clk: struct clk * to watch | |
3650 | * @nb: struct notifier_block * with callback info | |
3651 | * | |
3652 | * Request notification when clk's rate changes. This uses an SRCU | |
3653 | * notifier because we want it to block and notifier unregistrations are | |
3654 | * uncommon. The callbacks associated with the notifier must not | |
3655 | * re-enter the clk framework by calling any top-level clk APIs; |
3656 | * doing so would take the prepare_lock mutex recursively and deadlock. |
3657 | * | |
198bb594 MY |
3658 | * In all notification cases (pre, post and abort rate change) the original |
3659 | * clock rate is passed to the callback via struct clk_notifier_data.old_rate | |
3660 | * and the new frequency is passed via struct clk_notifier_data.new_rate. | |
b2476490 | 3661 | * |
b2476490 MT |
3662 | * clk_notifier_register() must be called from non-atomic context. |
3663 | * Returns -EINVAL if called with null arguments, -ENOMEM upon | |
3664 | * allocation failure; otherwise, passes along the return value of | |
3665 | * srcu_notifier_chain_register(). | |
3666 | */ | |
3667 | int clk_notifier_register(struct clk *clk, struct notifier_block *nb) | |
3668 | { | |
3669 | struct clk_notifier *cn; | |
3670 | int ret = -ENOMEM; | |
3671 | ||
3672 | if (!clk || !nb) | |
3673 | return -EINVAL; | |
3674 | ||
eab89f69 | 3675 | clk_prepare_lock(); |
b2476490 MT |
3676 | |
3677 | /* search the list of notifiers for this clk */ | |
3678 | list_for_each_entry(cn, &clk_notifier_list, node) | |
3679 | if (cn->clk == clk) | |
3680 | break; | |
3681 | ||
3682 | /* if clk wasn't in the notifier list, allocate new clk_notifier */ | |
3683 | if (cn->clk != clk) { | |
1808a320 | 3684 | cn = kzalloc(sizeof(*cn), GFP_KERNEL); |
b2476490 MT |
3685 | if (!cn) |
3686 | goto out; | |
3687 | ||
3688 | cn->clk = clk; | |
3689 | srcu_init_notifier_head(&cn->notifier_head); | |
3690 | ||
3691 | list_add(&cn->node, &clk_notifier_list); | |
3692 | } | |
3693 | ||
3694 | ret = srcu_notifier_chain_register(&cn->notifier_head, nb); | |
3695 | ||
035a61c3 | 3696 | clk->core->notifier_count++; |
b2476490 MT |
3697 | |
3698 | out: | |
eab89f69 | 3699 | clk_prepare_unlock(); |
b2476490 MT |
3700 | |
3701 | return ret; | |
3702 | } | |
3703 | EXPORT_SYMBOL_GPL(clk_notifier_register); | |
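/*
 * Illustrative sketch (editorial addition, not part of this file): a consumer
 * watching one of its clocks for rate changes. The callback receives a
 * struct clk_notifier_data via 'data' and, per the comment above, must not
 * call back into the top-level clk API. The "foo" names are hypothetical.
 */
static int foo_rate_change_cb(struct notifier_block *nb, unsigned long event,
			      void *data)
{
	struct clk_notifier_data *ndata = data;

	pr_debug("clk rate change: %lu -> %lu (event %lu)\n",
		 ndata->old_rate, ndata->new_rate, event);

	return NOTIFY_OK;
}

static struct notifier_block foo_clk_nb = {
	.notifier_call = foo_rate_change_cb,
};

/* paired as: clk_notifier_register(clk, &foo_clk_nb); ...
 *            clk_notifier_unregister(clk, &foo_clk_nb); */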
3704 | ||
3705 | /** | |
3706 | * clk_notifier_unregister - remove a clk rate change notifier | |
3707 | * @clk: struct clk * | |
3708 | * @nb: struct notifier_block * with callback info | |
3709 | * | |
3710 | * Requests no further notification for changes to 'clk' and frees the memory |
3711 | * allocated in clk_notifier_register. | |
3712 | * | |
3713 | * Returns -EINVAL if called with null arguments; otherwise, passes | |
3714 | * along the return value of srcu_notifier_chain_unregister(). | |
3715 | */ | |
3716 | int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) | |
3717 | { | |
3718 | struct clk_notifier *cn = NULL; | |
3719 | int ret = -EINVAL; | |
3720 | ||
3721 | if (!clk || !nb) | |
3722 | return -EINVAL; | |
3723 | ||
eab89f69 | 3724 | clk_prepare_lock(); |
b2476490 MT |
3725 | |
3726 | list_for_each_entry(cn, &clk_notifier_list, node) | |
3727 | if (cn->clk == clk) | |
3728 | break; | |
3729 | ||
3730 | if (cn->clk == clk) { | |
3731 | ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); | |
3732 | ||
035a61c3 | 3733 | clk->core->notifier_count--; |
b2476490 MT |
3734 | |
3735 | /* XXX the notifier code should handle this better */ | |
3736 | if (!cn->notifier_head.head) { | |
3737 | srcu_cleanup_notifier_head(&cn->notifier_head); | |
72b5322f | 3738 | list_del(&cn->node); |
b2476490 MT |
3739 | kfree(cn); |
3740 | } | |
3741 | ||
3742 | } else { | |
3743 | ret = -ENOENT; | |
3744 | } | |
3745 | ||
eab89f69 | 3746 | clk_prepare_unlock(); |
b2476490 MT |
3747 | |
3748 | return ret; | |
3749 | } | |
3750 | EXPORT_SYMBOL_GPL(clk_notifier_unregister); | |
766e6a4e GL |
3751 | |
3752 | #ifdef CONFIG_OF | |
3753 | /** | |
3754 | * struct of_clk_provider - Clock provider registration structure | |
3755 | * @link: Entry in global list of clock providers | |
3756 | * @node: Pointer to device tree node of clock provider | |
3757 | * @get: Get clock callback. Returns NULL or a struct clk for the | |
3758 | * given clock specifier | |
3759 | * @data: context pointer to be passed into @get callback | |
3760 | */ | |
3761 | struct of_clk_provider { | |
3762 | struct list_head link; | |
3763 | ||
3764 | struct device_node *node; | |
3765 | struct clk *(*get)(struct of_phandle_args *clkspec, void *data); | |
0861e5b8 | 3766 | struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data); |
766e6a4e GL |
3767 | void *data; |
3768 | }; | |
3769 | ||
f2f6c255 PG |
3770 | static const struct of_device_id __clk_of_table_sentinel |
3771 | __used __section(__clk_of_table_end); | |
3772 | ||
766e6a4e | 3773 | static LIST_HEAD(of_clk_providers); |
d6782c26 SN |
3774 | static DEFINE_MUTEX(of_clk_mutex); |
3775 | ||
766e6a4e GL |
3776 | struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, |
3777 | void *data) | |
3778 | { | |
3779 | return data; | |
3780 | } | |
3781 | EXPORT_SYMBOL_GPL(of_clk_src_simple_get); | |
3782 | ||
0861e5b8 SB |
3783 | struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data) |
3784 | { | |
3785 | return data; | |
3786 | } | |
3787 | EXPORT_SYMBOL_GPL(of_clk_hw_simple_get); | |
3788 | ||
494bfec9 SG |
3789 | struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data) |
3790 | { | |
3791 | struct clk_onecell_data *clk_data = data; | |
3792 | unsigned int idx = clkspec->args[0]; | |
3793 | ||
3794 | if (idx >= clk_data->clk_num) { | |
7e96353c | 3795 | pr_err("%s: invalid clock index %u\n", __func__, idx); |
494bfec9 SG |
3796 | return ERR_PTR(-EINVAL); |
3797 | } | |
3798 | ||
3799 | return clk_data->clks[idx]; | |
3800 | } | |
3801 | EXPORT_SYMBOL_GPL(of_clk_src_onecell_get); | |
3802 | ||
0861e5b8 SB |
3803 | struct clk_hw * |
3804 | of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data) | |
3805 | { | |
3806 | struct clk_hw_onecell_data *hw_data = data; | |
3807 | unsigned int idx = clkspec->args[0]; | |
3808 | ||
3809 | if (idx >= hw_data->num) { | |
3810 | pr_err("%s: invalid index %u\n", __func__, idx); | |
3811 | return ERR_PTR(-EINVAL); | |
3812 | } | |
3813 | ||
3814 | return hw_data->hws[idx]; | |
3815 | } | |
3816 | EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get); | |
3817 | ||
766e6a4e GL |
3818 | /** |
3819 | * of_clk_add_provider() - Register a clock provider for a node | |
3820 | * @np: Device node pointer associated with clock provider | |
3821 | * @clk_src_get: callback for decoding clock | |
3822 | * @data: context pointer for @clk_src_get callback. | |
3823 | */ | |
3824 | int of_clk_add_provider(struct device_node *np, | |
3825 | struct clk *(*clk_src_get)(struct of_phandle_args *clkspec, | |
3826 | void *data), | |
3827 | void *data) | |
3828 | { | |
3829 | struct of_clk_provider *cp; | |
86be408b | 3830 | int ret; |
766e6a4e | 3831 | |
1808a320 | 3832 | cp = kzalloc(sizeof(*cp), GFP_KERNEL); |
766e6a4e GL |
3833 | if (!cp) |
3834 | return -ENOMEM; | |
3835 | ||
3836 | cp->node = of_node_get(np); | |
3837 | cp->data = data; | |
3838 | cp->get = clk_src_get; | |
3839 | ||
d6782c26 | 3840 | mutex_lock(&of_clk_mutex); |
766e6a4e | 3841 | list_add(&cp->link, &of_clk_providers); |
d6782c26 | 3842 | mutex_unlock(&of_clk_mutex); |
16673931 | 3843 | pr_debug("Added clock from %pOF\n", np); |
766e6a4e | 3844 | |
86be408b SN |
3845 | ret = of_clk_set_defaults(np, true); |
3846 | if (ret < 0) | |
3847 | of_clk_del_provider(np); | |
3848 | ||
3849 | return ret; | |
766e6a4e GL |
3850 | } |
3851 | EXPORT_SYMBOL_GPL(of_clk_add_provider); | |
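/*
 * Illustrative sketch (editorial addition, not part of this file): exporting
 * several clocks from one DT node with the onecell helper above. The "foo"
 * names and the clock count are hypothetical; foo_clks[] is assumed to be
 * filled with the provider's registered clocks before publishing them.
 */
#define FOO_NR_CLKS	4

static struct clk *foo_clks[FOO_NR_CLKS];
static struct clk_onecell_data foo_clk_data = {
	.clks = foo_clks,
	.clk_num = FOO_NR_CLKS,
};

static int foo_publish_clks(struct device_node *np)
{
	return of_clk_add_provider(np, of_clk_src_onecell_get, &foo_clk_data);
}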
3852 | ||
0861e5b8 SB |
3853 | /** |
3854 | * of_clk_add_hw_provider() - Register a clock provider for a node | |
3855 | * @np: Device node pointer associated with clock provider | |
3856 | * @get: callback for decoding clk_hw | |
3857 | * @data: context pointer for @get callback. | |
3858 | */ | |
3859 | int of_clk_add_hw_provider(struct device_node *np, | |
3860 | struct clk_hw *(*get)(struct of_phandle_args *clkspec, | |
3861 | void *data), | |
3862 | void *data) | |
3863 | { | |
3864 | struct of_clk_provider *cp; | |
3865 | int ret; | |
3866 | ||
3867 | cp = kzalloc(sizeof(*cp), GFP_KERNEL); | |
3868 | if (!cp) | |
3869 | return -ENOMEM; | |
3870 | ||
3871 | cp->node = of_node_get(np); | |
3872 | cp->data = data; | |
3873 | cp->get_hw = get; | |
3874 | ||
3875 | mutex_lock(&of_clk_mutex); | |
3876 | list_add(&cp->link, &of_clk_providers); | |
3877 | mutex_unlock(&of_clk_mutex); | |
16673931 | 3878 | pr_debug("Added clk_hw provider from %pOF\n", np); |
0861e5b8 SB |
3879 | |
3880 | ret = of_clk_set_defaults(np, true); | |
3881 | if (ret < 0) | |
3882 | of_clk_del_provider(np); | |
3883 | ||
3884 | return ret; | |
3885 | } | |
3886 | EXPORT_SYMBOL_GPL(of_clk_add_hw_provider); | |
3887 | ||
aa795c41 SB |
3888 | static void devm_of_clk_release_provider(struct device *dev, void *res) |
3889 | { | |
3890 | of_clk_del_provider(*(struct device_node **)res); | |
3891 | } | |
3892 | ||
05502bf9 MV |
3893 | /* |
3894 | * We allow a child device to use its parent device as the clock provider node | |
3895 | * for cases like MFD sub-devices where the child device driver wants to use | |
3896 | * devm_*() APIs but not list the device in DT as a sub-node. | |
3897 | */ | |
3898 | static struct device_node *get_clk_provider_node(struct device *dev) | |
3899 | { | |
3900 | struct device_node *np, *parent_np; | |
3901 | ||
3902 | np = dev->of_node; | |
3903 | parent_np = dev->parent ? dev->parent->of_node : NULL; | |
3904 | ||
3905 | if (!of_find_property(np, "#clock-cells", NULL)) | |
3906 | if (of_find_property(parent_np, "#clock-cells", NULL)) | |
3907 | np = parent_np; | |
3908 | ||
3909 | return np; | |
3910 | } | |
3911 | ||
e45838b5 MV |
3912 | /** |
3913 | * devm_of_clk_add_hw_provider() - Managed clk provider node registration | |
3914 | * @dev: Device acting as the clock provider (used for DT node and lifetime) | |
3915 | * @get: callback for decoding clk_hw | |
3916 | * @data: context pointer for @get callback | |
3917 | * | |
05502bf9 MV |
3918 | * Registers a clock provider for the given device's node. If the device has |
3919 | * no DT node, or if its node lacks clock provider information (#clock-cells), |
3920 | * then the parent device's node is scanned for this information. If the |
3921 | * parent node has #clock-cells then it is used in the registration. The |
3922 | * provider is automatically released at device exit. |
e45838b5 MV |
3923 | * |
3924 | * Return: 0 on success or an errno on failure. | |
3925 | */ | |
aa795c41 SB |
3926 | int devm_of_clk_add_hw_provider(struct device *dev, |
3927 | struct clk_hw *(*get)(struct of_phandle_args *clkspec, | |
3928 | void *data), | |
3929 | void *data) | |
3930 | { | |
3931 | struct device_node **ptr, *np; | |
3932 | int ret; | |
3933 | ||
3934 | ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr), | |
3935 | GFP_KERNEL); | |
3936 | if (!ptr) | |
3937 | return -ENOMEM; | |
3938 | ||
05502bf9 | 3939 | np = get_clk_provider_node(dev); |
aa795c41 SB |
3940 | ret = of_clk_add_hw_provider(np, get, data); |
3941 | if (!ret) { | |
3942 | *ptr = np; | |
3943 | devres_add(dev, ptr); | |
3944 | } else { | |
3945 | devres_free(ptr); | |
3946 | } | |
3947 | ||
3948 | return ret; | |
3949 | } | |
3950 | EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider); | |
3951 | ||
766e6a4e GL |
3952 | /** |
3953 | * of_clk_del_provider() - Remove a previously registered clock provider | |
3954 | * @np: Device node pointer associated with clock provider | |
3955 | */ | |
3956 | void of_clk_del_provider(struct device_node *np) | |
3957 | { | |
3958 | struct of_clk_provider *cp; | |
3959 | ||
d6782c26 | 3960 | mutex_lock(&of_clk_mutex); |
766e6a4e GL |
3961 | list_for_each_entry(cp, &of_clk_providers, link) { |
3962 | if (cp->node == np) { | |
3963 | list_del(&cp->link); | |
3964 | of_node_put(cp->node); | |
3965 | kfree(cp); | |
3966 | break; | |
3967 | } | |
3968 | } | |
d6782c26 | 3969 | mutex_unlock(&of_clk_mutex); |
766e6a4e GL |
3970 | } |
3971 | EXPORT_SYMBOL_GPL(of_clk_del_provider); | |
3972 | ||
aa795c41 SB |
3973 | static int devm_clk_provider_match(struct device *dev, void *res, void *data) |
3974 | { | |
3975 | struct device_node **np = res; | |
3976 | ||
3977 | if (WARN_ON(!np || !*np)) | |
3978 | return 0; | |
3979 | ||
3980 | return *np == data; | |
3981 | } | |
3982 | ||
e45838b5 MV |
3983 | /** |
3984 | * devm_of_clk_del_provider() - Remove clock provider registered using devm | |
3985 | * @dev: Device to whose lifetime the clock provider was bound | |
3986 | */ | |
aa795c41 SB |
3987 | void devm_of_clk_del_provider(struct device *dev) |
3988 | { | |
3989 | int ret; | |
05502bf9 | 3990 | struct device_node *np = get_clk_provider_node(dev); |
aa795c41 SB |
3991 | |
3992 | ret = devres_release(dev, devm_of_clk_release_provider, | |
05502bf9 | 3993 | devm_clk_provider_match, np); |
aa795c41 SB |
3994 | |
3995 | WARN_ON(ret); | |
3996 | } | |
3997 | EXPORT_SYMBOL(devm_of_clk_del_provider); | |
3998 | ||
0861e5b8 SB |
3999 | static struct clk_hw * |
4000 | __of_clk_get_hw_from_provider(struct of_clk_provider *provider, | |
4001 | struct of_phandle_args *clkspec) | |
4002 | { | |
4003 | struct clk *clk; | |
0861e5b8 | 4004 | |
74002fcd SB |
4005 | if (provider->get_hw) |
4006 | return provider->get_hw(clkspec, provider->data); | |
0861e5b8 | 4007 | |
74002fcd SB |
4008 | clk = provider->get(clkspec, provider->data); |
4009 | if (IS_ERR(clk)) | |
4010 | return ERR_CAST(clk); | |
4011 | return __clk_get_hw(clk); | |
0861e5b8 SB |
4012 | } |
4013 | ||
73e0e496 SB |
4014 | struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec, |
4015 | const char *dev_id, const char *con_id) | |
766e6a4e GL |
4016 | { |
4017 | struct of_clk_provider *provider; | |
a34cd466 | 4018 | struct clk *clk = ERR_PTR(-EPROBE_DEFER); |
f155d15b | 4019 | struct clk_hw *hw; |
766e6a4e | 4020 | |
306c342f SB |
4021 | if (!clkspec) |
4022 | return ERR_PTR(-EINVAL); | |
4023 | ||
766e6a4e | 4024 | /* Check if we have such a provider in our array */ |
306c342f | 4025 | mutex_lock(&of_clk_mutex); |
766e6a4e | 4026 | list_for_each_entry(provider, &of_clk_providers, link) { |
f155d15b | 4027 | if (provider->node == clkspec->np) { |
0861e5b8 | 4028 | hw = __of_clk_get_hw_from_provider(provider, clkspec); |
0861e5b8 | 4029 | clk = __clk_create_clk(hw, dev_id, con_id); |
f155d15b | 4030 | } |
73e0e496 | 4031 | |
f155d15b SB |
4032 | if (!IS_ERR(clk)) { |
4033 | if (!__clk_get(clk)) { | |
73e0e496 SB |
4034 | __clk_free_clk(clk); |
4035 | clk = ERR_PTR(-ENOENT); | |
4036 | } | |
4037 | ||
766e6a4e | 4038 | break; |
73e0e496 | 4039 | } |
766e6a4e | 4040 | } |
306c342f | 4041 | mutex_unlock(&of_clk_mutex); |
d6782c26 SN |
4042 | |
4043 | return clk; | |
4044 | } | |
4045 | ||
306c342f SB |
4046 | /** |
4047 | * of_clk_get_from_provider() - Lookup a clock from a clock provider | |
4048 | * @clkspec: pointer to a clock specifier data structure | |
4049 | * | |
4050 | * This function looks up a struct clk from the registered list of clock | |
4051 | * providers; the input is a clock specifier data structure as returned |
4052 | * from the of_parse_phandle_with_args() function call. | |
4053 | */ | |
d6782c26 SN |
4054 | struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) |
4055 | { | |
306c342f | 4056 | return __of_clk_get_from_provider(clkspec, NULL, __func__); |
766e6a4e | 4057 | } |
fb4dd222 | 4058 | EXPORT_SYMBOL_GPL(of_clk_get_from_provider); |
766e6a4e | 4059 | |
929e7f3b SB |
4060 | /** |
4061 | * of_clk_get_parent_count() - Count the number of clocks a device node has | |
4062 | * @np: device node to count | |
4063 | * | |
4064 | * Returns: The number of clocks that are possible parents of this node | |
4065 | */ | |
4066 | unsigned int of_clk_get_parent_count(struct device_node *np) | |
f6102742 | 4067 | { |
929e7f3b SB |
4068 | int count; |
4069 | ||
4070 | count = of_count_phandle_with_args(np, "clocks", "#clock-cells"); | |
4071 | if (count < 0) | |
4072 | return 0; | |
4073 | ||
4074 | return count; | |
f6102742 MT |
4075 | } |
4076 | EXPORT_SYMBOL_GPL(of_clk_get_parent_count); | |
4077 | ||
766e6a4e GL |
4078 | const char *of_clk_get_parent_name(struct device_node *np, int index) |
4079 | { | |
4080 | struct of_phandle_args clkspec; | |
7a0fc1a3 | 4081 | struct property *prop; |
766e6a4e | 4082 | const char *clk_name; |
7a0fc1a3 BD |
4083 | const __be32 *vp; |
4084 | u32 pv; | |
766e6a4e | 4085 | int rc; |
7a0fc1a3 | 4086 | int count; |
0a4807c2 | 4087 | struct clk *clk; |
766e6a4e | 4088 | |
766e6a4e GL |
4089 | rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index, |
4090 | &clkspec); | |
4091 | if (rc) | |
4092 | return NULL; | |
4093 | ||
7a0fc1a3 BD |
4094 | index = clkspec.args_count ? clkspec.args[0] : 0; |
4095 | count = 0; | |
4096 | ||
4097 | /* if there is an indices property, use it to transfer the index | |
4098 | * specified into an array offset for the clock-output-names property. | |
4099 | */ | |
4100 | of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) { | |
4101 | if (index == pv) { | |
4102 | index = count; | |
4103 | break; | |
4104 | } | |
4105 | count++; | |
4106 | } | |
8da411cc MY |
4107 | /* We went off the end of 'clock-indices' without finding it */ |
4108 | if (prop && !vp) | |
4109 | return NULL; | |
7a0fc1a3 | 4110 | |
766e6a4e | 4111 | if (of_property_read_string_index(clkspec.np, "clock-output-names", |
7a0fc1a3 | 4112 | index, |
0a4807c2 SB |
4113 | &clk_name) < 0) { |
4114 | /* | |
4115 | * Best effort to get the name if the clock has been | |
4116 | * registered with the framework. If the clock isn't | |
4117 | * registered, we return the node name as the name of | |
4118 | * the clock as long as #clock-cells = 0. | |
4119 | */ | |
4120 | clk = of_clk_get_from_provider(&clkspec); | |
4121 | if (IS_ERR(clk)) { | |
4122 | if (clkspec.args_count == 0) | |
4123 | clk_name = clkspec.np->name; | |
4124 | else | |
4125 | clk_name = NULL; | |
4126 | } else { | |
4127 | clk_name = __clk_get_name(clk); | |
4128 | clk_put(clk); | |
4129 | } | |
4130 | } | |
4131 | ||
766e6a4e GL |
4132 | |
4133 | of_node_put(clkspec.np); | |
4134 | return clk_name; | |
4135 | } | |
4136 | EXPORT_SYMBOL_GPL(of_clk_get_parent_name); | |
4137 | ||
2e61dfb3 DN |
4138 | /** |
4139 | * of_clk_parent_fill() - Fill @parents with names of @np's parents and return | |
4140 | * number of parents | |
4141 | * @np: Device node pointer associated with clock provider | |
4142 | * @parents: pointer to char array that hold the parents' names | |
4143 | * @size: size of the @parents array | |
4144 | * | |
4145 | * Return: number of parents for the clock node. | |
4146 | */ | |
4147 | int of_clk_parent_fill(struct device_node *np, const char **parents, | |
4148 | unsigned int size) | |
4149 | { | |
4150 | unsigned int i = 0; | |
4151 | ||
4152 | while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL) | |
4153 | i++; | |
4154 | ||
4155 | return i; | |
4156 | } | |
4157 | EXPORT_SYMBOL_GPL(of_clk_parent_fill); | |
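/*
 * Illustrative sketch (editorial addition, not part of this file): gathering
 * a mux's parent names from DT before building its clk_init_data. The "foo"
 * names and the bound of four parents are hypothetical.
 */
static void foo_setup_mux_parents(struct device_node *np)
{
	const char *parent_names[4];
	int num_parents;

	num_parents = of_clk_parent_fill(np, parent_names,
					 ARRAY_SIZE(parent_names));

	/* parent_names[0..num_parents-1] now feed clk_init_data.parent_names */
}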
4158 | ||
1771b10d | 4159 | struct clock_provider { |
a5970433 | 4160 | void (*clk_init_cb)(struct device_node *); |
1771b10d GC |
4161 | struct device_node *np; |
4162 | struct list_head node; | |
4163 | }; | |
4164 | ||
1771b10d GC |
4165 | /* |
4166 | * This function looks for a parent clock. If there is one, then it | |
4167 | * checks that the provider for this parent clock was initialized, in | |
4168 | * this case the parent clock will be ready. | |
4169 | */ | |
4170 | static int parent_ready(struct device_node *np) | |
4171 | { | |
4172 | int i = 0; | |
4173 | ||
4174 | while (true) { | |
4175 | struct clk *clk = of_clk_get(np, i); | |
4176 | ||
4177 | /* this parent is ready, we can check the next one */ |
4178 | if (!IS_ERR(clk)) { | |
4179 | clk_put(clk); | |
4180 | i++; | |
4181 | continue; | |
4182 | } | |
4183 | ||
4184 | /* at least one parent is not ready, we exit now */ | |
4185 | if (PTR_ERR(clk) == -EPROBE_DEFER) | |
4186 | return 0; | |
4187 | ||
4188 | /* | |
4189 | * Here we assume that the device tree is |
4190 | * written correctly. So an error means that there are |
4191 | * no more parents. As we did not exit earlier, the |
4192 | * previous parents are ready. If there is no clock |
4193 | * parent at all, there is nothing to wait for, so we |
4194 | * can consider their absence as meaning ready. |
4195 | */ | |
4196 | return 1; | |
4197 | } | |
4198 | } | |
4199 | ||
d56f8994 LJ |
4200 | /** |
4201 | * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree | |
4202 | * @np: Device node pointer associated with clock provider | |
4203 | * @index: clock index | |
f7ae7503 | 4204 | * @flags: pointer to top-level framework flags |
d56f8994 LJ |
4205 | * |
4206 | * Detects if the clock-critical property exists and, if so, sets the | |
4207 | * corresponding CLK_IS_CRITICAL flag. | |
4208 | * | |
4209 | * Do not use this function. It exists only for legacy Device Tree | |
4210 | * bindings, such as the now-outdated one-clock-per-node style. |
4211 | * Those bindings typically put all clock data into .dts and the Linux | |
4212 | * driver has no clock data, thus making it impossible to set this flag | |
4213 | * correctly from the driver. Only those drivers may call | |
4214 | * of_clk_detect_critical from their setup functions. | |
4215 | * | |
4216 | * Return: error code or zero on success | |
4217 | */ | |
4218 | int of_clk_detect_critical(struct device_node *np, | |
4219 | int index, unsigned long *flags) | |
4220 | { | |
4221 | struct property *prop; | |
4222 | const __be32 *cur; | |
4223 | uint32_t idx; | |
4224 | ||
4225 | if (!np || !flags) | |
4226 | return -EINVAL; | |
4227 | ||
4228 | of_property_for_each_u32(np, "clock-critical", prop, cur, idx) | |
4229 | if (index == idx) | |
4230 | *flags |= CLK_IS_CRITICAL; | |
4231 | ||
4232 | return 0; | |
4233 | } | |
4234 | ||
766e6a4e GL |
4235 | /** |
4236 | * of_clk_init() - Scan and init clock providers from the DT | |
4237 | * @matches: array of compatible values and init functions for providers. | |
4238 | * | |
1771b10d | 4239 | * This function scans the device tree for matching clock providers |
e5ca8fb4 | 4240 | * and calls their initialization functions. It does so in an order |
1771b10d | 4241 | * that tries to follow the dependencies between providers. |
766e6a4e GL |
4242 | */ |
4243 | void __init of_clk_init(const struct of_device_id *matches) | |
4244 | { | |
7f7ed584 | 4245 | const struct of_device_id *match; |
766e6a4e | 4246 | struct device_node *np; |
1771b10d GC |
4247 | struct clock_provider *clk_provider, *next; |
4248 | bool is_init_done; | |
4249 | bool force = false; | |
2573a02a | 4250 | LIST_HEAD(clk_provider_list); |
766e6a4e | 4251 | |
f2f6c255 | 4252 | if (!matches) |
819b4861 | 4253 | matches = &__clk_of_table; |
f2f6c255 | 4254 | |
1771b10d | 4255 | /* First prepare the list of the clocks providers */ |
7f7ed584 | 4256 | for_each_matching_node_and_match(np, matches, &match) { |
2e3b19f1 SB |
4257 | struct clock_provider *parent; |
4258 | ||
3e5dd6f6 GU |
4259 | if (!of_device_is_available(np)) |
4260 | continue; | |
4261 | ||
2e3b19f1 SB |
4262 | parent = kzalloc(sizeof(*parent), GFP_KERNEL); |
4263 | if (!parent) { | |
4264 | list_for_each_entry_safe(clk_provider, next, | |
4265 | &clk_provider_list, node) { | |
4266 | list_del(&clk_provider->node); | |
6bc9d9d6 | 4267 | of_node_put(clk_provider->np); |
2e3b19f1 SB |
4268 | kfree(clk_provider); |
4269 | } | |
6bc9d9d6 | 4270 | of_node_put(np); |
2e3b19f1 SB |
4271 | return; |
4272 | } | |
1771b10d GC |
4273 | |
4274 | parent->clk_init_cb = match->data; | |
6bc9d9d6 | 4275 | parent->np = of_node_get(np); |
3f6d439f | 4276 | list_add_tail(&parent->node, &clk_provider_list); |
1771b10d GC |
4277 | } |
4278 | ||
4279 | while (!list_empty(&clk_provider_list)) { | |
4280 | is_init_done = false; | |
4281 | list_for_each_entry_safe(clk_provider, next, | |
4282 | &clk_provider_list, node) { | |
4283 | if (force || parent_ready(clk_provider->np)) { | |
86be408b | 4284 | |
989eafd0 RRD |
4285 | /* Don't populate platform devices */ |
4286 | of_node_set_flag(clk_provider->np, | |
4287 | OF_POPULATED); | |
4288 | ||
1771b10d | 4289 | clk_provider->clk_init_cb(clk_provider->np); |
86be408b SN |
4290 | of_clk_set_defaults(clk_provider->np, true); |
4291 | ||
1771b10d | 4292 | list_del(&clk_provider->node); |
6bc9d9d6 | 4293 | of_node_put(clk_provider->np); |
1771b10d GC |
4294 | kfree(clk_provider); |
4295 | is_init_done = true; | |
4296 | } | |
4297 | } | |
4298 | ||
4299 | /* | |
e5ca8fb4 | 4300 | * We didn't manage to initialize any of the |
1771b10d GC |
4301 | * remaining providers during the last loop, so now we |
4302 | * initialize all the remaining ones unconditionally | |
4303 | * in case the clock parent was not mandatory | |
4304 | */ | |
4305 | if (!is_init_done) | |
4306 | force = true; | |
766e6a4e GL |
4307 | } |
4308 | } | |
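/*
 * Illustrative sketch (editorial addition, not part of this file): an early
 * clock provider is normally picked up by of_clk_init() by declaring itself
 * with CLK_OF_DECLARE(). The "foo" compatible string and setup routine are
 * hypothetical.
 */
static void __init foo_clk_of_setup(struct device_node *np)
{
	/* register clk_hw's here, then publish them, e.g.:
	 * of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &foo_hw_data);
	 */
}
CLK_OF_DECLARE(foo_clk, "vendor,foo-clk", foo_clk_of_setup);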
4309 | #endif |