// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Power Interface (SCMI) Protocol based clock driver
 *
 * Copyright (C) 2018-2022 ARM Ltd.
 */
8 #include <linux/clk-provider.h>
9 #include <linux/device.h>
10 #include <linux/err.h>
12 #include <linux/module.h>
13 #include <linux/scmi_protocol.h>
14 #include <asm/div64.h>
/*
 * Values for the 'atomic' flag taken by the SCMI clock protocol
 * enable/disable/state_get operations: ATOMIC requests that the call be
 * completed without sleeping, NOT_ATOMIC allows the transport to sleep.
 */
#define NOT_ATOMIC	false
#define ATOMIC		true

/* SCMI clock protocol operations, obtained once at probe time. */
static const struct scmi_clk_proto_ops *scmi_proto_clk_ops;
25 const struct scmi_clock_info *info;
26 const struct scmi_protocol_handle *ph;
27 struct clk_parent_data *parent_data;
30 #define to_scmi_clk(clk) container_of(clk, struct scmi_clk, hw)
32 static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw,
33 unsigned long parent_rate)
37 struct scmi_clk *clk = to_scmi_clk(hw);
39 ret = scmi_proto_clk_ops->rate_get(clk->ph, clk->id, &rate);
45 static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
46 unsigned long *parent_rate)
49 struct scmi_clk *clk = to_scmi_clk(hw);
52 * We can't figure out what rate it will be, so just return the
53 * rate back to the caller. scmi_clk_recalc_rate() will be called
54 * after the rate is set and we'll know what rate the clock is
57 if (clk->info->rate_discrete)
60 fmin = clk->info->range.min_rate;
61 fmax = clk->info->range.max_rate;
64 else if (rate >= fmax)
68 ftmp += clk->info->range.step_size - 1; /* to round up */
69 do_div(ftmp, clk->info->range.step_size);
71 return ftmp * clk->info->range.step_size + fmin;
74 static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
75 unsigned long parent_rate)
77 struct scmi_clk *clk = to_scmi_clk(hw);
79 return scmi_proto_clk_ops->rate_set(clk->ph, clk->id, rate);
82 static int scmi_clk_set_parent(struct clk_hw *hw, u8 parent_index)
84 struct scmi_clk *clk = to_scmi_clk(hw);
86 return scmi_proto_clk_ops->parent_set(clk->ph, clk->id, parent_index);
89 static u8 scmi_clk_get_parent(struct clk_hw *hw)
91 struct scmi_clk *clk = to_scmi_clk(hw);
95 ret = scmi_proto_clk_ops->parent_get(clk->ph, clk->id, &parent_id);
99 for (p_idx = 0; p_idx < clk->info->num_parents; p_idx++) {
100 if (clk->parent_data[p_idx].index == parent_id)
104 if (p_idx == clk->info->num_parents)
/*
 * clk_ops .determine_rate: accept any requested rate unchanged.
 */
static int scmi_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	/*
	 * Suppose all the requested rates are supported, and let the firmware
	 * handle the remaining work when the rate is actually set.
	 */
	return 0;
}
119 static int scmi_clk_enable(struct clk_hw *hw)
121 struct scmi_clk *clk = to_scmi_clk(hw);
123 return scmi_proto_clk_ops->enable(clk->ph, clk->id, NOT_ATOMIC);
126 static void scmi_clk_disable(struct clk_hw *hw)
128 struct scmi_clk *clk = to_scmi_clk(hw);
130 scmi_proto_clk_ops->disable(clk->ph, clk->id, NOT_ATOMIC);
133 static int scmi_clk_atomic_enable(struct clk_hw *hw)
135 struct scmi_clk *clk = to_scmi_clk(hw);
137 return scmi_proto_clk_ops->enable(clk->ph, clk->id, ATOMIC);
140 static void scmi_clk_atomic_disable(struct clk_hw *hw)
142 struct scmi_clk *clk = to_scmi_clk(hw);
144 scmi_proto_clk_ops->disable(clk->ph, clk->id, ATOMIC);
147 static int scmi_clk_atomic_is_enabled(struct clk_hw *hw)
150 bool enabled = false;
151 struct scmi_clk *clk = to_scmi_clk(hw);
153 ret = scmi_proto_clk_ops->state_get(clk->ph, clk->id, &enabled, ATOMIC);
156 "Failed to get state for clock ID %d\n", clk->id);
/*
 * We can provide enable/disable/is_enabled atomic callbacks only if the
 * underlying SCMI transport for an SCMI instance is configured to handle
 * SCMI commands in an atomic manner.
 *
 * When no SCMI atomic transport support is available we instead provide only
 * the prepare/unprepare API, as allowed by the clock framework when atomic
 * calls are not available.
 *
 * Two distinct sets of clk_ops are provided since we could have multiple SCMI
 * instances with different underlying transport quality, so they cannot be
 * shared.
 */
174 static const struct clk_ops scmi_clk_ops = {
175 .recalc_rate = scmi_clk_recalc_rate,
176 .round_rate = scmi_clk_round_rate,
177 .set_rate = scmi_clk_set_rate,
178 .prepare = scmi_clk_enable,
179 .unprepare = scmi_clk_disable,
180 .set_parent = scmi_clk_set_parent,
181 .get_parent = scmi_clk_get_parent,
182 .determine_rate = scmi_clk_determine_rate,
185 static const struct clk_ops scmi_atomic_clk_ops = {
186 .recalc_rate = scmi_clk_recalc_rate,
187 .round_rate = scmi_clk_round_rate,
188 .set_rate = scmi_clk_set_rate,
189 .enable = scmi_clk_atomic_enable,
190 .disable = scmi_clk_atomic_disable,
191 .is_enabled = scmi_clk_atomic_is_enabled,
192 .set_parent = scmi_clk_set_parent,
193 .get_parent = scmi_clk_get_parent,
194 .determine_rate = scmi_clk_determine_rate,
197 static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
198 const struct clk_ops *scmi_ops)
201 unsigned long min_rate, max_rate;
203 struct clk_init_data init = {
204 .flags = CLK_GET_RATE_NOCACHE,
205 .num_parents = sclk->info->num_parents,
207 .name = sclk->info->name,
208 .parent_data = sclk->parent_data,
211 sclk->hw.init = &init;
212 ret = devm_clk_hw_register(dev, &sclk->hw);
216 if (sclk->info->rate_discrete) {
217 int num_rates = sclk->info->list.num_rates;
222 min_rate = sclk->info->list.rates[0];
223 max_rate = sclk->info->list.rates[num_rates - 1];
225 min_rate = sclk->info->range.min_rate;
226 max_rate = sclk->info->range.max_rate;
229 clk_hw_set_rate_range(&sclk->hw, min_rate, max_rate);
233 static int scmi_clocks_probe(struct scmi_device *sdev)
236 unsigned int atomic_threshold;
239 struct clk_hw_onecell_data *clk_data;
240 struct device *dev = &sdev->dev;
241 struct device_node *np = dev->of_node;
242 const struct scmi_handle *handle = sdev->handle;
243 struct scmi_protocol_handle *ph;
249 handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK, &ph);
250 if (IS_ERR(scmi_proto_clk_ops))
251 return PTR_ERR(scmi_proto_clk_ops);
253 count = scmi_proto_clk_ops->count_get(ph);
255 dev_err(dev, "%pOFn: invalid clock output count\n", np);
259 clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, count),
264 clk_data->num = count;
267 is_atomic = handle->is_transport_atomic(handle, &atomic_threshold);
269 for (idx = 0; idx < count; idx++) {
270 struct scmi_clk *sclk;
271 const struct clk_ops *scmi_ops;
273 sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL);
277 sclk->info = scmi_proto_clk_ops->info_get(ph, idx);
279 dev_dbg(dev, "invalid clock info for idx %d\n", idx);
280 devm_kfree(dev, sclk);
289 * Note that when transport is atomic but SCMI protocol did not
290 * specify (or support) an enable_latency associated with a
291 * clock, we default to use atomic operations mode.
294 sclk->info->enable_latency <= atomic_threshold)
295 scmi_ops = &scmi_atomic_clk_ops;
297 scmi_ops = &scmi_clk_ops;
299 /* Initialize clock parent data. */
300 if (sclk->info->num_parents > 0) {
301 sclk->parent_data = devm_kcalloc(dev, sclk->info->num_parents,
302 sizeof(*sclk->parent_data), GFP_KERNEL);
303 if (!sclk->parent_data)
306 for (int i = 0; i < sclk->info->num_parents; i++) {
307 sclk->parent_data[i].index = sclk->info->parents[i];
308 sclk->parent_data[i].hw = hws[sclk->info->parents[i]];
312 err = scmi_clk_ops_init(dev, sclk, scmi_ops);
314 dev_err(dev, "failed to register clock %d\n", idx);
315 devm_kfree(dev, sclk->parent_data);
316 devm_kfree(dev, sclk);
319 dev_dbg(dev, "Registered clock:%s%s\n",
321 scmi_ops == &scmi_atomic_clk_ops ?
322 " (atomic ops)" : "");
323 hws[idx] = &sclk->hw;
327 return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
331 static const struct scmi_device_id scmi_id_table[] = {
332 { SCMI_PROTOCOL_CLOCK, "clocks" },
335 MODULE_DEVICE_TABLE(scmi, scmi_id_table);
337 static struct scmi_driver scmi_clocks_driver = {
338 .name = "scmi-clocks",
339 .probe = scmi_clocks_probe,
340 .id_table = scmi_id_table,
342 module_scmi_driver(scmi_clocks_driver);
344 MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
345 MODULE_DESCRIPTION("ARM SCMI clock driver");
346 MODULE_LICENSE("GPL v2");