Commit | Line | Data |
---|---|---|
ba764c4d AE |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | ||
3 | /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. | |
4c7ccfcd | 4 | * Copyright (C) 2018-2021 Linaro Ltd. |
ba764c4d AE |
5 | */ |
6 | ||
ba764c4d AE |
7 | #include <linux/clk.h> |
8 | #include <linux/device.h> | |
9 | #include <linux/interconnect.h> | |
73ff316d | 10 | #include <linux/pm.h> |
2abb0c7f | 11 | #include <linux/pm_runtime.h> |
73ff316d | 12 | #include <linux/bitops.h> |
ba764c4d | 13 | |
34a08176 AE |
14 | #include "linux/soc/qcom/qcom_aoss.h" |
15 | ||
ba764c4d | 16 | #include "ipa.h" |
2775cbc5 | 17 | #include "ipa_power.h" |
73ff316d | 18 | #include "ipa_endpoint.h" |
ba764c4d | 19 | #include "ipa_modem.h" |
dfccb8b1 | 20 | #include "ipa_data.h" |
ba764c4d AE |
21 | |
22 | /** | |
7aa0e8b8 | 23 | * DOC: IPA Power Management |
ba764c4d | 24 | * |
7aa0e8b8 AE |
25 | * The IPA hardware is enabled when the IPA core clock and all the |
26 | * interconnects (buses) it depends on are enabled. Runtime power | |
27 | * management is used to determine whether the core clock and | |
28 | * interconnects are enabled, and if not in use to be suspended | |
29 | * automatically. | |
ba764c4d | 30 | * |
 * The core clock currently runs at a fixed clock rate when enabled,
 * and all interconnects use a fixed average and peak bandwidth.
ba764c4d AE |
33 | */ |
34 | ||
1aac309d AE |
35 | #define IPA_AUTOSUSPEND_DELAY 500 /* milliseconds */ |
36 | ||
/**
 * enum ipa_power_flag - IPA power flags
 * @IPA_POWER_FLAG_RESUMED:	Whether resume from suspend has been signaled
 * @IPA_POWER_FLAG_SYSTEM:	Hardware is system (not runtime) suspended
 * @IPA_POWER_FLAG_STOPPED:	Modem TX is disabled by ipa_start_xmit()
 * @IPA_POWER_FLAG_STARTED:	Modem TX was enabled by ipa_runtime_resume()
 * @IPA_POWER_FLAG_COUNT:	Number of defined power flags
 *
 * These flags index into the ipa_power->flags bitmap.  RESUMED and SYSTEM
 * coordinate suspend/resume handling; STOPPED and STARTED coordinate the
 * modem TX queue state (protected by ipa_power->spinlock).
 */
enum ipa_power_flag {
	IPA_POWER_FLAG_RESUMED,
	IPA_POWER_FLAG_SYSTEM,
	IPA_POWER_FLAG_STOPPED,
	IPA_POWER_FLAG_STARTED,
	IPA_POWER_FLAG_COUNT,		/* Last; not a flag */
};
52 | ||
/**
 * struct ipa_power - IPA power management information
 * @dev:		IPA device pointer
 * @core:		IPA core clock
 * @qmp:		QMP handle for AOSS communication (NULL if unused)
 * @spinlock:		Protects modem TX queue enable/disable
 * @flags:		Boolean state flags (see enum ipa_power_flag)
 * @interconnect_count:	Number of elements in interconnect[]
 * @interconnect:	Interconnect array (flexible array member; the
 *			structure is allocated with struct_size())
 */
struct ipa_power {
	struct device *dev;
	struct clk *core;
	struct qmp *qmp;
	spinlock_t spinlock;	/* used with STOPPED/STARTED power flags */
	DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
	u32 interconnect_count;
	struct icc_bulk_data interconnect[];
};
72 | ||
ba764c4d | 73 | /* Initialize interconnects required for IPA operation */ |
37e0cf33 | 74 | static int ipa_interconnect_init(struct ipa_power *power, |
db6cd514 | 75 | const struct ipa_interconnect_data *data) |
ba764c4d | 76 | { |
c7be12fa | 77 | struct icc_bulk_data *interconnect; |
10d0d397 | 78 | int ret; |
8ee7ec48 | 79 | u32 i; |
ea151e19 | 80 | |
63ac8cce | 81 | /* Initialize our interconnect data array for bulk operations */ |
8ee7ec48 AE |
82 | interconnect = &power->interconnect[0]; |
83 | for (i = 0; i < power->interconnect_count; i++) { | |
63ac8cce AE |
84 | /* interconnect->path is filled in by of_icc_bulk_get() */ |
85 | interconnect->name = data->name; | |
86 | interconnect->avg_bw = data->average_bandwidth; | |
87 | interconnect->peak_bw = data->peak_bandwidth; | |
88 | data++; | |
ea151e19 AE |
89 | interconnect++; |
90 | } | |
ba764c4d | 91 | |
37e0cf33 | 92 | ret = of_icc_bulk_get(power->dev, power->interconnect_count, |
63ac8cce AE |
93 | power->interconnect); |
94 | if (ret) | |
8ee7ec48 | 95 | return ret; |
63ac8cce AE |
96 | |
97 | /* All interconnects are initially disabled */ | |
98 | icc_bulk_disable(power->interconnect_count, power->interconnect); | |
99 | ||
100 | /* Set the bandwidth values to be used when enabled */ | |
101 | ret = icc_bulk_set_bw(power->interconnect_count, power->interconnect); | |
102 | if (ret) | |
8ee7ec48 | 103 | icc_bulk_put(power->interconnect_count, power->interconnect); |
10d0d397 AE |
104 | |
105 | return ret; | |
ba764c4d AE |
106 | } |
107 | ||
/* Inverse of ipa_interconnect_init(); releases all interconnect handles */
static void ipa_interconnect_exit(struct ipa_power *power)
{
	icc_bulk_put(power->interconnect_count, power->interconnect);
}
113 | ||
7aa0e8b8 AE |
114 | /* Enable IPA power, enabling interconnects and the core clock */ |
115 | static int ipa_power_enable(struct ipa *ipa) | |
ba764c4d | 116 | { |
90078e63 | 117 | struct ipa_power *power = ipa->power; |
ba764c4d AE |
118 | int ret; |
119 | ||
90078e63 | 120 | ret = icc_bulk_enable(power->interconnect_count, power->interconnect); |
ba764c4d AE |
121 | if (ret) |
122 | return ret; | |
123 | ||
90078e63 | 124 | ret = clk_prepare_enable(power->core); |
8ee7c40a | 125 | if (ret) { |
37e0cf33 | 126 | dev_err(power->dev, "error %d enabling core clock\n", ret); |
90078e63 AE |
127 | icc_bulk_disable(power->interconnect_count, |
128 | power->interconnect); | |
8ee7c40a | 129 | } |
ba764c4d AE |
130 | |
131 | return ret; | |
132 | } | |
133 | ||
7aa0e8b8 | 134 | /* Inverse of ipa_power_enable() */ |
90078e63 | 135 | static void ipa_power_disable(struct ipa *ipa) |
ba764c4d | 136 | { |
90078e63 AE |
137 | struct ipa_power *power = ipa->power; |
138 | ||
139 | clk_disable_unprepare(power->core); | |
7ebd168c | 140 | |
90078e63 | 141 | icc_bulk_disable(power->interconnect_count, power->interconnect); |
ba764c4d AE |
142 | } |
143 | ||
2abb0c7f AE |
144 | static int ipa_runtime_suspend(struct device *dev) |
145 | { | |
146 | struct ipa *ipa = dev_get_drvdata(dev); | |
147 | ||
148 | /* Endpoints aren't usable until setup is complete */ | |
149 | if (ipa->setup_complete) { | |
7aa0e8b8 | 150 | __clear_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags); |
2abb0c7f AE |
151 | ipa_endpoint_suspend(ipa); |
152 | gsi_suspend(&ipa->gsi); | |
153 | } | |
154 | ||
90078e63 AE |
155 | ipa_power_disable(ipa); |
156 | ||
157 | return 0; | |
2abb0c7f AE |
158 | } |
159 | ||
160 | static int ipa_runtime_resume(struct device *dev) | |
161 | { | |
162 | struct ipa *ipa = dev_get_drvdata(dev); | |
163 | int ret; | |
164 | ||
7aa0e8b8 | 165 | ret = ipa_power_enable(ipa); |
2abb0c7f AE |
166 | if (WARN_ON(ret < 0)) |
167 | return ret; | |
168 | ||
169 | /* Endpoints aren't usable until setup is complete */ | |
170 | if (ipa->setup_complete) { | |
171 | gsi_resume(&ipa->gsi); | |
172 | ipa_endpoint_resume(ipa); | |
173 | } | |
174 | ||
175 | return 0; | |
176 | } | |
177 | ||
b9c532c1 AE |
178 | static int ipa_suspend(struct device *dev) |
179 | { | |
180 | struct ipa *ipa = dev_get_drvdata(dev); | |
181 | ||
7aa0e8b8 | 182 | __set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags); |
b9c532c1 AE |
183 | |
184 | return pm_runtime_force_suspend(dev); | |
185 | } | |
186 | ||
187 | static int ipa_resume(struct device *dev) | |
188 | { | |
189 | struct ipa *ipa = dev_get_drvdata(dev); | |
190 | int ret; | |
191 | ||
192 | ret = pm_runtime_force_resume(dev); | |
193 | ||
7aa0e8b8 | 194 | __clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags); |
b9c532c1 AE |
195 | |
196 | return ret; | |
197 | } | |
198 | ||
78b348f3 | 199 | /* Return the current IPA core clock rate */ |
7aa0e8b8 | 200 | u32 ipa_core_clock_rate(struct ipa *ipa) |
78b348f3 | 201 | { |
7aa0e8b8 | 202 | return ipa->power ? (u32)clk_get_rate(ipa->power->core) : 0; |
78b348f3 AE |
203 | } |
204 | ||
afe1baa8 AE |
205 | /** |
206 | * ipa_suspend_handler() - Handle the suspend IPA interrupt | |
207 | * @ipa: IPA pointer | |
208 | * @irq_id: IPA interrupt type (unused) | |
209 | * | |
210 | * If an RX endpoint is suspended, and the IPA has a packet destined for | |
211 | * that endpoint, the IPA generates a SUSPEND interrupt to inform the AP | |
212 | * that it should resume the endpoint. If we get one of these interrupts | |
213 | * we just wake up the system. | |
214 | */ | |
215 | static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id) | |
216 | { | |
b9c532c1 AE |
217 | /* To handle an IPA interrupt we will have resumed the hardware |
218 | * just to handle the interrupt, so we're done. If we are in a | |
219 | * system suspend, trigger a system resume. | |
afe1baa8 | 220 | */ |
7aa0e8b8 AE |
221 | if (!__test_and_set_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags)) |
222 | if (test_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags)) | |
b9c532c1 | 223 | pm_wakeup_dev_event(&ipa->pdev->dev, 0, true); |
afe1baa8 AE |
224 | |
225 | /* Acknowledge/clear the suspend interrupt on all endpoints */ | |
226 | ipa_interrupt_suspend_clear_all(ipa->interrupt); | |
227 | } | |
228 | ||
b8e36e13 AE |
229 | /* The next few functions coordinate stopping and starting the modem |
230 | * network device transmit queue. | |
231 | * | |
232 | * Transmit can be running concurrent with power resume, and there's a | |
233 | * chance the resume completes before the transmit path stops the queue, | |
234 | * leaving the queue in a stopped state. The next two functions are used | |
235 | * to avoid this: ipa_power_modem_queue_stop() is used by ipa_start_xmit() | |
236 | * to conditionally stop the TX queue; and ipa_power_modem_queue_start() | |
237 | * is used by ipa_runtime_resume() to conditionally restart it. | |
238 | * | |
239 | * Two flags and a spinlock are used. If the queue is stopped, the STOPPED | |
240 | * power flag is set. And if the queue is started, the STARTED flag is set. | |
241 | * The queue is only started on resume if the STOPPED flag is set. And the | |
242 | * queue is only started in ipa_start_xmit() if the STARTED flag is *not* | |
243 | * set. As a result, the queue remains operational if the two activites | |
244 | * happen concurrently regardless of the order they complete. The spinlock | |
245 | * ensures the flag and TX queue operations are done atomically. | |
246 | * | |
247 | * The first function stops the modem netdev transmit queue, but only if | |
248 | * the STARTED flag is *not* set. That flag is cleared if it was set. | |
249 | * If the queue is stopped, the STOPPED flag is set. This is called only | |
250 | * from the power ->runtime_resume operation. | |
251 | */ | |
252 | void ipa_power_modem_queue_stop(struct ipa *ipa) | |
253 | { | |
7aa0e8b8 | 254 | struct ipa_power *power = ipa->power; |
b8e36e13 AE |
255 | unsigned long flags; |
256 | ||
7aa0e8b8 | 257 | spin_lock_irqsave(&power->spinlock, flags); |
b8e36e13 | 258 | |
7aa0e8b8 | 259 | if (!__test_and_clear_bit(IPA_POWER_FLAG_STARTED, power->flags)) { |
b8e36e13 | 260 | netif_stop_queue(ipa->modem_netdev); |
7aa0e8b8 | 261 | __set_bit(IPA_POWER_FLAG_STOPPED, power->flags); |
b8e36e13 AE |
262 | } |
263 | ||
7aa0e8b8 | 264 | spin_unlock_irqrestore(&power->spinlock, flags); |
b8e36e13 AE |
265 | } |
266 | ||
267 | /* This function starts the modem netdev transmit queue, but only if the | |
268 | * STOPPED flag is set. That flag is cleared if it was set. If the queue | |
269 | * was restarted, the STARTED flag is set; this allows ipa_start_xmit() | |
270 | * to skip stopping the queue in the event of a race. | |
271 | */ | |
272 | void ipa_power_modem_queue_wake(struct ipa *ipa) | |
273 | { | |
7aa0e8b8 | 274 | struct ipa_power *power = ipa->power; |
b8e36e13 AE |
275 | unsigned long flags; |
276 | ||
7aa0e8b8 | 277 | spin_lock_irqsave(&power->spinlock, flags); |
b8e36e13 | 278 | |
7aa0e8b8 AE |
279 | if (__test_and_clear_bit(IPA_POWER_FLAG_STOPPED, power->flags)) { |
280 | __set_bit(IPA_POWER_FLAG_STARTED, power->flags); | |
b8e36e13 AE |
281 | netif_wake_queue(ipa->modem_netdev); |
282 | } | |
283 | ||
7aa0e8b8 | 284 | spin_unlock_irqrestore(&power->spinlock, flags); |
b8e36e13 AE |
285 | } |
286 | ||
/* This function clears the STARTED flag once the TX queue is operating */
void ipa_power_modem_queue_active(struct ipa *ipa)
{
	/* Atomic clear; called outside the power spinlock */
	clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags);
}
292 | ||
34a08176 AE |
293 | static int ipa_power_retention_init(struct ipa_power *power) |
294 | { | |
295 | struct qmp *qmp = qmp_get(power->dev); | |
296 | ||
297 | if (IS_ERR(qmp)) { | |
298 | if (PTR_ERR(qmp) == -EPROBE_DEFER) | |
299 | return -EPROBE_DEFER; | |
300 | ||
301 | /* We assume any other error means it's not defined/needed */ | |
302 | qmp = NULL; | |
303 | } | |
304 | power->qmp = qmp; | |
305 | ||
306 | return 0; | |
307 | } | |
308 | ||
/* Inverse of ipa_power_retention_init() */
static void ipa_power_retention_exit(struct ipa_power *power)
{
	/* NOTE(review): power->qmp may be NULL when retention is unused;
	 * presumably qmp_put() accepts that — confirm against qcom_aoss.
	 */
	qmp_put(power->qmp);
	power->qmp = NULL;
}
314 | ||
/* Control register retention on power collapse */
void ipa_power_retention(struct ipa *ipa, bool enable)
{
	static const char fmt[] = "{ class: bcm, res: ipa_pc, val: %c }";
	struct ipa_power *power = ipa->power;
	char buf[36];	/* Exactly enough for fmt[]; size a multiple of 4 */
	int ret;

	if (!power->qmp)
		return;		/* Not needed on this platform */

	(void)snprintf(buf, sizeof(buf), fmt, enable ? '1' : '0');

	/* Note: the full fixed-size buffer is sent, not just the string */
	ret = qmp_send(power->qmp, buf, sizeof(buf));
	if (ret)
		dev_err(power->dev, "error %d sending QMP %sable request\n",
			ret, enable ? "en" : "dis");
}
333 | ||
d430fe4b | 334 | int ipa_power_setup(struct ipa *ipa) |
afe1baa8 | 335 | { |
d430fe4b AE |
336 | int ret; |
337 | ||
afe1baa8 AE |
338 | ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND, |
339 | ipa_suspend_handler); | |
d430fe4b AE |
340 | |
341 | ret = device_init_wakeup(&ipa->pdev->dev, true); | |
342 | if (ret) | |
343 | ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND); | |
344 | ||
345 | return ret; | |
afe1baa8 AE |
346 | } |
347 | ||
/* Inverse of ipa_power_setup() */
void ipa_power_teardown(struct ipa *ipa)
{
	/* Result deliberately ignored; nothing useful to do on failure */
	(void)device_init_wakeup(&ipa->pdev->dev, false);
	ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
}
353 | ||
7aa0e8b8 AE |
354 | /* Initialize IPA power management */ |
355 | struct ipa_power * | |
356 | ipa_power_init(struct device *dev, const struct ipa_power_data *data) | |
ba764c4d | 357 | { |
7aa0e8b8 | 358 | struct ipa_power *power; |
ba764c4d | 359 | struct clk *clk; |
8ee7ec48 | 360 | size_t size; |
ba764c4d AE |
361 | int ret; |
362 | ||
363 | clk = clk_get(dev, "core"); | |
364 | if (IS_ERR(clk)) { | |
4c7ccfcd AE |
365 | dev_err_probe(dev, PTR_ERR(clk), "error getting core clock\n"); |
366 | ||
ba764c4d AE |
367 | return ERR_CAST(clk); |
368 | } | |
369 | ||
91d02f95 | 370 | ret = clk_set_rate(clk, data->core_clock_rate); |
ba764c4d | 371 | if (ret) { |
91d02f95 AE |
372 | dev_err(dev, "error %d setting core clock rate to %u\n", |
373 | ret, data->core_clock_rate); | |
ba764c4d AE |
374 | goto err_clk_put; |
375 | } | |
376 | ||
cb631a63 AE |
377 | size = struct_size(power, interconnect, data->interconnect_count); |
378 | power = kzalloc(size, GFP_KERNEL); | |
7aa0e8b8 | 379 | if (!power) { |
ba764c4d AE |
380 | ret = -ENOMEM; |
381 | goto err_clk_put; | |
382 | } | |
7aa0e8b8 AE |
383 | power->dev = dev; |
384 | power->core = clk; | |
385 | spin_lock_init(&power->spinlock); | |
386 | power->interconnect_count = data->interconnect_count; | |
ba764c4d | 387 | |
37e0cf33 | 388 | ret = ipa_interconnect_init(power, data->interconnect_data); |
ba764c4d AE |
389 | if (ret) |
390 | goto err_kfree; | |
391 | ||
34a08176 AE |
392 | ret = ipa_power_retention_init(power); |
393 | if (ret) | |
394 | goto err_interconnect_exit; | |
395 | ||
1aac309d AE |
396 | pm_runtime_set_autosuspend_delay(dev, IPA_AUTOSUSPEND_DELAY); |
397 | pm_runtime_use_autosuspend(dev); | |
63de79f0 AE |
398 | pm_runtime_enable(dev); |
399 | ||
7aa0e8b8 | 400 | return power; |
ba764c4d | 401 | |
34a08176 AE |
402 | err_interconnect_exit: |
403 | ipa_interconnect_exit(power); | |
ba764c4d | 404 | err_kfree: |
7aa0e8b8 | 405 | kfree(power); |
ba764c4d AE |
406 | err_clk_put: |
407 | clk_put(clk); | |
408 | ||
409 | return ERR_PTR(ret); | |
410 | } | |
411 | ||
7aa0e8b8 AE |
412 | /* Inverse of ipa_power_init() */ |
413 | void ipa_power_exit(struct ipa_power *power) | |
ba764c4d | 414 | { |
7aa0e8b8 AE |
415 | struct device *dev = power->dev; |
416 | struct clk *clk = power->core; | |
ba764c4d | 417 | |
1aac309d AE |
418 | pm_runtime_disable(dev); |
419 | pm_runtime_dont_use_autosuspend(dev); | |
34a08176 | 420 | ipa_power_retention_exit(power); |
7aa0e8b8 AE |
421 | ipa_interconnect_exit(power); |
422 | kfree(power); | |
ba764c4d AE |
423 | clk_put(clk); |
424 | } | |
73ff316d | 425 | |
/* Power management operations for the IPA platform driver; system
 * suspend/resume wrap the runtime callbacks via pm_runtime_force_*().
 */
const struct dev_pm_ops ipa_pm_ops = {
	.suspend = ipa_suspend,
	.resume = ipa_resume,
	.runtime_suspend = ipa_runtime_suspend,
	.runtime_resume = ipa_runtime_resume,
};