Commit | Line | Data |
---|---|---|
bca6b067 BVA |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | ||
3 | #include <linux/blk-pm.h> | |
4 | #include <linux/blkdev.h> | |
5 | #include <linux/pm_runtime.h> | |
7cedffec | 6 | #include "blk-mq.h" |
bca6b067 BVA |
7 | |
8 | /** | |
9 | * blk_pm_runtime_init - Block layer runtime PM initialization routine | |
10 | * @q: the queue of the device | |
11 | * @dev: the device the queue belongs to | |
12 | * | |
13 | * Description: | |
14 | * Initialize runtime-PM-related fields for @q and start auto suspend for | |
15 | * @dev. Drivers that want to take advantage of request-based runtime PM | |
16 | * should call this function after @dev has been initialized, and its | |
17 | * request queue @q has been allocated, and runtime PM for it can not happen | |
18 | * yet(either due to disabled/forbidden or its usage_count > 0). In most | |
19 | * cases, driver should call this function before any I/O has taken place. | |
20 | * | |
21 | * This function takes care of setting up using auto suspend for the device, | |
22 | * the autosuspend delay is set to -1 to make runtime suspend impossible | |
23 | * until an updated value is either set by user or by driver. Drivers do | |
24 | * not need to touch other autosuspend settings. | |
25 | * | |
26 | * The block layer runtime PM is request based, so only works for drivers | |
27 | * that use request as their IO unit instead of those directly use bio's. | |
28 | */ | |
29 | void blk_pm_runtime_init(struct request_queue *q, struct device *dev) | |
30 | { | |
bca6b067 BVA |
31 | q->dev = dev; |
32 | q->rpm_status = RPM_ACTIVE; | |
33 | pm_runtime_set_autosuspend_delay(q->dev, -1); | |
34 | pm_runtime_use_autosuspend(q->dev); | |
35 | } | |
36 | EXPORT_SYMBOL(blk_pm_runtime_init); | |
37 | ||
/**
 * blk_pre_runtime_suspend - Pre runtime suspend check
 * @q: the queue of the device
 *
 * Description:
 *    This function will check if runtime suspend is allowed for the device
 *    by examining if there are any requests pending in the queue. If there
 *    are requests pending, the device can not be runtime suspended; otherwise,
 *    the queue's status will be updated to SUSPENDING and the driver can
 *    proceed to suspend the device.
 *
 *    For the not allowed case, we mark last busy for the device so that
 *    runtime PM core will try to autosuspend it some time later.
 *
 *    This function should be called near the start of the device's
 *    runtime_suspend callback.
 *
 * Return:
 *    0         - OK to runtime suspend the device
 *    -EBUSY    - Device should not be runtime suspended
 */
int blk_pre_runtime_suspend(struct request_queue *q)
{
	int ret = 0;

	/* Queues without a device never participate in runtime PM. */
	if (!q->dev)
		return ret;

	WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);

	spin_lock_irq(&q->queue_lock);
	q->rpm_status = RPM_SUSPENDING;
	spin_unlock_irq(&q->queue_lock);

	/*
	 * Increase the pm_only counter before checking whether any
	 * non-PM blk_queue_enter() calls are in progress to avoid that any
	 * new non-PM blk_queue_enter() calls succeed before the pm_only
	 * counter is decreased again.
	 */
	blk_set_pm_only(q);
	ret = -EBUSY;
	/* Switch q_usage_counter from per-cpu to atomic mode. */
	blk_freeze_queue_start(q);
	/*
	 * Wait until atomic mode has been reached. Since that
	 * involves calling call_rcu(), it is guaranteed that later
	 * blk_queue_enter() calls see the pm-only state. See also
	 * http://lwn.net/Articles/573497/.
	 */
	percpu_ref_switch_to_atomic_sync(&q->q_usage_counter);
	/* A zero usage count means no requests are in flight: OK to suspend. */
	if (percpu_ref_is_zero(&q->q_usage_counter))
		ret = 0;
	/* Switch q_usage_counter back to per-cpu mode. */
	blk_mq_unfreeze_queue(q);

	if (ret < 0) {
		/* Suspend refused: roll the status back and hint the PM core
		 * to retry autosuspend later. */
		spin_lock_irq(&q->queue_lock);
		q->rpm_status = RPM_ACTIVE;
		pm_runtime_mark_last_busy(q->dev);
		spin_unlock_irq(&q->queue_lock);

		blk_clear_pm_only(q);
	}

	return ret;
}
EXPORT_SYMBOL(blk_pre_runtime_suspend);
106 | ||
107 | /** | |
108 | * blk_post_runtime_suspend - Post runtime suspend processing | |
109 | * @q: the queue of the device | |
110 | * @err: return value of the device's runtime_suspend function | |
111 | * | |
112 | * Description: | |
113 | * Update the queue's runtime status according to the return value of the | |
114 | * device's runtime suspend function and mark last busy for the device so | |
115 | * that PM core will try to auto suspend the device at a later time. | |
116 | * | |
117 | * This function should be called near the end of the device's | |
118 | * runtime_suspend callback. | |
119 | */ | |
120 | void blk_post_runtime_suspend(struct request_queue *q, int err) | |
121 | { | |
122 | if (!q->dev) | |
123 | return; | |
124 | ||
0d945c1f | 125 | spin_lock_irq(&q->queue_lock); |
bca6b067 BVA |
126 | if (!err) { |
127 | q->rpm_status = RPM_SUSPENDED; | |
128 | } else { | |
129 | q->rpm_status = RPM_ACTIVE; | |
130 | pm_runtime_mark_last_busy(q->dev); | |
131 | } | |
0d945c1f | 132 | spin_unlock_irq(&q->queue_lock); |
7cedffec BVA |
133 | |
134 | if (err) | |
135 | blk_clear_pm_only(q); | |
bca6b067 BVA |
136 | } |
137 | EXPORT_SYMBOL(blk_post_runtime_suspend); | |
138 | ||
139 | /** | |
140 | * blk_pre_runtime_resume - Pre runtime resume processing | |
141 | * @q: the queue of the device | |
142 | * | |
143 | * Description: | |
144 | * Update the queue's runtime status to RESUMING in preparation for the | |
145 | * runtime resume of the device. | |
146 | * | |
147 | * This function should be called near the start of the device's | |
148 | * runtime_resume callback. | |
149 | */ | |
150 | void blk_pre_runtime_resume(struct request_queue *q) | |
151 | { | |
152 | if (!q->dev) | |
153 | return; | |
154 | ||
0d945c1f | 155 | spin_lock_irq(&q->queue_lock); |
bca6b067 | 156 | q->rpm_status = RPM_RESUMING; |
0d945c1f | 157 | spin_unlock_irq(&q->queue_lock); |
bca6b067 BVA |
158 | } |
159 | EXPORT_SYMBOL(blk_pre_runtime_resume); | |
160 | ||
/**
 * blk_post_runtime_resume - Post runtime resume processing
 * @q: the queue of the device
 *
 * Description:
 *    For historical reasons, this routine merely calls blk_set_runtime_active()
 *    to do the real work of restarting the queue. It does this regardless of
 *    whether the device's runtime-resume succeeded; even if it failed the
 *    driver or error handler will need to communicate with the device.
 *
 *    This function should be called near the end of the device's
 *    runtime_resume callback.
 */
void blk_post_runtime_resume(struct request_queue *q)
{
	blk_set_runtime_active(q);
}
EXPORT_SYMBOL(blk_post_runtime_resume);
179 | ||
180 | /** | |
181 | * blk_set_runtime_active - Force runtime status of the queue to be active | |
182 | * @q: the queue of the device | |
183 | * | |
184 | * If the device is left runtime suspended during system suspend the resume | |
185 | * hook typically resumes the device and corrects runtime status | |
186 | * accordingly. However, that does not affect the queue runtime PM status | |
187 | * which is still "suspended". This prevents processing requests from the | |
188 | * queue. | |
189 | * | |
190 | * This function can be used in driver's resume hook to correct queue | |
191 | * runtime PM status and re-enable peeking requests from the queue. It | |
192 | * should be called before first request is added to the queue. | |
8f38f8e0 | 193 | * |
6e1fcab0 | 194 | * This function is also called by blk_post_runtime_resume() for |
8f38f8e0 | 195 | * runtime resumes. It does everything necessary to restart the queue. |
bca6b067 BVA |
196 | */ |
197 | void blk_set_runtime_active(struct request_queue *q) | |
198 | { | |
8f38f8e0 AS |
199 | int old_status; |
200 | ||
201 | if (!q->dev) | |
202 | return; | |
203 | ||
204 | spin_lock_irq(&q->queue_lock); | |
205 | old_status = q->rpm_status; | |
206 | q->rpm_status = RPM_ACTIVE; | |
207 | pm_runtime_mark_last_busy(q->dev); | |
208 | pm_request_autosuspend(q->dev); | |
209 | spin_unlock_irq(&q->queue_lock); | |
210 | ||
211 | if (old_status != RPM_ACTIVE) | |
212 | blk_clear_pm_only(q); | |
bca6b067 BVA |
213 | } |
214 | EXPORT_SYMBOL(blk_set_runtime_active); |