/* drivers/base/power/qos.c */
// SPDX-License-Identifier: GPL-2.0
/*
 * Devices PM QoS constraints management
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * This module exposes the interface to kernel space for specifying
 * per-device PM QoS dependencies. It provides infrastructure for registration
 * of:
 *
 * Dependents on a QoS value : register requests
 * Watchers of QoS value : get notified when target QoS value changes
 *
 * This QoS design is best effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 * Watchers can register a per-device notification callback using the
 * dev_pm_qos_*_notifier API. The notification chain data is stored in the
 * per-device constraint data struct.
 *
 * Note about the per-device constraint data struct allocation:
 * . The per-device constraints data struct ptr is stored into the device
 *    dev_pm_info.
 * . To minimize the data usage by the per-device constraints, the data struct
 *    is only allocated at the first call to dev_pm_qos_add_request.
 * . The data is later free'd when the device is removed from the system.
 * . A global mutex protects the constraints users from the data being
 *    allocated and free'd.
 */
#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>

#include "power.h"

/* Protects allocation and release of the per-device constraint data. */
static DEFINE_MUTEX(dev_pm_qos_mtx);
/* Serializes creation/removal of the PM QoS sysfs attribute groups. */
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
	struct dev_pm_qos *qos = dev->power.qos;
	struct pm_qos_flags *pqf;
	s32 val;

	lockdep_assert_held(&dev->power.lock);

	/* qos is ERR_PTR(-ENODEV) after constraints destruction, NULL before
	 * the first request is added — both mean "no flags defined". */
	if (IS_ERR_OR_NULL(qos))
		return PM_QOS_FLAGS_UNDEFINED;

	pqf = &qos->flags;
	if (list_empty(&pqf->list))
		return PM_QOS_FLAGS_UNDEFINED;

	val = pqf->effective_flags & mask;
	if (val)
		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

	return PM_QOS_FLAGS_NONE;
}
73
74/**
75 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
76 * @dev: Device to check the PM QoS flags for.
77 * @mask: Flags to check against.
78 */
79enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
80{
81 unsigned long irqflags;
82 enum pm_qos_flags_status ret;
83
84 spin_lock_irqsave(&dev->power.lock, irqflags);
85 ret = __dev_pm_qos_flags(dev, mask);
86 spin_unlock_irqrestore(&dev->power.lock, irqflags);
87
88 return ret;
89}
6802771b 90EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
ae0fb4b7 91
/**
 * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_read_value(struct device *dev)
{
	lockdep_assert_held(&dev->power.lock);

	return dev_pm_qos_raw_read_value(dev);
}
105/**
106 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
1a9a9152
RW
107 * @dev: Device to get the PM QoS constraint value for.
108 */
109s32 dev_pm_qos_read_value(struct device *dev)
110{
1a9a9152 111 unsigned long flags;
00dc9ad1 112 s32 ret;
1a9a9152
RW
113
114 spin_lock_irqsave(&dev->power.lock, flags);
00dc9ad1 115 ret = __dev_pm_qos_read_value(dev);
1a9a9152
RW
116 spin_unlock_irqrestore(&dev->power.lock, flags);
117
118 return ret;
119}
120
/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and if needed call the per-device callbacks.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch(req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		/* Negative values are only meaningful for removal. */
		if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0))
			value = 0;

		ret = pm_qos_update_target(&qos->resume_latency,
					   &req->data.pnode, action, value);
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		ret = pm_qos_update_target(&qos->latency_tolerance,
					   &req->data.pnode, action, value);
		if (ret) {
			/* Aggregate value changed: push it to the driver. */
			value = pm_qos_read_value(&qos->latency_tolerance);
			req->dev->power.set_latency_tolerance(req->dev, value);
		}
		break;
	case DEV_PM_QOS_FLAGS:
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation
 * Must be called with the dev_pm_qos_mtx mutex held
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct pm_qos_constraints *c;
	struct blocking_notifier_head *n;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return -ENOMEM;

	/* The notifier head is allocated separately; it is freed in
	 * dev_pm_qos_constraints_destroy(). */
	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(qos);
		return -ENOMEM;
	}
	BLOCKING_INIT_NOTIFIER_HEAD(n);

	c = &qos->resume_latency;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;
	c->notifiers = n;

	c = &qos->latency_tolerance;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;

	INIT_LIST_HEAD(&qos->flags.list);

	/* Publish the pointer under dev->power.lock so that readers holding
	 * that lock see a fully initialized structure. */
	spin_lock_irq(&dev->power.lock);
	dev->power.qos = qos;
	spin_unlock_irq(&dev->power.lock);

	return 0;
}
/* Forward declarations, defined below next to the sysfs-exposure helpers. */
static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);
214/**
215 * dev_pm_qos_constraints_destroy
216 * @dev: target device
217 *
1a9a9152 218 * Called from the device PM subsystem on device removal under device_pm_lock().
91ff4cb8
JP
219 */
220void dev_pm_qos_constraints_destroy(struct device *dev)
221{
5f986c59 222 struct dev_pm_qos *qos;
91ff4cb8 223 struct dev_pm_qos_request *req, *tmp;
1a9a9152 224 struct pm_qos_constraints *c;
35546bd4 225 struct pm_qos_flags *f;
91ff4cb8 226
0f703069 227 mutex_lock(&dev_pm_qos_sysfs_mtx);
37530f2b 228
85dc0b8a 229 /*
35546bd4
RW
230 * If the device's PM QoS resume latency limit or PM QoS flags have been
231 * exposed to user space, they have to be hidden at this point.
85dc0b8a 232 */
b02f6695 233 pm_qos_sysfs_remove_resume_latency(dev);
0f703069
RW
234 pm_qos_sysfs_remove_flags(dev);
235
236 mutex_lock(&dev_pm_qos_mtx);
237
37530f2b
RW
238 __dev_pm_qos_hide_latency_limit(dev);
239 __dev_pm_qos_hide_flags(dev);
91ff4cb8 240
5f986c59
RW
241 qos = dev->power.qos;
242 if (!qos)
1a9a9152 243 goto out;
91ff4cb8 244
35546bd4 245 /* Flush the constraints lists for the device. */
b02f6695 246 c = &qos->resume_latency;
021c870b 247 plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
1a9a9152
RW
248 /*
249 * Update constraints list and call the notification
250 * callbacks if needed
251 */
252 apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
253 memset(req, 0, sizeof(*req));
91ff4cb8 254 }
2d984ad1
RW
255 c = &qos->latency_tolerance;
256 plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
257 apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
258 memset(req, 0, sizeof(*req));
259 }
35546bd4
RW
260 f = &qos->flags;
261 list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
262 apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
263 memset(req, 0, sizeof(*req));
91ff4cb8 264 }
91ff4cb8 265
1a9a9152 266 spin_lock_irq(&dev->power.lock);
37530f2b 267 dev->power.qos = ERR_PTR(-ENODEV);
1a9a9152
RW
268 spin_unlock_irq(&dev->power.lock);
269
e84b4a84 270 kfree(qos->resume_latency.notifiers);
9eaee2cd 271 kfree(qos);
1a9a9152
RW
272
273 out:
91ff4cb8 274 mutex_unlock(&dev_pm_qos_mtx);
0f703069
RW
275
276 mutex_unlock(&dev_pm_qos_sysfs_mtx);
91ff4cb8
JP
277}
278
41ba8bd0
JS
279static bool dev_pm_qos_invalid_req_type(struct device *dev,
280 enum dev_pm_qos_req_type type)
2d984ad1 281{
41ba8bd0
JS
282 return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
283 !dev->power.set_latency_tolerance;
2d984ad1
RW
284}
285
/*
 * __dev_pm_qos_add_request - add a request, caller holds dev_pm_qos_mtx.
 * Allocates the per-device constraint data on first use.
 */
static int __dev_pm_qos_add_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
		return -EINVAL;

	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	/* ERR_PTR means the device has been removed; NULL means first use. */
	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
	if (!ret) {
		req->dev = dev;
		req->type = type;
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
	}
	return ret;
}
91ff4cb8
JP
313/**
314 * dev_pm_qos_add_request - inserts new qos request into the list
315 * @dev: target device for the constraint
316 * @req: pointer to a preallocated handle
ae0fb4b7 317 * @type: type of the request
91ff4cb8
JP
318 * @value: defines the qos request
319 *
320 * This function inserts a new entry in the device constraints list of
321 * requested qos performance characteristics. It recomputes the aggregate
322 * QoS expectations of parameters and initializes the dev_pm_qos_request
323 * handle. Caller needs to save this handle for later use in updates and
324 * removal.
325 *
326 * Returns 1 if the aggregated constraint value has changed,
327 * 0 if the aggregated constraint value has not changed,
1a9a9152
RW
328 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
329 * to allocate for data structures, -ENODEV if the device has just been removed
330 * from the system.
436ede89
RW
331 *
332 * Callers should ensure that the target device is not RPM_SUSPENDED before
333 * using this function for requests of type DEV_PM_QOS_FLAGS.
91ff4cb8
JP
334 */
335int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
ae0fb4b7 336 enum dev_pm_qos_req_type type, s32 value)
91ff4cb8 337{
2d984ad1 338 int ret;
91ff4cb8 339
1a9a9152 340 mutex_lock(&dev_pm_qos_mtx);
2d984ad1 341 ret = __dev_pm_qos_add_request(dev, req, type, value);
91ff4cb8
JP
342 mutex_unlock(&dev_pm_qos_mtx);
343 return ret;
344}
345EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
346
e39473d0
RW
347/**
348 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
349 * @req : PM QoS request to modify.
350 * @new_value: New value to request.
351 */
352static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
353 s32 new_value)
354{
355 s32 curr_value;
356 int ret = 0;
357
b81ea1b5
RW
358 if (!req) /*guard against callers passing in null */
359 return -EINVAL;
360
361 if (WARN(!dev_pm_qos_request_active(req),
362 "%s() called for unknown object\n", __func__))
363 return -EINVAL;
364
37530f2b 365 if (IS_ERR_OR_NULL(req->dev->power.qos))
e39473d0
RW
366 return -ENODEV;
367
368 switch(req->type) {
b02f6695 369 case DEV_PM_QOS_RESUME_LATENCY:
2d984ad1 370 case DEV_PM_QOS_LATENCY_TOLERANCE:
e39473d0
RW
371 curr_value = req->data.pnode.prio;
372 break;
373 case DEV_PM_QOS_FLAGS:
374 curr_value = req->data.flr.flags;
375 break;
376 default:
377 return -EINVAL;
378 }
379
96d9d0b5
S
380 trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
381 new_value);
e39473d0
RW
382 if (curr_value != new_value)
383 ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);
384
385 return ret;
386}
387
91ff4cb8
JP
388/**
389 * dev_pm_qos_update_request - modifies an existing qos request
390 * @req : handle to list element holding a dev_pm_qos request to use
391 * @new_value: defines the qos request
392 *
393 * Updates an existing dev PM qos request along with updating the
394 * target value.
395 *
396 * Attempts are made to make this code callable on hot code paths.
397 *
398 * Returns 1 if the aggregated constraint value has changed,
399 * 0 if the aggregated constraint value has not changed,
400 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
401 * removed from the system
436ede89
RW
402 *
403 * Callers should ensure that the target device is not RPM_SUSPENDED before
404 * using this function for requests of type DEV_PM_QOS_FLAGS.
91ff4cb8 405 */
e39473d0 406int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
91ff4cb8 407{
e39473d0 408 int ret;
91ff4cb8 409
b81ea1b5
RW
410 mutex_lock(&dev_pm_qos_mtx);
411 ret = __dev_pm_qos_update_request(req, new_value);
412 mutex_unlock(&dev_pm_qos_mtx);
413 return ret;
414}
415EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
416
417static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
418{
37530f2b 419 int ret;
b81ea1b5 420
91ff4cb8
JP
421 if (!req) /*guard against callers passing in null */
422 return -EINVAL;
423
af4c720e
GL
424 if (WARN(!dev_pm_qos_request_active(req),
425 "%s() called for unknown object\n", __func__))
91ff4cb8 426 return -EINVAL;
91ff4cb8 427
37530f2b
RW
428 if (IS_ERR_OR_NULL(req->dev->power.qos))
429 return -ENODEV;
430
96d9d0b5
S
431 trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
432 PM_QOS_DEFAULT_VALUE);
37530f2b
RW
433 ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
434 memset(req, 0, sizeof(*req));
91ff4cb8
JP
435 return ret;
436}
/**
 * dev_pm_qos_remove_request - modifies an existing qos request
 * @req: handle to request list element
 *
 * Will remove pm qos request from the list of constraints and
 * recompute the current target value. Call this on slow code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_remove_request(req);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
463
464/**
465 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
466 * of per-device PM QoS constraints
467 *
468 * @dev: target device for the constraint
469 * @notifier: notifier block managed by caller.
0b07ee94 470 * @type: request type.
91ff4cb8
JP
471 *
472 * Will register the notifier into a notification chain that gets called
473 * upon changes to the target value for the device.
23e0fc5a
RW
474 *
475 * If the device's constraints object doesn't exist when this routine is called,
476 * it will be created (or error code will be returned if that fails).
91ff4cb8 477 */
0b07ee94
VK
478int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
479 enum dev_pm_qos_req_type type)
91ff4cb8 480{
23e0fc5a 481 int ret = 0;
91ff4cb8 482
0b07ee94
VK
483 if (WARN_ON(type != DEV_PM_QOS_RESUME_LATENCY))
484 return -EINVAL;
485
91ff4cb8
JP
486 mutex_lock(&dev_pm_qos_mtx);
487
37530f2b
RW
488 if (IS_ERR(dev->power.qos))
489 ret = -ENODEV;
490 else if (!dev->power.qos)
491 ret = dev_pm_qos_constraints_allocate(dev);
23e0fc5a
RW
492
493 if (!ret)
b02f6695
RW
494 ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
495 notifier);
91ff4cb8 496
91ff4cb8 497 mutex_unlock(&dev_pm_qos_mtx);
23e0fc5a 498 return ret;
91ff4cb8
JP
499}
500EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 * @type: request type.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier,
			       enum dev_pm_qos_req_type type)
{
	int retval = 0;

	/* Only resume-latency notifiers are supported at this point. */
	if (WARN_ON(type != DEV_PM_QOS_RESUME_LATENCY))
		return -EINVAL;

	mutex_lock(&dev_pm_qos_mtx);

	/* Silently return if the constraints object is not present. */
	if (!IS_ERR_OR_NULL(dev->power.qos))
		retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
							    notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return retval;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @type: Type of the request.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	struct device *ancestor = dev->parent;
	int ret = -ENODEV;

	/* Walk up the hierarchy to the first ancestor that can take the
	 * given request type. */
	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		while (ancestor && !ancestor->power.ignore_children)
			ancestor = ancestor->parent;

		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		while (ancestor && !ancestor->power.set_latency_tolerance)
			ancestor = ancestor->parent;

		break;
	default:
		ancestor = NULL;
	}
	if (ancestor)
		ret = dev_pm_qos_add_request(ancestor, req, type, value);

	if (ret < 0)
		req->dev = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
85dc0b8a 571
e39473d0
RW
572static void __dev_pm_qos_drop_user_request(struct device *dev,
573 enum dev_pm_qos_req_type type)
85dc0b8a 574{
b81ea1b5
RW
575 struct dev_pm_qos_request *req = NULL;
576
e39473d0 577 switch(type) {
b02f6695
RW
578 case DEV_PM_QOS_RESUME_LATENCY:
579 req = dev->power.qos->resume_latency_req;
580 dev->power.qos->resume_latency_req = NULL;
e39473d0 581 break;
2d984ad1
RW
582 case DEV_PM_QOS_LATENCY_TOLERANCE:
583 req = dev->power.qos->latency_tolerance_req;
584 dev->power.qos->latency_tolerance_req = NULL;
585 break;
e39473d0 586 case DEV_PM_QOS_FLAGS:
b81ea1b5 587 req = dev->power.qos->flags_req;
e39473d0
RW
588 dev->power.qos->flags_req = NULL;
589 break;
590 }
b81ea1b5
RW
591 __dev_pm_qos_remove_request(req);
592 kfree(req);
85dc0b8a
RW
593}
594
0f703069
RW
595static void dev_pm_qos_drop_user_request(struct device *dev,
596 enum dev_pm_qos_req_type type)
597{
598 mutex_lock(&dev_pm_qos_mtx);
599 __dev_pm_qos_drop_user_request(dev, type);
600 mutex_unlock(&dev_pm_qos_mtx);
601}
602
85dc0b8a
RW
603/**
604 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
605 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
606 * @value: Initial value of the latency limit.
607 */
608int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
609{
610 struct dev_pm_qos_request *req;
611 int ret;
612
613 if (!device_is_registered(dev) || value < 0)
614 return -EINVAL;
615
85dc0b8a
RW
616 req = kzalloc(sizeof(*req), GFP_KERNEL);
617 if (!req)
618 return -ENOMEM;
619
b02f6695 620 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
b81ea1b5
RW
621 if (ret < 0) {
622 kfree(req);
85dc0b8a 623 return ret;
b81ea1b5
RW
624 }
625
0f703069
RW
626 mutex_lock(&dev_pm_qos_sysfs_mtx);
627
b81ea1b5
RW
628 mutex_lock(&dev_pm_qos_mtx);
629
37530f2b 630 if (IS_ERR_OR_NULL(dev->power.qos))
b81ea1b5 631 ret = -ENODEV;
b02f6695 632 else if (dev->power.qos->resume_latency_req)
b81ea1b5
RW
633 ret = -EEXIST;
634
635 if (ret < 0) {
636 __dev_pm_qos_remove_request(req);
637 kfree(req);
0f703069 638 mutex_unlock(&dev_pm_qos_mtx);
b81ea1b5
RW
639 goto out;
640 }
b02f6695 641 dev->power.qos->resume_latency_req = req;
0f703069
RW
642
643 mutex_unlock(&dev_pm_qos_mtx);
644
b02f6695 645 ret = pm_qos_sysfs_add_resume_latency(dev);
85dc0b8a 646 if (ret)
b02f6695 647 dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
85dc0b8a 648
b81ea1b5 649 out:
0f703069 650 mutex_unlock(&dev_pm_qos_sysfs_mtx);
85dc0b8a
RW
651 return ret;
652}
653EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
654
37530f2b
RW
655static void __dev_pm_qos_hide_latency_limit(struct device *dev)
656{
b02f6695
RW
657 if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
658 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
37530f2b
RW
659}
660
85dc0b8a
RW
661/**
662 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
663 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
664 */
665void dev_pm_qos_hide_latency_limit(struct device *dev)
666{
0f703069
RW
667 mutex_lock(&dev_pm_qos_sysfs_mtx);
668
b02f6695 669 pm_qos_sysfs_remove_resume_latency(dev);
0f703069 670
b81ea1b5 671 mutex_lock(&dev_pm_qos_mtx);
37530f2b 672 __dev_pm_qos_hide_latency_limit(dev);
b81ea1b5 673 mutex_unlock(&dev_pm_qos_mtx);
0f703069
RW
674
675 mutex_unlock(&dev_pm_qos_sysfs_mtx);
85dc0b8a
RW
676}
677EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	/* DEV_PM_QOS_FLAGS requests must not be manipulated while the
	 * device is runtime-suspended. */
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->flags_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->flags_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_flags(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
732
37530f2b
RW
733static void __dev_pm_qos_hide_flags(struct device *dev)
734{
0f703069 735 if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
37530f2b 736 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
37530f2b
RW
737}
738
e39473d0
RW
739/**
740 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
741 * @dev: Device whose PM QoS flags are to be hidden from user space.
742 */
743void dev_pm_qos_hide_flags(struct device *dev)
744{
b81ea1b5 745 pm_runtime_get_sync(dev);
0f703069
RW
746 mutex_lock(&dev_pm_qos_sysfs_mtx);
747
748 pm_qos_sysfs_remove_flags(dev);
749
b81ea1b5 750 mutex_lock(&dev_pm_qos_mtx);
37530f2b 751 __dev_pm_qos_hide_flags(dev);
b81ea1b5 752 mutex_unlock(&dev_pm_qos_mtx);
0f703069
RW
753
754 mutex_unlock(&dev_pm_qos_sysfs_mtx);
b81ea1b5 755 pm_runtime_put(dev);
e39473d0
RW
756}
757EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
	s32 value;
	int ret;

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_mtx);

	/* Only valid while the flags request is exposed to user space. */
	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
		ret = -EINVAL;
		goto out;
	}

	value = dev_pm_qos_requested_flags(dev);
	if (set)
		value |= mask;
	else
		value &= ~mask;

	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	pm_runtime_put(dev);
	return ret;
}
2d984ad1
RW
791
792/**
793 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
794 * @dev: Device to obtain the user space latency tolerance for.
795 */
796s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
797{
798 s32 ret;
799
800 mutex_lock(&dev_pm_qos_mtx);
801 ret = IS_ERR_OR_NULL(dev->power.qos)
802 || !dev->power.qos->latency_tolerance_req ?
803 PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
804 dev->power.qos->latency_tolerance_req->data.pnode.prio;
805 mutex_unlock(&dev_pm_qos_mtx);
806 return ret;
807}
808
809/**
810 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
811 * @dev: Device to update the user space latency tolerance for.
812 * @val: New user space latency tolerance for @dev (negative values disable).
813 */
814int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
815{
816 int ret;
817
818 mutex_lock(&dev_pm_qos_mtx);
819
820 if (IS_ERR_OR_NULL(dev->power.qos)
821 || !dev->power.qos->latency_tolerance_req) {
822 struct dev_pm_qos_request *req;
823
824 if (val < 0) {
80a6f7c7
AL
825 if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
826 ret = 0;
827 else
828 ret = -EINVAL;
2d984ad1
RW
829 goto out;
830 }
831 req = kzalloc(sizeof(*req), GFP_KERNEL);
832 if (!req) {
833 ret = -ENOMEM;
834 goto out;
835 }
836 ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
837 if (ret < 0) {
838 kfree(req);
839 goto out;
840 }
841 dev->power.qos->latency_tolerance_req = req;
842 } else {
843 if (val < 0) {
844 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
845 ret = 0;
846 } else {
847 ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
848 }
849 }
850
851 out:
852 mutex_unlock(&dev_pm_qos_mtx);
853 return ret;
854}
034e7906 855EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
/**
 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
 * @dev: Device whose latency tolerance to expose
 */
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
	int ret;

	/* Meaningless without a driver-provided tolerance hook. */
	if (!dev->power.set_latency_tolerance)
		return -EINVAL;

	mutex_lock(&dev_pm_qos_sysfs_mtx);
	ret = pm_qos_sysfs_add_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);
875
876/**
877 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
878 * @dev: Device whose latency tolerance to hide
879 */
880void dev_pm_qos_hide_latency_tolerance(struct device *dev)
881{
882 mutex_lock(&dev_pm_qos_sysfs_mtx);
883 pm_qos_sysfs_remove_latency_tolerance(dev);
884 mutex_unlock(&dev_pm_qos_sysfs_mtx);
885
886 /* Remove the request from user space now */
887 pm_runtime_get_sync(dev);
888 dev_pm_qos_update_user_latency_tolerance(dev,
889 PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
890 pm_runtime_put(dev);
891}
892EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);