1 // SPDX-License-Identifier: GPL-2.0
3 * Devices PM QoS constraints management
5 * Copyright (C) 2011 Texas Instruments, Inc.
7 * This module exposes the interface to kernel space for specifying
8 * per-device PM QoS dependencies. It provides infrastructure for registration
11 * Dependents on a QoS value : register requests
12 * Watchers of QoS value : get notified when target QoS value changes
14 * This QoS design is best effort based. Dependents register their QoS needs.
15 * Watchers register to keep track of the current QoS needs of the system.
16 * Watchers can register a per-device notification callback using the
17 * dev_pm_qos_*_notifier API. The notification chain data is stored in the
18 * per-device constraint data struct.
20 * Note about the per-device constraint data struct allocation:
21 * . The per-device constraints data struct ptr is stored into the device
23 * . To minimize the data usage by the per-device constraints, the data struct
24 * is only allocated at the first call to dev_pm_qos_add_request.
25 * . The data is later free'd when the device is removed from the system.
26 * . A global mutex protects the constraints users from the data being
27 * allocated and free'd.
30 #include <linux/pm_qos.h>
31 #include <linux/spinlock.h>
32 #include <linux/slab.h>
33 #include <linux/device.h>
34 #include <linux/mutex.h>
35 #include <linux/export.h>
36 #include <linux/pm_runtime.h>
37 #include <linux/err.h>
38 #include <trace/events/power.h>
/* Serializes allocation/destruction and use of dev->power.qos objects. */
42 static DEFINE_MUTEX(dev_pm_qos_mtx);
/* Serializes addition/removal of the PM QoS sysfs attributes. */
43 static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
46 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
47 * @dev: Device to check the PM QoS flags for.
48 * @mask: Flags to check against.
50 * This routine must be called with dev->power.lock held.
52 enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
54 struct dev_pm_qos *qos = dev->power.qos;
55 struct pm_qos_flags *pqf;
/* NOTE(review): the initialization of pqf (presumably &qos->flags) and
 * the declaration of val appear to be missing from this extraction --
 * confirm against the upstream file. */
58 lockdep_assert_held(&dev->power.lock);
/* No constraints object: never allocated, or device being removed. */
60 if (IS_ERR_OR_NULL(qos))
61 return PM_QOS_FLAGS_UNDEFINED;
/* No flags requests registered for this device. */
64 if (list_empty(&pqf->list))
65 return PM_QOS_FLAGS_UNDEFINED;
67 val = pqf->effective_flags & mask;
/* Distinguish "all requested flags set" from "only some set". */
69 return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;
71 return PM_QOS_FLAGS_NONE;
75 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
76 * @dev: Device to check the PM QoS flags for.
77 * @mask: Flags to check against.
79 enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
81 unsigned long irqflags;
82 enum pm_qos_flags_status ret;
/* Take power.lock, as required by __dev_pm_qos_flags(). */
84 spin_lock_irqsave(&dev->power.lock, irqflags);
85 ret = __dev_pm_qos_flags(dev, mask);
86 spin_unlock_irqrestore(&dev->power.lock, irqflags);
90 EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
93 * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
94 * @dev: Device to get the PM QoS constraint value for.
96 * This routine must be called with dev->power.lock held.
98 s32 __dev_pm_qos_read_value(struct device *dev)
100 lockdep_assert_held(&dev->power.lock);
/* Delegates to the lockless raw read; the lock is only asserted here. */
102 return dev_pm_qos_raw_read_value(dev);
106 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
107 * @dev: Device to get the PM QoS constraint value for.
109 s32 dev_pm_qos_read_value(struct device *dev)
/* Locked wrapper around __dev_pm_qos_read_value(). */
114 spin_lock_irqsave(&dev->power.lock, flags);
115 ret = __dev_pm_qos_read_value(dev);
116 spin_unlock_irqrestore(&dev->power.lock, flags);
122 * apply_constraint - Add/modify/remove device PM QoS request.
123 * @req: Constraint request to apply
124 * @action: Action to perform (add/update/remove).
125 * @value: Value to assign to the QoS request.
127 * Internal function to update the constraints list using the PM QoS core
128 * code and if needed call the per-device callbacks.
130 static int apply_constraint(struct dev_pm_qos_request *req,
131 enum pm_qos_req_action action, s32 value)
133 struct dev_pm_qos *qos = req->dev->power.qos;
/* Dispatch on the request type: each case updates a different aggregate
 * inside the per-device constraints object.
 * NOTE(review): the switch statement itself, the ret declaration and the
 * break statements appear to be missing from this extraction -- confirm
 * against the upstream file. */
137 case DEV_PM_QOS_RESUME_LATENCY:
/* A negative resume latency is invalid except when removing. */
138 if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0))
141 ret = pm_qos_update_target(&qos->resume_latency,
142 &req->data.pnode, action, value);
144 case DEV_PM_QOS_LATENCY_TOLERANCE:
145 ret = pm_qos_update_target(&qos->latency_tolerance,
146 &req->data.pnode, action, value);
/* Push the new aggregate tolerance down to the driver callback. */
148 value = pm_qos_read_value(&qos->latency_tolerance);
149 req->dev->power.set_latency_tolerance(req->dev, value);
152 case DEV_PM_QOS_FLAGS:
153 ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
164 * dev_pm_qos_constraints_allocate
165 * @dev: device to allocate data for
167 * Called at the first call to add_request, for constraint data allocation
168 * Must be called with the dev_pm_qos_mtx mutex held
170 static int dev_pm_qos_constraints_allocate(struct device *dev)
172 struct dev_pm_qos *qos;
173 struct pm_qos_constraints *c;
174 struct blocking_notifier_head *n;
176 qos = kzalloc(sizeof(*qos), GFP_KERNEL);
/* The resume-latency notifier head is a separate allocation; it is
 * freed in dev_pm_qos_constraints_destroy(). */
180 n = kzalloc(sizeof(*n), GFP_KERNEL);
185 BLOCKING_INIT_NOTIFIER_HEAD(n);
/* Resume-latency constraint set: aggregated as a minimum (PM_QOS_MIN). */
187 c = &qos->resume_latency;
188 plist_head_init(&c->list);
189 c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
190 c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
191 c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
192 c->type = PM_QOS_MIN;
/* Latency-tolerance constraint set, also aggregated as a minimum. */
195 c = &qos->latency_tolerance;
196 plist_head_init(&c->list);
197 c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
198 c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
199 c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
200 c->type = PM_QOS_MIN;
202 INIT_LIST_HEAD(&qos->flags.list);
/* Publish the fully-initialized object under power.lock so that lockless
 * readers never observe a half-built structure. */
204 spin_lock_irq(&dev->power.lock);
205 dev->power.qos = qos;
206 spin_unlock_irq(&dev->power.lock);
/* Forward declarations: used below before their definitions. */
211 static void __dev_pm_qos_hide_latency_limit(struct device *dev);
212 static void __dev_pm_qos_hide_flags(struct device *dev);
215 * dev_pm_qos_constraints_destroy
216 * @dev: target device
218 * Called from the device PM subsystem on device removal under device_pm_lock().
220 void dev_pm_qos_constraints_destroy(struct device *dev)
222 struct dev_pm_qos *qos;
223 struct dev_pm_qos_request *req, *tmp;
224 struct pm_qos_constraints *c;
225 struct pm_qos_flags *f;
227 mutex_lock(&dev_pm_qos_sysfs_mtx);
230 * If the device's PM QoS resume latency limit or PM QoS flags have been
231 * exposed to user space, they have to be hidden at this point.
233 pm_qos_sysfs_remove_resume_latency(dev);
234 pm_qos_sysfs_remove_flags(dev);
236 mutex_lock(&dev_pm_qos_mtx);
/* Drop any user-space owned requests before flushing the lists. */
238 __dev_pm_qos_hide_latency_limit(dev);
239 __dev_pm_qos_hide_flags(dev);
241 qos = dev->power.qos;
245 /* Flush the constraints lists for the device. */
246 c = &qos->resume_latency;
247 plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
249 * Update constraints list and call the notification
250 * callbacks if needed
252 apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
253 memset(req, 0, sizeof(*req));
255 c = &qos->latency_tolerance;
256 plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
257 apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
258 memset(req, 0, sizeof(*req));
/* NOTE(review): the initialization of f (presumably &qos->flags)
 * appears to be missing from this extraction -- confirm upstream. */
261 list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
262 apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
263 memset(req, 0, sizeof(*req));
/* Mark the device as going away: subsequent request additions will see
 * an ERR_PTR and fail with -ENODEV. */
266 spin_lock_irq(&dev->power.lock);
267 dev->power.qos = ERR_PTR(-ENODEV);
268 spin_unlock_irq(&dev->power.lock);
/* Free the separately-allocated notifier head (see the allocate path). */
270 kfree(qos->resume_latency.notifiers);
274 mutex_unlock(&dev_pm_qos_mtx);
276 mutex_unlock(&dev_pm_qos_sysfs_mtx);
/* A latency-tolerance request is only meaningful when the device
 * implements the set_latency_tolerance() callback. */
279 static bool dev_pm_qos_invalid_req_type(struct device *dev,
280 enum dev_pm_qos_req_type type)
282 return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
283 !dev->power.set_latency_tolerance;
/* Core of dev_pm_qos_add_request(); caller must hold dev_pm_qos_mtx. */
286 static int __dev_pm_qos_add_request(struct device *dev,
287 struct dev_pm_qos_request *req,
288 enum dev_pm_qos_req_type type, s32 value)
292 if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
/* Re-adding an already-active request is a caller bug: warn and bail. */
295 if (WARN(dev_pm_qos_request_active(req),
296 "%s() called for already added request\n", __func__))
/* ERR_PTR means the device has been removed; NULL means this is the
 * first request, so allocate the constraints object now. */
299 if (IS_ERR(dev->power.qos))
301 else if (!dev->power.qos)
302 ret = dev_pm_qos_constraints_allocate(dev);
304 trace_dev_pm_qos_add_request(dev_name(dev), type, value);
308 ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
314 * dev_pm_qos_add_request - inserts new qos request into the list
315 * @dev: target device for the constraint
316 * @req: pointer to a preallocated handle
317 * @type: type of the request
318 * @value: defines the qos request
320 * This function inserts a new entry in the device constraints list of
321 * requested qos performance characteristics. It recomputes the aggregate
322 * QoS expectations of parameters and initializes the dev_pm_qos_request
323 * handle. Caller needs to save this handle for later use in updates and
326 * Returns 1 if the aggregated constraint value has changed,
327 * 0 if the aggregated constraint value has not changed,
328 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
329 * to allocate for data structures, -ENODEV if the device has just been removed
332 * Callers should ensure that the target device is not RPM_SUSPENDED before
333 * using this function for requests of type DEV_PM_QOS_FLAGS.
335 int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
336 enum dev_pm_qos_req_type type, s32 value)
/* Locked wrapper: all constraint mutation goes through dev_pm_qos_mtx. */
340 mutex_lock(&dev_pm_qos_mtx);
341 ret = __dev_pm_qos_add_request(dev, req, type, value);
342 mutex_unlock(&dev_pm_qos_mtx);
345 EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
348 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
349 * @req : PM QoS request to modify.
350 * @new_value: New value to request.
352 static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
358 if (!req) /*guard against callers passing in null */
361 if (WARN(!dev_pm_qos_request_active(req),
362 "%s() called for unknown object\n", __func__))
/* Device already removed from the system. */
365 if (IS_ERR_OR_NULL(req->dev->power.qos))
/* Read the currently requested value so a no-op update can be skipped.
 * NOTE(review): the switch statement around these cases appears to be
 * missing from this extraction -- confirm against upstream. */
369 case DEV_PM_QOS_RESUME_LATENCY:
370 case DEV_PM_QOS_LATENCY_TOLERANCE:
371 curr_value = req->data.pnode.prio;
373 case DEV_PM_QOS_FLAGS:
374 curr_value = req->data.flr.flags;
380 trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
/* Only touch the constraints list when the value actually changes. */
382 if (curr_value != new_value)
383 ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);
389 * dev_pm_qos_update_request - modifies an existing qos request
390 * @req : handle to list element holding a dev_pm_qos request to use
391 * @new_value: defines the qos request
393 * Updates an existing dev PM qos request along with updating the
396 * Attempts are made to make this code callable on hot code paths.
398 * Returns 1 if the aggregated constraint value has changed,
399 * 0 if the aggregated constraint value has not changed,
400 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
401 * removed from the system
403 * Callers should ensure that the target device is not RPM_SUSPENDED before
404 * using this function for requests of type DEV_PM_QOS_FLAGS.
406 int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
/* Locked wrapper around __dev_pm_qos_update_request(). */
410 mutex_lock(&dev_pm_qos_mtx);
411 ret = __dev_pm_qos_update_request(req, new_value);
412 mutex_unlock(&dev_pm_qos_mtx);
415 EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
/* Core of dev_pm_qos_remove_request(); caller must hold dev_pm_qos_mtx. */
417 static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
421 if (!req) /*guard against callers passing in null */
424 if (WARN(!dev_pm_qos_request_active(req),
425 "%s() called for unknown object\n", __func__))
/* Device already removed from the system. */
428 if (IS_ERR_OR_NULL(req->dev->power.qos))
431 trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
432 PM_QOS_DEFAULT_VALUE);
433 ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
/* Clear the handle so dev_pm_qos_request_active() reports it inactive. */
434 memset(req, 0, sizeof(*req));
439 * dev_pm_qos_remove_request - modifies an existing qos request
440 * @req: handle to request list element
442 * Will remove pm qos request from the list of constraints and
443 * recompute the current target value. Call this on slow code paths.
445 * Returns 1 if the aggregated constraint value has changed,
446 * 0 if the aggregated constraint value has not changed,
447 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
448 * removed from the system
450 * Callers should ensure that the target device is not RPM_SUSPENDED before
451 * using this function for requests of type DEV_PM_QOS_FLAGS.
453 int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
/* Locked wrapper around __dev_pm_qos_remove_request(). */
457 mutex_lock(&dev_pm_qos_mtx);
458 ret = __dev_pm_qos_remove_request(req);
459 mutex_unlock(&dev_pm_qos_mtx);
462 EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
465 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
466 * of per-device PM QoS constraints
468 * @dev: target device for the constraint
469 * @notifier: notifier block managed by caller.
471 * Will register the notifier into a notification chain that gets called
472 * upon changes to the target value for the device.
474 * If the device's constraints object doesn't exist when this routine is called,
475 * it will be created (or error code will be returned if that fails).
477 int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
481 mutex_lock(&dev_pm_qos_mtx);
/* Same lazy-allocation pattern as __dev_pm_qos_add_request(): ERR_PTR
 * means the device was removed, NULL means allocate on first use. */
483 if (IS_ERR(dev->power.qos))
485 else if (!dev->power.qos)
486 ret = dev_pm_qos_constraints_allocate(dev);
/* Only the resume-latency constraint has a notifier chain. */
489 ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
492 mutex_unlock(&dev_pm_qos_mtx);
495 EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
498 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
499 * of per-device PM QoS constraints
501 * @dev: target device for the constraint
502 * @notifier: notifier block to be removed.
504 * Will remove the notifier from the notification chain that gets called
505 * upon changes to the target value.
507 int dev_pm_qos_remove_notifier(struct device *dev,
508 struct notifier_block *notifier)
512 mutex_lock(&dev_pm_qos_mtx);
514 /* Silently return if the constraints object is not present. */
515 if (!IS_ERR_OR_NULL(dev->power.qos))
516 retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
519 mutex_unlock(&dev_pm_qos_mtx);
522 EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
525 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
526 * @dev: Device whose ancestor to add the request for.
527 * @req: Pointer to the preallocated handle.
528 * @type: Type of the request.
529 * @value: Constraint latency value.
531 int dev_pm_qos_add_ancestor_request(struct device *dev,
532 struct dev_pm_qos_request *req,
533 enum dev_pm_qos_req_type type, s32 value)
535 struct device *ancestor = dev->parent;
/* Walk up the parent chain to the nearest ancestor that can honor the
 * request type (NOTE(review): switch scaffolding appears to be missing
 * from this extraction). */
539 case DEV_PM_QOS_RESUME_LATENCY:
540 while (ancestor && !ancestor->power.ignore_children)
541 ancestor = ancestor->parent;
544 case DEV_PM_QOS_LATENCY_TOLERANCE:
545 while (ancestor && !ancestor->power.set_latency_tolerance)
546 ancestor = ancestor->parent;
/* Add the request against the ancestor found above (if any). */
553 ret = dev_pm_qos_add_request(ancestor, req, type, value);
560 EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
/* Detach and remove the user-space owned request of the given type.
 * Caller must hold dev_pm_qos_mtx. The pointer in dev->power.qos is
 * cleared before removal so concurrent lookups see it gone. */
562 static void __dev_pm_qos_drop_user_request(struct device *dev,
563 enum dev_pm_qos_req_type type)
565 struct dev_pm_qos_request *req = NULL;
568 case DEV_PM_QOS_RESUME_LATENCY:
569 req = dev->power.qos->resume_latency_req;
570 dev->power.qos->resume_latency_req = NULL;
572 case DEV_PM_QOS_LATENCY_TOLERANCE:
573 req = dev->power.qos->latency_tolerance_req;
574 dev->power.qos->latency_tolerance_req = NULL;
576 case DEV_PM_QOS_FLAGS:
577 req = dev->power.qos->flags_req;
578 dev->power.qos->flags_req = NULL;
/* Remove from the constraints list; req was kzalloc'd at expose time. */
581 __dev_pm_qos_remove_request(req);
/* Locked wrapper around __dev_pm_qos_drop_user_request(). */
585 static void dev_pm_qos_drop_user_request(struct device *dev,
586 enum dev_pm_qos_req_type type)
588 mutex_lock(&dev_pm_qos_mtx);
589 __dev_pm_qos_drop_user_request(dev, type);
590 mutex_unlock(&dev_pm_qos_mtx);
594 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
595 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
596 * @value: Initial value of the latency limit.
598 int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
600 struct dev_pm_qos_request *req;
603 if (!device_is_registered(dev) || value < 0)
/* The user-space request is heap-allocated; it is freed when dropped. */
606 req = kzalloc(sizeof(*req), GFP_KERNEL);
610 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
/* sysfs mutex first, then the QoS mutex -- same order as the destroy
 * path, to avoid lock inversion. */
616 mutex_lock(&dev_pm_qos_sysfs_mtx);
618 mutex_lock(&dev_pm_qos_mtx);
/* Bail out if the device was removed meanwhile, or if a latency limit
 * is already exposed; undo the request we just added. */
620 if (IS_ERR_OR_NULL(dev->power.qos))
622 else if (dev->power.qos->resume_latency_req)
626 __dev_pm_qos_remove_request(req);
628 mutex_unlock(&dev_pm_qos_mtx);
631 dev->power.qos->resume_latency_req = req;
633 mutex_unlock(&dev_pm_qos_mtx);
/* Create the sysfs attribute; roll back the request on failure. */
635 ret = pm_qos_sysfs_add_resume_latency(dev);
637 dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
640 mutex_unlock(&dev_pm_qos_sysfs_mtx);
643 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
/* Drop the user-space resume-latency request, if one is exposed.
 * Caller must hold dev_pm_qos_mtx. */
645 static void __dev_pm_qos_hide_latency_limit(struct device *dev)
647 if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
648 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
652 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
653 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
655 void dev_pm_qos_hide_latency_limit(struct device *dev)
656 {
657 	mutex_lock(&dev_pm_qos_sysfs_mtx);
/* Remove the sysfs attribute before dropping the request, so user space
 * cannot race with the removal. */
659 pm_qos_sysfs_remove_resume_latency(dev);
661 mutex_lock(&dev_pm_qos_mtx);
662 __dev_pm_qos_hide_latency_limit(dev);
663 mutex_unlock(&dev_pm_qos_mtx);
665 mutex_unlock(&dev_pm_qos_sysfs_mtx);
667 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
670 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
671 * @dev: Device whose PM QoS flags are to be exposed to user space.
672 * @val: Initial values of the flags.
674 int dev_pm_qos_expose_flags(struct device *dev, s32 val)
676 struct dev_pm_qos_request *req;
679 if (!device_is_registered(dev))
/* The user-space request is heap-allocated; it is freed when dropped. */
682 req = kzalloc(sizeof(*req), GFP_KERNEL);
686 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
/* Keep the device active while manipulating its flags request (see the
 * RPM_SUSPENDED note in dev_pm_qos_add_request's kerneldoc). */
692 pm_runtime_get_sync(dev);
693 mutex_lock(&dev_pm_qos_sysfs_mtx);
695 mutex_lock(&dev_pm_qos_mtx);
/* Bail out if the device was removed meanwhile, or if flags are already
 * exposed; undo the request we just added. */
697 if (IS_ERR_OR_NULL(dev->power.qos))
699 else if (dev->power.qos->flags_req)
703 __dev_pm_qos_remove_request(req);
705 mutex_unlock(&dev_pm_qos_mtx);
708 dev->power.qos->flags_req = req;
710 mutex_unlock(&dev_pm_qos_mtx);
/* Create the sysfs attribute; roll back the request on failure. */
712 ret = pm_qos_sysfs_add_flags(dev);
714 dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
717 mutex_unlock(&dev_pm_qos_sysfs_mtx);
721 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
/* Drop the user-space flags request, if one is exposed.
 * Caller must hold dev_pm_qos_mtx. */
723 static void __dev_pm_qos_hide_flags(struct device *dev)
725 if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
726 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
730 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
731 * @dev: Device whose PM QoS flags are to be hidden from user space.
733 void dev_pm_qos_hide_flags(struct device *dev)
/* Keep the device active while the flags request is being removed. */
735 pm_runtime_get_sync(dev);
736 mutex_lock(&dev_pm_qos_sysfs_mtx);
/* Remove the sysfs attribute before dropping the request. */
738 pm_qos_sysfs_remove_flags(dev);
740 mutex_lock(&dev_pm_qos_mtx);
741 __dev_pm_qos_hide_flags(dev);
742 mutex_unlock(&dev_pm_qos_mtx);
744 mutex_unlock(&dev_pm_qos_sysfs_mtx);
747 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
750 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
751 * @dev: Device to update the PM QoS flags request for.
752 * @mask: Flags to set/clear.
753 * @set: Whether to set or clear the flags (true means set).
755 int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
/* Keep the device active while updating its flags request. */
760 pm_runtime_get_sync(dev);
761 mutex_lock(&dev_pm_qos_mtx);
/* Nothing to update unless the flags request has been exposed. */
763 if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
/* Start from the flags currently requested by user space, then apply
 * the set/clear of the mask. */
768 value = dev_pm_qos_requested_flags(dev);
774 ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
777 mutex_unlock(&dev_pm_qos_mtx);
783 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
784 * @dev: Device to obtain the user space latency tolerance for.
786 s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
790 mutex_lock(&dev_pm_qos_mtx);
/* Report "no constraint" when no user-space tolerance request exists;
 * otherwise return the value currently requested by user space. */
791 ret = IS_ERR_OR_NULL(dev->power.qos)
792 || !dev->power.qos->latency_tolerance_req ?
793 PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
794 dev->power.qos->latency_tolerance_req->data.pnode.prio;
795 mutex_unlock(&dev_pm_qos_mtx);
800 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
801 * @dev: Device to update the user space latency tolerance for.
802 * @val: New user space latency tolerance for @dev (negative values disable).
804 int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
808 mutex_lock(&dev_pm_qos_mtx);
/* No user-space tolerance request yet: either nothing to do (val asks
 * for "no constraint") or create one on the fly. */
810 if (IS_ERR_OR_NULL(dev->power.qos)
811 || !dev->power.qos->latency_tolerance_req) {
812 struct dev_pm_qos_request *req;
815 if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
/* Heap-allocate the request; it is owned by the qos object from here. */
821 req = kzalloc(sizeof(*req), GFP_KERNEL);
826 ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
831 dev->power.qos->latency_tolerance_req = req;
/* An existing request: either drop it ("no constraint") or update it. */
834 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
837 ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
842 mutex_unlock(&dev_pm_qos_mtx);
845 EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
848 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
849 * @dev: Device whose latency tolerance to expose
851 int dev_pm_qos_expose_latency_tolerance(struct device *dev)
/* Only devices implementing set_latency_tolerance() can expose it. */
855 if (!dev->power.set_latency_tolerance)
858 mutex_lock(&dev_pm_qos_sysfs_mtx);
859 ret = pm_qos_sysfs_add_latency_tolerance(dev);
860 mutex_unlock(&dev_pm_qos_sysfs_mtx);
864 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);
867 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
868 * @dev: Device whose latency tolerance to hide
870 void dev_pm_qos_hide_latency_tolerance(struct device *dev)
/* Remove the sysfs attribute first so user space can no longer write. */
872 mutex_lock(&dev_pm_qos_sysfs_mtx);
873 pm_qos_sysfs_remove_latency_tolerance(dev);
874 mutex_unlock(&dev_pm_qos_sysfs_mtx);
876 /* Remove the request from user space now */
877 pm_runtime_get_sync(dev);
/* Setting "no constraint" drops the user-space request (see
 * dev_pm_qos_update_user_latency_tolerance()). */
878 dev_pm_qos_update_user_latency_tolerance(dev,
879 PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
882 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);