cpufreq: dev_pm_qos_update_request() can return 1 on success
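
dev_pm_qos_update_request() returns 1 when the update changes the aggregated
constraint value, 0 when it leaves the aggregate unchanged, and a negative
error code on failure, so only negative returns indicate errors.  The reworked
store_##file_name() macro below therefore checks "ret >= 0" instead of "!ret".
A minimal sketch of that caller pattern (req and new_freq are placeholders,
not names taken from this file):

        ret = dev_pm_qos_update_request(req, new_freq);
        if (ret < 0)            /* only negative values are errors */
                return ret;
        /* ret is 0 (no change) or 1 (aggregate changed): both mean success */
        return count;
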
[linux-2.6-block.git] drivers/cpufreq/cpufreq.c
index 0a9f675f2af447cf89a69e3fd70753c216e52af0..c28ebf2810f11508a1c36cb1660bdd949ef3aeff 100644
@@ -23,6 +23,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/pm_qos.h>
 #include <linux/slab.h>
 #include <linux/suspend.h>
 #include <linux/syscore_ops.h>
@@ -158,7 +159,7 @@ EXPORT_SYMBOL_GPL(arch_set_freq_scale);
  * - set policies transition latency
  * - policy->cpus with all possible CPUs
  */
-int cpufreq_generic_init(struct cpufreq_policy *policy,
+void cpufreq_generic_init(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table,
                unsigned int transition_latency)
 {
@@ -170,8 +171,6 @@ int cpufreq_generic_init(struct cpufreq_policy *policy,
         * share the clock and voltage.
         */
        cpumask_setall(policy->cpus);
-
-       return 0;
 }
 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
 
@@ -714,23 +713,15 @@ static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
 static ssize_t store_##file_name                                       \
 (struct cpufreq_policy *policy, const char *buf, size_t count)         \
 {                                                                      \
-       int ret, temp;                                                  \
-       struct cpufreq_policy new_policy;                               \
+       unsigned long val;                                              \
+       int ret;                                                        \
                                                                        \
-       memcpy(&new_policy, policy, sizeof(*policy));                   \
-       new_policy.min = policy->user_policy.min;                       \
-       new_policy.max = policy->user_policy.max;                       \
-                                                                       \
-       ret = sscanf(buf, "%u", &new_policy.object);                    \
+       ret = sscanf(buf, "%lu", &val);                                 \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
-       temp = new_policy.object;                                       \
-       ret = cpufreq_set_policy(policy, &new_policy);          \
-       if (!ret)                                                       \
-               policy->user_policy.object = temp;                      \
-                                                                       \
-       return ret ? ret : count;                                       \
+       ret = dev_pm_qos_update_request(policy->object##_freq_req, val);\
+       return ret >= 0 ? count : ret;                                  \
 }
 
 store_one(scaling_min_freq, min);
@@ -996,7 +987,7 @@ static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
 {
        struct device *dev = get_cpu_device(cpu);
 
-       if (!dev)
+       if (unlikely(!dev))
                return;
 
        if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
@@ -1112,17 +1103,18 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
        return ret;
 }
 
-static void refresh_frequency_limits(struct cpufreq_policy *policy)
+void refresh_frequency_limits(struct cpufreq_policy *policy)
 {
-       struct cpufreq_policy new_policy = *policy;
-
-       pr_debug("updating policy for CPU %u\n", policy->cpu);
+       struct cpufreq_policy new_policy;
 
-       new_policy.min = policy->user_policy.min;
-       new_policy.max = policy->user_policy.max;
+       if (!policy_is_inactive(policy)) {
+               new_policy = *policy;
+               pr_debug("updating policy for CPU %u\n", policy->cpu);
 
-       cpufreq_set_policy(policy, &new_policy);
+               cpufreq_set_policy(policy, &new_policy);
+       }
 }
+EXPORT_SYMBOL(refresh_frequency_limits);
 
 static void handle_update(struct work_struct *work)
 {
@@ -1130,14 +1122,60 @@ static void handle_update(struct work_struct *work)
                container_of(work, struct cpufreq_policy, update);
 
        pr_debug("handle_update for cpu %u called\n", policy->cpu);
+       down_write(&policy->rwsem);
        refresh_frequency_limits(policy);
+       up_write(&policy->rwsem);
+}
+
+static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
+                               void *data)
+{
+       struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);
+
+       schedule_work(&policy->update);
+       return 0;
+}
+
+static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
+                               void *data)
+{
+       struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);
+
+       schedule_work(&policy->update);
+       return 0;
+}
+
+static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
+{
+       struct kobject *kobj;
+       struct completion *cmp;
+
+       down_write(&policy->rwsem);
+       cpufreq_stats_free_table(policy);
+       kobj = &policy->kobj;
+       cmp = &policy->kobj_unregister;
+       up_write(&policy->rwsem);
+       kobject_put(kobj);
+
+       /*
+        * We need to make sure that the underlying kobj is
+        * actually not referenced anymore by anybody before we
+        * proceed with unloading.
+        */
+       pr_debug("waiting for dropping of refcount\n");
+       wait_for_completion(cmp);
+       pr_debug("wait complete\n");
 }
 
 static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 {
        struct cpufreq_policy *policy;
+       struct device *dev = get_cpu_device(cpu);
        int ret;
 
+       if (!dev)
+               return NULL;
+
        policy = kzalloc(sizeof(*policy), GFP_KERNEL);
        if (!policy)
                return NULL;
@@ -1154,7 +1192,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
                                   cpufreq_global_kobject, "policy%u", cpu);
        if (ret) {
-               pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
+               dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
                /*
                 * The entire policy object will be freed below, but the extra
                 * memory allocated for the kobject name needs to be freed by
@@ -1164,6 +1202,25 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
                goto err_free_real_cpus;
        }
 
+       policy->nb_min.notifier_call = cpufreq_notifier_min;
+       policy->nb_max.notifier_call = cpufreq_notifier_max;
+
+       ret = dev_pm_qos_add_notifier(dev, &policy->nb_min,
+                                     DEV_PM_QOS_MIN_FREQUENCY);
+       if (ret) {
+               dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
+                       ret, cpumask_pr_args(policy->cpus));
+               goto err_kobj_remove;
+       }
+
+       ret = dev_pm_qos_add_notifier(dev, &policy->nb_max,
+                                     DEV_PM_QOS_MAX_FREQUENCY);
+       if (ret) {
+               dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
+                       ret, cpumask_pr_args(policy->cpus));
+               goto err_min_qos_notifier;
+       }
+
        INIT_LIST_HEAD(&policy->policy_list);
        init_rwsem(&policy->rwsem);
        spin_lock_init(&policy->transition_lock);
@@ -1174,6 +1231,11 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
        policy->cpu = cpu;
        return policy;
 
+err_min_qos_notifier:
+       dev_pm_qos_remove_notifier(dev, &policy->nb_min,
+                                  DEV_PM_QOS_MIN_FREQUENCY);
+err_kobj_remove:
+       cpufreq_policy_put_kobj(policy);
 err_free_real_cpus:
        free_cpumask_var(policy->real_cpus);
 err_free_rcpumask:
@@ -1186,30 +1248,9 @@ err_free_policy:
        return NULL;
 }
 
-static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
-{
-       struct kobject *kobj;
-       struct completion *cmp;
-
-       down_write(&policy->rwsem);
-       cpufreq_stats_free_table(policy);
-       kobj = &policy->kobj;
-       cmp = &policy->kobj_unregister;
-       up_write(&policy->rwsem);
-       kobject_put(kobj);
-
-       /*
-        * We need to make sure that the underlying kobj is
-        * actually not referenced anymore by anybody before we
-        * proceed with unloading.
-        */
-       pr_debug("waiting for dropping of refcount\n");
-       wait_for_completion(cmp);
-       pr_debug("wait complete\n");
-}
-
 static void cpufreq_policy_free(struct cpufreq_policy *policy)
 {
+       struct device *dev = get_cpu_device(policy->cpu);
        unsigned long flags;
        int cpu;
 
@@ -1221,6 +1262,14 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
                per_cpu(cpufreq_cpu_data, cpu) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
+       dev_pm_qos_remove_notifier(dev, &policy->nb_max,
+                                  DEV_PM_QOS_MAX_FREQUENCY);
+       dev_pm_qos_remove_notifier(dev, &policy->nb_min,
+                                  DEV_PM_QOS_MIN_FREQUENCY);
+       dev_pm_qos_remove_request(policy->max_freq_req);
+       dev_pm_qos_remove_request(policy->min_freq_req);
+       kfree(policy->min_freq_req);
+
        cpufreq_policy_put_kobj(policy);
        free_cpumask_var(policy->real_cpus);
        free_cpumask_var(policy->related_cpus);
@@ -1298,16 +1347,50 @@ static int cpufreq_online(unsigned int cpu)
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
 
        if (new_policy) {
-               policy->user_policy.min = policy->min;
-               policy->user_policy.max = policy->max;
+               struct device *dev = get_cpu_device(cpu);
 
                for_each_cpu(j, policy->related_cpus) {
                        per_cpu(cpufreq_cpu_data, j) = policy;
                        add_cpu_dev_symlink(policy, j);
                }
-       } else {
-               policy->min = policy->user_policy.min;
-               policy->max = policy->user_policy.max;
+
+               policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
+                                              GFP_KERNEL);
+               if (!policy->min_freq_req)
+                       goto out_destroy_policy;
+
+               ret = dev_pm_qos_add_request(dev, policy->min_freq_req,
+                                            DEV_PM_QOS_MIN_FREQUENCY,
+                                            policy->min);
+               if (ret < 0) {
+                       /*
+                        * So we don't call dev_pm_qos_remove_request() for an
+                        * uninitialized request.
+                        */
+                       kfree(policy->min_freq_req);
+                       policy->min_freq_req = NULL;
+
+                       dev_err(dev, "Failed to add min-freq constraint (%d)\n",
+                               ret);
+                       goto out_destroy_policy;
+               }
+
+               /*
+                * This must be initialized right here to avoid calling
+                * dev_pm_qos_remove_request() on uninitialized request in case
+                * of errors.
+                */
+               policy->max_freq_req = policy->min_freq_req + 1;
+
+               ret = dev_pm_qos_add_request(dev, policy->max_freq_req,
+                                            DEV_PM_QOS_MAX_FREQUENCY,
+                                            policy->max);
+               if (ret < 0) {
+                       policy->max_freq_req = NULL;
+                       dev_err(dev, "Failed to add max-freq constraint (%d)\n",
+                               ret);
+                       goto out_destroy_policy;
+               }
        }
 
        if (cpufreq_driver->get && has_target()) {
@@ -2280,6 +2363,7 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
                       struct cpufreq_policy *new_policy)
 {
        struct cpufreq_governor *old_gov;
+       struct device *cpu_dev = get_cpu_device(policy->cpu);
        int ret;
 
        pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
@@ -2288,17 +2372,21 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
        memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
 
        /*
-       * This check works well when we store new min/max freq attributes,
-       * because new_policy is a copy of policy with one field updated.
-       */
-       if (new_policy->min > new_policy->max)
-               return -EINVAL;
+        * The PM QoS framework collects all the requests from users and
+        * provides us with the final aggregated value here.
+        */
+       new_policy->min = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MIN_FREQUENCY);
+       new_policy->max = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MAX_FREQUENCY);
 
        /* verify the cpu speed can be set within this limit */
        ret = cpufreq_driver->verify(new_policy);
        if (ret)
                return ret;
 
+       /*
+        * The notifier-chain shall be removed once all the users of
+        * CPUFREQ_ADJUST are moved to use the QoS framework.
+        */
        /* adjust if necessary - all reasons */
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_ADJUST, new_policy);
@@ -2377,10 +2465,9 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
  * @cpu: CPU to re-evaluate the policy for.
  *
  * Update the current frequency for the cpufreq policy of @cpu and use
- * cpufreq_set_policy() to re-apply the min and max limits saved in the
- * user_policy sub-structure of that policy, which triggers the evaluation
- * of policy notifiers and the cpufreq driver's ->verify() callback for the
- * policy in question, among other things.
+ * cpufreq_set_policy() to re-apply the min and max limits, which triggers the
+ * evaluation of policy notifiers and the cpufreq driver's ->verify() callback
+ * for the policy in question, among other things.
  */
 void cpufreq_update_policy(unsigned int cpu)
 {
@@ -2440,10 +2527,9 @@ static int cpufreq_boost_set_sw(int state)
                        break;
                }
 
-               down_write(&policy->rwsem);
-               policy->user_policy.max = policy->max;
-               cpufreq_governor_limits(policy);
-               up_write(&policy->rwsem);
+               ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max);
+               if (ret < 0)
+                       break;
        }
 
        return ret;
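
The comment added to cpufreq_set_policy() above relies on the PM QoS core
aggregating every request attached to the CPU device.  For illustration only,
a minimal, hypothetical sketch of another in-kernel user adding its own
min-frequency constraint through the same API this patch uses (my_min_req, the
function name and the 800000 value are made up; units match policy->min, i.e.
kHz):

        #include <linux/cpu.h>
        #include <linux/pm_qos.h>

        static struct dev_pm_qos_request my_min_req;

        static int add_cpu0_min_freq_constraint(void)
        {
                struct device *cpu_dev = get_cpu_device(0);
                int ret;

                if (!cpu_dev)
                        return -ENODEV;

                /* Ask for at least 800 MHz; aggregated with scaling_min_freq. */
                ret = dev_pm_qos_add_request(cpu_dev, &my_min_req,
                                             DEV_PM_QOS_MIN_FREQUENCY, 800000);
                return ret < 0 ? ret : 0;
        }

Such a request is dropped again with dev_pm_qos_remove_request(&my_min_req),
mirroring the cleanup this patch adds to cpufreq_policy_free().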