// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

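/*
 * Never sample faster than once every two scheduler ticks (TICK_NSEC is the
 * tick period in ns; dividing by NSEC_PER_USEC yields microseconds), so the
 * jiffy-granularity idle statistics have a chance to advance between two
 * consecutive samples.
 */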
#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL	(2 * TICK_NSEC / NSEC_PER_USEC)

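/* Per-CPU sampling state shared by all governors built on this code. */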
static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);

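/* Protects gov->gdbs_data against concurrent updates (see init/exit below). */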
static DEFINE_MUTEX(gov_dbs_data_mutex);

/* Common sysfs tunables */
/*
 * sampling_rate_store - update the sampling rate, taking effect immediately
 * if needed.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_data->sampling_rate might not be appropriate.  For example, if the
 * original sampling_rate was 1 second and the requested new rate is 10 ms
 * because the user wants an immediate reaction from the ondemand governor,
 * the governor might not pick up the change for up to 1 second.  Thus, when
 * the sampling rate is being reduced, make the new value effective
 * immediately.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t sampling_rate_store(struct gov_attr_set *attr_set, const char *buf,
			    size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct policy_dbs_info *policy_dbs;
	unsigned int sampling_interval;
	int ret;

	ret = sscanf(buf, "%u", &sampling_interval);
	if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL)
		return -EINVAL;

	dbs_data->sampling_rate = sampling_interval;

	/*
	 * We are operating under dbs_data->mutex and so the list and its
	 * entries can't be freed concurrently.
	 */
	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
		mutex_lock(&policy_dbs->update_mutex);
		/*
		 * On 32-bit architectures this may race with the
		 * sample_delay_ns read in dbs_update_util_handler(), but that
		 * really doesn't matter.  If the read returns a value that's
		 * too big, the sample will be skipped, but the next invocation
		 * of dbs_update_util_handler() (when the update has been
		 * completed) will take a sample.
		 *
		 * If this runs in parallel with dbs_work_handler(), we may end
		 * up overwriting the sample_delay_ns value that it has just
		 * written, but it will be corrected next time a sample is
		 * taken, so it shouldn't be significant.
		 */
		gov_update_sample_delay(policy_dbs, 0);
		mutex_unlock(&policy_dbs->update_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(sampling_rate_store);
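
/*
 * This store callback is reached through sysfs.  For example, with
 * system-wide tunables (!have_governor_per_policy()), the ondemand
 * governor's rate can typically be set with something like:
 *
 *	echo 10000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
 *
 * (value in microseconds; the exact path depends on the governor and on
 * whether its tunables are per-policy).
 */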

/**
 * gov_update_cpu_data - Update CPU load data.
 * @dbs_data: Top-level governor data pointer.
 *
 * Update CPU load data for all CPUs in the domain governed by @dbs_data
 * (that may be a single policy or a bunch of them if governor tunables are
 * system-wide).
 *
 * Call under the @dbs_data mutex.
 */
void gov_update_cpu_data(struct dbs_data *dbs_data)
{
	struct policy_dbs_info *policy_dbs;

	list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) {
		unsigned int j;

		for_each_cpu(j, policy_dbs->policy->cpus) {
			struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
								  dbs_data->io_is_busy);
			if (dbs_data->ignore_nice_load)
				j_cdbs->prev_cpu_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
		}
	}
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);

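/**
 * dbs_update - Compute the current load for a policy.
 * @policy: cpufreq policy whose CPUs are to be sampled.
 *
 * Returns the maximum load (in percent) found among the CPUs in @policy
 * since the previous sample and stores the number of fully idle sampling
 * periods, if any, in policy_dbs->idle_periods for the governor's update
 * callback to consume.
 */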
unsigned int dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0, idle_periods = UINT_MAX;
	unsigned int sampling_rate, io_busy, j;

	/*
	 * Sometimes governors may use an additional multiplier to increase
	 * sample delays temporarily.  Apply that multiplier to sampling_rate
	 * so as to keep the wake-up-from-idle detection logic a bit
	 * conservative.
	 */
	sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
	/*
	 * For the purposes of ondemand, waiting for disk IO is an indication
	 * of performance-critical work rather than of the system actually
	 * being idle, so do not add the iowait time to the CPU idle time in
	 * that case.
	 */
	io_busy = dbs_data->io_is_busy;

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
		u64 update_time, cur_idle_time;
		unsigned int idle_time, time_elapsed;
		unsigned int load;

		cur_idle_time = get_cpu_idle_time(j, &update_time, io_busy);

		time_elapsed = update_time - j_cdbs->prev_update_time;
		j_cdbs->prev_update_time = update_time;

		/*
		 * cur_idle_time could be smaller than j_cdbs->prev_cpu_idle if
		 * it's obtained from get_cpu_idle_time_jiffy() when NOHZ is
		 * off, where idle_time is calculated as the difference between
		 * the time elapsed in jiffies and the "busy time" obtained from
		 * CPU statistics.  If a CPU is 100% busy, the time elapsed and
		 * the busy time should grow by the same amount in two
		 * consecutive samples, but in practice there could be a tiny
		 * difference, making the accumulated idle time decrease
		 * sometimes.  Hence, in this case, regard idle_time as 0 to
		 * keep the subsequent load computation correct.
		 */
		if (cur_idle_time > j_cdbs->prev_cpu_idle)
			idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
		else
			idle_time = 0;

		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);

			idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
			j_cdbs->prev_cpu_nice = cur_nice;
		}

		if (unlikely(!time_elapsed)) {
			/*
			 * That can only happen when this function is called
			 * twice in a row with a very short interval between the
			 * calls, so the previous load value can be used then.
			 */
			load = j_cdbs->prev_load;
		} else if (unlikely(idle_time > 2 * sampling_rate &&
				    j_cdbs->prev_load)) {
			/*
			 * If the CPU had gone completely idle and a task has
			 * just woken up on this CPU now, it would be unfair to
			 * calculate 'load' the usual way for this elapsed
			 * time-window, because it would show near-zero load,
			 * irrespective of how CPU intensive that task actually
			 * was. This is undesirable for latency-sensitive bursty
			 * workloads.
			 *
			 * To avoid this, reuse the 'load' from the previous
			 * time-window and give this task a chance to start with
			 * a reasonably high CPU frequency. However, that
			 * shouldn't be over-done, lest we get stuck at a high
			 * load (high frequency) for too long, even when the
			 * current system load has actually dropped down, so
			 * clear prev_load to guarantee that the load will be
			 * computed again next time.
			 *
			 * Detecting this situation is easy: an unusually large
			 * 'idle_time' (as compared to the sampling rate)
			 * indicates this scenario.
			 */
			load = j_cdbs->prev_load;
			j_cdbs->prev_load = 0;
		} else {
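			/*
			 * Busy time is time_elapsed - idle_time, so e.g. with
			 * time_elapsed = 10000 us and idle_time = 2500 us the
			 * CPU was busy for 7500 us and the load evaluates to
			 * 100 * 7500 / 10000 = 75 (percent).
			 */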
			if (time_elapsed > idle_time)
				load = 100 * (time_elapsed - idle_time) / time_elapsed;
			else
				load = 0;

			j_cdbs->prev_load = load;
		}

		if (unlikely(idle_time > 2 * sampling_rate)) {
			unsigned int periods = idle_time / sampling_rate;

			if (periods < idle_periods)
				idle_periods = periods;
		}

		if (load > max_load)
			max_load = load;
	}

	policy_dbs->idle_periods = idle_periods;

	return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);

static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load and the
	 * ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->update_mutex);
	gov_update_sample_delay(policy_dbs, gov->gov_dbs_update(policy));
	mutex_unlock(&policy_dbs->update_mutex);

	/* Allow the utilization update handler to queue up more work. */
	atomic_set(&policy_dbs->work_count, 0);
	/*
	 * If the update below is reordered with respect to the sample delay
	 * modification, the utilization update handler may end up using a stale
	 * sample delay value.  This barrier pairs with the smp_rmb() in
	 * dbs_update_util_handler().
	 */
	smp_wmb();
	policy_dbs->work_in_progress = false;
}

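/*
 * Runs in hard interrupt context; bounce the actual sampling work to process
 * context on the local CPU, where dbs_work_handler() is free to sleep and
 * take mutexes.
 */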
static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work_on(smp_processor_id(), &policy_dbs->work);
}

static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned int flags)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns, lst;

	if (!cpufreq_this_cpu_can_update(policy_dbs->policy))
		return;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (policy_dbs->work_in_progress)
		return;

	/*
	 * If the reads below are reordered before the check above, the value
	 * of sample_delay_ns used in the computation may be stale.  This
	 * barrier pairs with the smp_wmb() in dbs_work_handler().
	 */
	smp_rmb();
	lst = READ_ONCE(policy_dbs->last_sample_time);
	delta_ns = time - lst;
	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
		return;

	/*
	 * If the policy is not shared, the irq_work may be queued up right away
	 * at this point.  Otherwise, we need to ensure that only one of the
	 * CPUs sharing the policy will do that.
	 */
	if (policy_dbs->is_shared) {
		if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
			return;

		/*
		 * If another CPU updated last_sample_time in the meantime, we
		 * shouldn't be here, so clear the work counter and bail out.
		 */
		if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
			atomic_set(&policy_dbs->work_count, 0);
			return;
		}
	}

	policy_dbs->last_sample_time = time;
	policy_dbs->work_in_progress = true;
	irq_work_queue(&policy_dbs->irq_work);
}

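/*
 * Install the scheduler utilization-update hook on every CPU of the policy.
 * Clearing last_sample_time makes the first invocation of the hook take a
 * sample right away, regardless of delay_us.
 */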
static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
				unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);

		cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
					     dbs_update_util_handler);
	}
}

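/*
 * Remove the hooks and wait (via synchronize_rcu()) for any in-flight
 * invocations of dbs_update_util_handler() to finish before the caller tears
 * down the data they use.
 */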
static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_remove_update_util_hook(i);

	synchronize_rcu();
}

static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for per-policy governor data. */
	policy_dbs = gov->alloc();
	if (!policy_dbs)
		return NULL;

	policy_dbs->policy = policy;
	mutex_init(&policy_dbs->update_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = policy_dbs;
	}
	return policy_dbs;
}

static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
				 struct dbs_governor *gov)
{
	int j;

	mutex_destroy(&policy_dbs->update_mutex);

	for_each_cpu(j, policy_dbs->policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	gov->free(policy_dbs);
}

static void cpufreq_dbs_data_release(struct kobject *kobj)
{
	struct dbs_data *dbs_data = to_dbs_data(to_gov_attr_set(kobj));
	struct dbs_governor *gov = dbs_data->gov;

	gov->exit(dbs_data);
	kfree(dbs_data);
}

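/*
 * Governor ->init() callback: allocate the per-policy data, then either
 * attach to the existing system-wide tunable set or allocate a fresh
 * struct dbs_data, run the governor's own init() and expose the tunables
 * through a sysfs kobject.
 */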
int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data;
	struct policy_dbs_info *policy_dbs;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	dbs_data = gov->gdbs_data;
	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
		goto out;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	dbs_data->gov = gov;
	gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);

	ret = gov->init(dbs_data);
	if (ret)
		goto free_dbs_data;

	/*
	 * The sampling interval should not be less than the transition latency
	 * of the CPU and it also cannot be too small for dbs_update() to work
	 * correctly.
	 */
	dbs_data->sampling_rate = max_t(unsigned int,
					CPUFREQ_DBS_MIN_SAMPLING_INTERVAL,
					cpufreq_policy_transition_delay_us(policy));

	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy_dbs->dbs_data = dbs_data;
	policy->governor_data = policy_dbs;

	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
	gov->kobj_type.release = cpufreq_dbs_data_release;
	ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
				   get_governor_parent_kobj(policy),
				   "%s", gov->gov.name);
	if (!ret)
		goto out;

	/* Failure, so roll back. */
	pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;

	/*
	 * The kobject has been initialized already, so dropping the last
	 * reference frees dbs_data via cpufreq_dbs_data_release(), which also
	 * runs gov->exit().  Don't free it again below.
	 */
	kobject_put(&dbs_data->attr_set.kobj);

	goto free_policy_dbs_info;

free_dbs_data:
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy_dbs, gov);

out:
	mutex_unlock(&gov_dbs_data_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init);

void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int count;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list);

	policy->governor_data = NULL;

	if (!count && !have_governor_per_policy())
		gov->gdbs_data = NULL;

	free_policy_dbs_info(policy_dbs, gov);

	mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit);

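/*
 * Governor ->start() callback: snapshot the initial idle and nice times for
 * each CPU in the policy, let the governor initialize its own state and
 * install the utilization-update hooks that drive sampling.
 */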
int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j;
	unsigned int io_busy;

	if (!policy->cur)
		return -EINVAL;

	policy_dbs->is_shared = policy_is_shared(policy);
	policy_dbs->rate_mult = 1;

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;
	io_busy = dbs_data->io_is_busy;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, io_busy);
		/*
		 * Make the first invocation of dbs_update() compute the load.
		 */
		j_cdbs->prev_load = 0;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
	}

	gov->start(policy);

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start);

void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
	policy_dbs->work_in_progress = false;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);

void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs;

	/* Protect gov->gdbs_data against cpufreq_dbs_governor_exit() */
	mutex_lock(&gov_dbs_data_mutex);
	policy_dbs = policy->governor_data;
	if (!policy_dbs)
		goto out;

	mutex_lock(&policy_dbs->update_mutex);
	cpufreq_policy_apply_limits(policy);
	gov_update_sample_delay(policy_dbs, 0);
	mutex_unlock(&policy_dbs->update_mutex);

out:
	mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);
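
/*
 * Usage sketch (not part of this file): a governor built on this common code
 * fills in struct dbs_governor and points the generic cpufreq callbacks at
 * the cpufreq_dbs_governor_*() helpers above, typically via the
 * CPUFREQ_DBS_GOVERNOR_INITIALIZER() macro from cpufreq_governor.h.  The
 * my_gov_* names below are hypothetical:
 *
 *	static struct dbs_governor my_governor = {
 *		.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("my_gov"),
 *		.kobj_type = { .default_groups = my_gov_groups },
 *		.gov_dbs_update = my_gov_update,
 *		.alloc = my_gov_alloc,
 *		.free = my_gov_free,
 *		.init = my_gov_init,
 *		.exit = my_gov_exit,
 *		.start = my_gov_start,
 *	};
 *
 * Registering it with cpufreq_register_governor(&my_governor.gov) makes it
 * selectable; gov_dbs_update() returns the delay (in us) until the next
 * sample, which dbs_work_handler() feeds to gov_update_sample_delay().
 */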