Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * drivers/cpufreq/cpufreq_ondemand.c | |
3 | * | |
4 | * Copyright (C) 2001 Russell King | |
5 | * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>. | |
6 | * Jun Nakajima <jun.nakajima@intel.com> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License version 2 as | |
10 | * published by the Free Software Foundation. | |
11 | */ | |
12 | ||
4471a34f VK |
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
14 | ||
1da177e4 | 15 | #include <linux/cpufreq.h> |
4471a34f VK |
16 | #include <linux/init.h> |
17 | #include <linux/kernel.h> | |
1da177e4 | 18 | #include <linux/kernel_stat.h> |
4471a34f VK |
19 | #include <linux/kobject.h> |
20 | #include <linux/module.h> | |
3fc54d37 | 21 | #include <linux/mutex.h> |
4471a34f VK |
22 | #include <linux/percpu-defs.h> |
23 | #include <linux/sysfs.h> | |
80800913 | 24 | #include <linux/tick.h> |
4471a34f | 25 | #include <linux/types.h> |
1da177e4 | 26 | |
4471a34f | 27 | #include "cpufreq_governor.h" |
1da177e4 | 28 | |
4471a34f | 29 | /* On-demand governor macros */
e9d95bf7 | 30 | #define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) |
1da177e4 | 31 | #define DEF_FREQUENCY_UP_THRESHOLD (80) |
3f78a9f7 DN |
32 | #define DEF_SAMPLING_DOWN_FACTOR (1) |
33 | #define MAX_SAMPLING_DOWN_FACTOR (100000) | |
80800913 | 34 | #define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) |
35 | #define MICRO_FREQUENCY_UP_THRESHOLD (95) | |
cef9615a | 36 | #define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) |
c29f1403 | 37 | #define MIN_FREQUENCY_UP_THRESHOLD (11) |
1da177e4 LT |
38 | #define MAX_FREQUENCY_UP_THRESHOLD (100) |
39 | ||
4471a34f VK |
40 | static struct dbs_data od_dbs_data; |
41 | static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info); | |
1da177e4 | 42 | |
3e33ee9e FB |
43 | #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND |
44 | static struct cpufreq_governor cpufreq_gov_ondemand; | |
45 | #endif | |
46 | ||
4471a34f | 47 | static struct od_dbs_tuners od_tuners = { |
32ee8c3e | 48 | .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, |
3f78a9f7 | 49 | .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, |
e9d95bf7 | 50 | .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, |
9cbad61b | 51 | .ignore_nice = 0, |
05ca0350 | 52 | .powersave_bias = 0, |
1da177e4 LT |
53 | }; |
54 | ||
4471a34f | 55 | static void ondemand_powersave_bias_init_cpu(int cpu) |
6b8fcd90 | 56 | { |
4471a34f | 57 | struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); |
6b8fcd90 | 58 | |
4471a34f VK |
59 | dbs_info->freq_table = cpufreq_frequency_get_table(cpu); |
60 | dbs_info->freq_lo = 0; | |
61 | } | |
6b8fcd90 | 62 | |
4471a34f VK |
63 | /* |
64 | * Not all CPUs want IO time to be accounted as busy; this depends on how | |
65 | * efficient idling at a higher frequency/voltage is. | |
66 | * Pavel Machek says this is not so for various generations of AMD and old | |
67 | * Intel systems. | |
68 | * Mike Chan (android.com) claims this is also not true for ARM. | |
69 | * Because of this, whitelist specific known series of CPUs by default, and | |
70 | * leave all others up to the user. | |
71 | */ | |
72 | static int should_io_be_busy(void) | |
73 | { | |
74 | #if defined(CONFIG_X86) | |
75 | /* | |
76 | * For Intel, Core 2 (model 15) and later have an efficient idle. | |
77 | */ | |
78 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && | |
79 | boot_cpu_data.x86 == 6 && | |
80 | boot_cpu_data.x86_model >= 15) | |
81 | return 1; | |
82 | #endif | |
83 | return 0; | |
6b8fcd90 AV |
84 | } |
85 | ||
05ca0350 AS |
86 | /* |
87 | * Find the right frequency to be set now with powersave_bias on. | |
88 | * Returns the freq_hi to be used right now and will set freq_hi_jiffies, | |
89 | * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. | |
90 | */ | |
b5ecf60f | 91 | static unsigned int powersave_bias_target(struct cpufreq_policy *policy, |
4471a34f | 92 | unsigned int freq_next, unsigned int relation) |
05ca0350 AS |
93 | { |
94 | unsigned int freq_req, freq_reduc, freq_avg; | |
95 | unsigned int freq_hi, freq_lo; | |
96 | unsigned int index = 0; | |
97 | unsigned int jiffies_total, jiffies_hi, jiffies_lo; | |
4471a34f | 98 | struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, |
245b2e70 | 99 | policy->cpu); |
05ca0350 AS |
100 | |
101 | if (!dbs_info->freq_table) { | |
102 | dbs_info->freq_lo = 0; | |
103 | dbs_info->freq_lo_jiffies = 0; | |
104 | return freq_next; | |
105 | } | |
106 | ||
107 | cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, | |
108 | relation, &index); | |
109 | freq_req = dbs_info->freq_table[index].frequency; | |
4471a34f | 110 | freq_reduc = freq_req * od_tuners.powersave_bias / 1000; |
05ca0350 AS |
111 | freq_avg = freq_req - freq_reduc; |
112 | ||
113 | /* Find freq bounds for freq_avg in freq_table */ | |
114 | index = 0; | |
115 | cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, | |
116 | CPUFREQ_RELATION_H, &index); | |
117 | freq_lo = dbs_info->freq_table[index].frequency; | |
118 | index = 0; | |
119 | cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, | |
120 | CPUFREQ_RELATION_L, &index); | |
121 | freq_hi = dbs_info->freq_table[index].frequency; | |
122 | ||
123 | /* Find out how long we have to be in hi and lo freqs */ | |
124 | if (freq_hi == freq_lo) { | |
125 | dbs_info->freq_lo = 0; | |
126 | dbs_info->freq_lo_jiffies = 0; | |
127 | return freq_lo; | |
128 | } | |
4471a34f | 129 | jiffies_total = usecs_to_jiffies(od_tuners.sampling_rate); |
05ca0350 AS |
130 | jiffies_hi = (freq_avg - freq_lo) * jiffies_total; |
131 | jiffies_hi += ((freq_hi - freq_lo) / 2); | |
132 | jiffies_hi /= (freq_hi - freq_lo); | |
133 | jiffies_lo = jiffies_total - jiffies_hi; | |
134 | dbs_info->freq_lo = freq_lo; | |
135 | dbs_info->freq_lo_jiffies = jiffies_lo; | |
136 | dbs_info->freq_hi_jiffies = jiffies_hi; | |
137 | return freq_hi; | |
138 | } | |
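/*
* Worked example (illustrative numbers, not from any real platform): with
* powersave_bias = 100 (i.e. 10%) and freq_next resolving to
* freq_req = 2000000 kHz, we get freq_reduc = 200000 kHz and
* freq_avg = 1800000 kHz. If the neighbouring frequency-table entries are
* 1600000 kHz and 2000000 kHz, then freq_lo = 1600000, freq_hi = 2000000,
* and for jiffies_total = 10 the rounded split
* (200000 * 10 + 200000) / 400000 works out to jiffies_hi = 5 and
* jiffies_lo = 5, so alternating between the two frequencies averages out
* to roughly freq_avg over the sampling window.
*/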
139 | ||
140 | static void ondemand_powersave_bias_init(void) | |
141 | { | |
142 | int i; | |
143 | for_each_online_cpu(i) { | |
5a75c828 | 144 | ondemand_powersave_bias_init_cpu(i); |
05ca0350 AS |
145 | } |
146 | } | |
147 | ||
4471a34f VK |
148 | static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) |
149 | { | |
150 | if (od_tuners.powersave_bias) | |
151 | freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); | |
152 | else if (p->cur == p->max) | |
153 | return; | |
0e625ac1 | 154 | |
4471a34f VK |
155 | __cpufreq_driver_target(p, freq, od_tuners.powersave_bias ? |
156 | CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); | |
157 | } | |
158 | ||
159 | /* | |
160 | * Every sampling_rate, we check if the current idle time is less than 20% | |
161 | * (default). If it is, we try to increase the frequency. Every sampling_rate, | |
162 | * we also look for the lowest frequency which can sustain the load while | |
163 | * keeping idle time over 30%. If such a frequency exists, we try to decrease to it. | |
164 | * | |
165 | * Any frequency increase takes it to the maximum frequency. Frequency reduction | |
166 | * happens in minimum steps of 5% (default) of the current frequency. | |
167 | */ | |
168 | static void od_check_cpu(int cpu, unsigned int load_freq) | |
1da177e4 | 169 | { |
4471a34f VK |
170 | struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); |
171 | struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; | |
172 | ||
173 | dbs_info->freq_lo = 0; | |
174 | ||
175 | /* Check for frequency increase */ | |
176 | if (load_freq > od_tuners.up_threshold * policy->cur) { | |
177 | /* If switching to max speed, apply sampling_down_factor */ | |
178 | if (policy->cur < policy->max) | |
179 | dbs_info->rate_mult = | |
180 | od_tuners.sampling_down_factor; | |
181 | dbs_freq_increase(policy, policy->max); | |
182 | return; | |
183 | } | |
184 | ||
185 | /* Check for frequency decrease */ | |
186 | /* if we cannot reduce the frequency anymore, break out early */ | |
187 | if (policy->cur == policy->min) | |
188 | return; | |
189 | ||
190 | /* | |
191 | * The optimal frequency is the lowest frequency that can support the | |
192 | * current CPU usage without triggering the up policy. To be safe, we aim | |
193 | * 10 points under the up_threshold. | |
194 | */ | |
195 | if (load_freq < (od_tuners.up_threshold - od_tuners.down_differential) * | |
196 | policy->cur) { | |
197 | unsigned int freq_next; | |
198 | freq_next = load_freq / (od_tuners.up_threshold - | |
199 | od_tuners.down_differential); | |
200 | ||
201 | /* No longer fully busy, reset rate_mult */ | |
202 | dbs_info->rate_mult = 1; | |
203 | ||
204 | if (freq_next < policy->min) | |
205 | freq_next = policy->min; | |
206 | ||
207 | if (!od_tuners.powersave_bias) { | |
208 | __cpufreq_driver_target(policy, freq_next, | |
209 | CPUFREQ_RELATION_L); | |
210 | } else { | |
211 | int freq = powersave_bias_target(policy, freq_next, | |
212 | CPUFREQ_RELATION_L); | |
213 | __cpufreq_driver_target(policy, freq, | |
214 | CPUFREQ_RELATION_L); | |
215 | } | |
216 | } | |
1da177e4 LT |
217 | } |
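/*
* Illustrative walk-through with the default tunables (up_threshold = 80,
* down_differential = 10); load_freq is roughly the load in percent times
* the current frequency, so the numbers below are examples only:
* - load ~85%: load_freq > 80 * policy->cur, so we jump straight to
*   policy->max and, if we were not already at max, stretch the next
*   sampling interval by setting rate_mult = sampling_down_factor.
* - load ~30%: load_freq < (80 - 10) * policy->cur, so
*   freq_next = load_freq / 70 (about 43% of the current frequency),
*   clamped to policy->min and selected with CPUFREQ_RELATION_L.
*/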
218 | ||
da53d61e FB |
219 | static void od_timer_update(struct od_cpu_dbs_info_s *dbs_info, bool sample, |
220 | struct delayed_work *dw) | |
4471a34f | 221 | { |
4471a34f VK |
222 | unsigned int cpu = dbs_info->cdbs.cpu; |
223 | int delay, sample_type = dbs_info->sample_type; | |
1da177e4 | 224 | |
4471a34f VK |
225 | /* Common NORMAL_SAMPLE setup */ |
226 | dbs_info->sample_type = OD_NORMAL_SAMPLE; | |
227 | if (sample_type == OD_SUB_SAMPLE) { | |
228 | delay = dbs_info->freq_lo_jiffies; | |
da53d61e FB |
229 | if (sample) |
230 | __cpufreq_driver_target(dbs_info->cdbs.cur_policy, | |
231 | dbs_info->freq_lo, | |
232 | CPUFREQ_RELATION_H); | |
4471a34f | 233 | } else { |
da53d61e FB |
234 | if (sample) |
235 | dbs_check_cpu(&od_dbs_data, cpu); | |
4471a34f VK |
236 | if (dbs_info->freq_lo) { |
237 | /* Setup timer for SUB_SAMPLE */ | |
238 | dbs_info->sample_type = OD_SUB_SAMPLE; | |
239 | delay = dbs_info->freq_hi_jiffies; | |
240 | } else { | |
d3c31a77 FB |
241 | delay = delay_for_sampling_rate(od_tuners.sampling_rate |
242 | * dbs_info->rate_mult); | |
4471a34f VK |
243 | } |
244 | } | |
245 | ||
da53d61e FB |
246 | schedule_delayed_work_on(smp_processor_id(), dw, delay); |
247 | } | |
248 | ||
249 | static void od_timer_coordinated(struct od_cpu_dbs_info_s *dbs_info_local, | |
250 | struct delayed_work *dw) | |
251 | { | |
252 | struct od_cpu_dbs_info_s *dbs_info; | |
253 | ktime_t time_now; | |
254 | s64 delta_us; | |
255 | bool sample = true; | |
256 | ||
257 | /* use leader CPU's dbs_info */ | |
258 | dbs_info = &per_cpu(od_cpu_dbs_info, dbs_info_local->cdbs.cpu); | |
259 | mutex_lock(&dbs_info->cdbs.timer_mutex); | |
260 | ||
261 | time_now = ktime_get(); | |
262 | delta_us = ktime_us_delta(time_now, dbs_info->cdbs.time_stamp); | |
263 | ||
264 | /* Do nothing if we have sampled recently */ | |
265 | if (delta_us < (s64)(od_tuners.sampling_rate / 2)) | |
266 | sample = false; | |
267 | else | |
268 | dbs_info->cdbs.time_stamp = time_now; | |
269 | ||
270 | od_timer_update(dbs_info, sample, dw); | |
4471a34f VK |
271 | mutex_unlock(&dbs_info->cdbs.timer_mutex); |
272 | } | |
273 | ||
da53d61e FB |
274 | static void od_dbs_timer(struct work_struct *work) |
275 | { | |
276 | struct delayed_work *dw = to_delayed_work(work); | |
277 | struct od_cpu_dbs_info_s *dbs_info = | |
278 | container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work); | |
279 | ||
280 | if (dbs_sw_coordinated_cpus(&dbs_info->cdbs)) { | |
281 | od_timer_coordinated(dbs_info, dw); | |
282 | } else { | |
283 | mutex_lock(&dbs_info->cdbs.timer_mutex); | |
284 | od_timer_update(dbs_info, true, dw); | |
285 | mutex_unlock(&dbs_info->cdbs.timer_mutex); | |
286 | } | |
287 | } | |
288 | ||
4471a34f VK |
289 | /************************** sysfs interface ************************/ |
290 | ||
291 | static ssize_t show_sampling_rate_min(struct kobject *kobj, | |
292 | struct attribute *attr, char *buf) | |
293 | { | |
294 | return sprintf(buf, "%u\n", od_dbs_data.min_sampling_rate); | |
1da177e4 | 295 | } |
1da177e4 | 296 | |
fd0ef7a0 MH |
297 | /** |
298 | * update_sampling_rate - update sampling rate effective immediately if needed. | |
299 | * @new_rate: new sampling rate | |
300 | * | |
301 | * If the new rate is smaller than the old, simply updating | |
4471a34f VK |
302 | * od_tuners.sampling_rate might not be appropriate. For example, if the |
303 | * original sampling_rate was 1 second and the requested new sampling rate is 10 | |
304 | * ms because the user needs an immediate reaction from the ondemand governor, | |
305 | * but is not sure if a higher frequency will be required, then the governor may | |
306 | * change the sampling rate too late; up to 1 second later. Thus, if we are | |
307 | * reducing the sampling rate, we need to make the new value effective | |
308 | * immediately. | |
fd0ef7a0 MH |
309 | */ |
310 | static void update_sampling_rate(unsigned int new_rate) | |
311 | { | |
312 | int cpu; | |
313 | ||
4471a34f VK |
314 | od_tuners.sampling_rate = new_rate = max(new_rate, |
315 | od_dbs_data.min_sampling_rate); | |
fd0ef7a0 MH |
316 | |
317 | for_each_online_cpu(cpu) { | |
318 | struct cpufreq_policy *policy; | |
4471a34f | 319 | struct od_cpu_dbs_info_s *dbs_info; |
fd0ef7a0 MH |
320 | unsigned long next_sampling, appointed_at; |
321 | ||
322 | policy = cpufreq_cpu_get(cpu); | |
323 | if (!policy) | |
324 | continue; | |
3e33ee9e FB |
325 | if (policy->governor != &cpufreq_gov_ondemand) { |
326 | cpufreq_cpu_put(policy); | |
327 | continue; | |
328 | } | |
fd0ef7a0 MH |
329 | dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu); |
330 | cpufreq_cpu_put(policy); | |
331 | ||
4471a34f | 332 | mutex_lock(&dbs_info->cdbs.timer_mutex); |
fd0ef7a0 | 333 | |
4471a34f VK |
334 | if (!delayed_work_pending(&dbs_info->cdbs.work)) { |
335 | mutex_unlock(&dbs_info->cdbs.timer_mutex); | |
fd0ef7a0 MH |
336 | continue; |
337 | } | |
338 | ||
4471a34f VK |
339 | next_sampling = jiffies + usecs_to_jiffies(new_rate); |
340 | appointed_at = dbs_info->cdbs.work.timer.expires; | |
fd0ef7a0 MH |
341 | |
342 | if (time_before(next_sampling, appointed_at)) { | |
343 | ||
4471a34f VK |
344 | mutex_unlock(&dbs_info->cdbs.timer_mutex); |
345 | cancel_delayed_work_sync(&dbs_info->cdbs.work); | |
346 | mutex_lock(&dbs_info->cdbs.timer_mutex); | |
fd0ef7a0 | 347 | |
4471a34f VK |
348 | schedule_delayed_work_on(dbs_info->cdbs.cpu, |
349 | &dbs_info->cdbs.work, | |
350 | usecs_to_jiffies(new_rate)); | |
fd0ef7a0 MH |
351 | |
352 | } | |
4471a34f | 353 | mutex_unlock(&dbs_info->cdbs.timer_mutex); |
fd0ef7a0 MH |
354 | } |
355 | } | |
356 | ||
0e625ac1 TR |
357 | static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, |
358 | const char *buf, size_t count) | |
1da177e4 LT |
359 | { |
360 | unsigned int input; | |
361 | int ret; | |
ffac80e9 | 362 | ret = sscanf(buf, "%u", &input); |
5a75c828 | 363 | if (ret != 1) |
364 | return -EINVAL; | |
fd0ef7a0 | 365 | update_sampling_rate(input); |
1da177e4 LT |
366 | return count; |
367 | } | |
368 | ||
19379b11 AV |
369 | static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, |
370 | const char *buf, size_t count) | |
371 | { | |
372 | unsigned int input; | |
373 | int ret; | |
374 | ||
375 | ret = sscanf(buf, "%u", &input); | |
376 | if (ret != 1) | |
377 | return -EINVAL; | |
4471a34f | 378 | od_tuners.io_is_busy = !!input; |
19379b11 AV |
379 | return count; |
380 | } | |
381 | ||
0e625ac1 TR |
382 | static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, |
383 | const char *buf, size_t count) | |
1da177e4 LT |
384 | { |
385 | unsigned int input; | |
386 | int ret; | |
ffac80e9 | 387 | ret = sscanf(buf, "%u", &input); |
1da177e4 | 388 | |
32ee8c3e | 389 | if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || |
c29f1403 | 390 | input < MIN_FREQUENCY_UP_THRESHOLD) { |
1da177e4 LT |
391 | return -EINVAL; |
392 | } | |
4471a34f | 393 | od_tuners.up_threshold = input; |
1da177e4 LT |
394 | return count; |
395 | } | |
396 | ||
3f78a9f7 DN |
397 | static ssize_t store_sampling_down_factor(struct kobject *a, |
398 | struct attribute *b, const char *buf, size_t count) | |
399 | { | |
400 | unsigned int input, j; | |
401 | int ret; | |
402 | ret = sscanf(buf, "%u", &input); | |
403 | ||
404 | if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) | |
405 | return -EINVAL; | |
4471a34f | 406 | od_tuners.sampling_down_factor = input; |
3f78a9f7 DN |
407 | |
408 | /* Reset down sampling multiplier in case it was active */ | |
409 | for_each_online_cpu(j) { | |
4471a34f VK |
410 | struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, |
411 | j); | |
3f78a9f7 DN |
412 | dbs_info->rate_mult = 1; |
413 | } | |
3f78a9f7 DN |
414 | return count; |
415 | } | |
416 | ||
0e625ac1 TR |
417 | static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, |
418 | const char *buf, size_t count) | |
3d5ee9e5 DJ |
419 | { |
420 | unsigned int input; | |
421 | int ret; | |
422 | ||
423 | unsigned int j; | |
32ee8c3e | 424 | |
ffac80e9 | 425 | ret = sscanf(buf, "%u", &input); |
2b03f891 | 426 | if (ret != 1) |
3d5ee9e5 DJ |
427 | return -EINVAL; |
428 | ||
2b03f891 | 429 | if (input > 1) |
3d5ee9e5 | 430 | input = 1; |
32ee8c3e | 431 | |
4471a34f | 432 | if (input == od_tuners.ignore_nice) { /* nothing to do */ |
3d5ee9e5 DJ |
433 | return count; |
434 | } | |
4471a34f | 435 | od_tuners.ignore_nice = input; |
3d5ee9e5 | 436 | |
ccb2fe20 | 437 | /* we need to re-evaluate prev_cpu_idle */ |
dac1c1a5 | 438 | for_each_online_cpu(j) { |
4471a34f | 439 | struct od_cpu_dbs_info_s *dbs_info; |
245b2e70 | 440 | dbs_info = &per_cpu(od_cpu_dbs_info, j); |
4471a34f VK |
441 | dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, |
442 | &dbs_info->cdbs.prev_cpu_wall); | |
443 | if (od_tuners.ignore_nice) | |
444 | dbs_info->cdbs.prev_cpu_nice = | |
445 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | |
1ca3abdb | 446 | |
3d5ee9e5 | 447 | } |
3d5ee9e5 DJ |
448 | return count; |
449 | } | |
450 | ||
0e625ac1 TR |
451 | static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, |
452 | const char *buf, size_t count) | |
05ca0350 AS |
453 | { |
454 | unsigned int input; | |
455 | int ret; | |
456 | ret = sscanf(buf, "%u", &input); | |
457 | ||
458 | if (ret != 1) | |
459 | return -EINVAL; | |
460 | ||
461 | if (input > 1000) | |
462 | input = 1000; | |
463 | ||
4471a34f | 464 | od_tuners.powersave_bias = input; |
05ca0350 | 465 | ondemand_powersave_bias_init(); |
05ca0350 AS |
466 | return count; |
467 | } | |
468 | ||
4471a34f VK |
469 | show_one(od, sampling_rate, sampling_rate); |
470 | show_one(od, io_is_busy, io_is_busy); | |
471 | show_one(od, up_threshold, up_threshold); | |
472 | show_one(od, sampling_down_factor, sampling_down_factor); | |
473 | show_one(od, ignore_nice_load, ignore_nice); | |
474 | show_one(od, powersave_bias, powersave_bias); | |
475 | ||
6dad2a29 | 476 | define_one_global_rw(sampling_rate); |
07d77759 | 477 | define_one_global_rw(io_is_busy); |
6dad2a29 | 478 | define_one_global_rw(up_threshold); |
3f78a9f7 | 479 | define_one_global_rw(sampling_down_factor); |
6dad2a29 BP |
480 | define_one_global_rw(ignore_nice_load); |
481 | define_one_global_rw(powersave_bias); | |
4471a34f | 482 | define_one_global_ro(sampling_rate_min); |
1da177e4 | 483 | |
2b03f891 | 484 | static struct attribute *dbs_attributes[] = { |
1da177e4 LT |
485 | &sampling_rate_min.attr, |
486 | &sampling_rate.attr, | |
1da177e4 | 487 | &up_threshold.attr, |
3f78a9f7 | 488 | &sampling_down_factor.attr, |
001893cd | 489 | &ignore_nice_load.attr, |
05ca0350 | 490 | &powersave_bias.attr, |
19379b11 | 491 | &io_is_busy.attr, |
1da177e4 LT |
492 | NULL |
493 | }; | |
494 | ||
4471a34f | 495 | static struct attribute_group od_attr_group = { |
1da177e4 LT |
496 | .attrs = dbs_attributes, |
497 | .name = "ondemand", | |
498 | }; | |
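/*
* These tunables are normally exposed under the global cpufreq sysfs
* directory, typically /sys/devices/system/cpu/cpufreq/ondemand/ (the exact
* location depends on how the cpufreq core registers the attribute group).
* A quick way to experiment is, for example:
*
*   echo 90 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold
*/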
499 | ||
500 | /************************** sysfs end ************************/ | |
501 | ||
4471a34f | 502 | define_get_cpu_dbs_routines(od_cpu_dbs_info); |
6b8fcd90 | 503 | |
4471a34f VK |
504 | static struct od_ops od_ops = { |
505 | .io_busy = should_io_be_busy, | |
506 | .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu, | |
507 | .powersave_bias_target = powersave_bias_target, | |
508 | .freq_increase = dbs_freq_increase, | |
509 | }; | |
2f8a835c | 510 | |
4471a34f VK |
511 | static struct dbs_data od_dbs_data = { |
512 | .governor = GOV_ONDEMAND, | |
513 | .attr_group = &od_attr_group, | |
514 | .tuners = &od_tuners, | |
515 | .get_cpu_cdbs = get_cpu_cdbs, | |
516 | .get_cpu_dbs_info_s = get_cpu_dbs_info_s, | |
517 | .gov_dbs_timer = od_dbs_timer, | |
518 | .gov_check_cpu = od_check_cpu, | |
519 | .gov_ops = &od_ops, | |
520 | }; | |
1da177e4 | 521 | |
4471a34f VK |
522 | static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy, |
523 | unsigned int event) | |
1da177e4 | 524 | { |
4471a34f | 525 | return cpufreq_governor_dbs(&od_dbs_data, policy, event); |
1da177e4 LT |
526 | } |
527 | ||
4471a34f VK |
528 | #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND |
529 | static | |
19379b11 | 530 | #endif |
4471a34f VK |
531 | struct cpufreq_governor cpufreq_gov_ondemand = { |
532 | .name = "ondemand", | |
533 | .governor = od_cpufreq_governor_dbs, | |
534 | .max_transition_latency = TRANSITION_LATENCY_LIMIT, | |
535 | .owner = THIS_MODULE, | |
536 | }; | |
1da177e4 | 537 | |
1da177e4 LT |
538 | static int __init cpufreq_gov_dbs_init(void) |
539 | { | |
4f6e6b9f AR |
540 | u64 idle_time; |
541 | int cpu = get_cpu(); | |
80800913 | 542 | |
4471a34f | 543 | mutex_init(&od_dbs_data.mutex); |
21f2e3c8 | 544 | idle_time = get_cpu_idle_time_us(cpu, NULL); |
4f6e6b9f | 545 | put_cpu(); |
80800913 | 546 | if (idle_time != -1ULL) { |
547 | /* Idle micro accounting is supported. Use finer thresholds */ | |
4471a34f VK |
548 | od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; |
549 | od_tuners.down_differential = MICRO_FREQUENCY_DOWN_DIFFERENTIAL; | |
cef9615a | 550 | /* |
bd74b32b | 551 | * In the nohz/micro accounting case we set the minimum sampling rate |
cef9615a TR |
552 | * not depending on HZ, but fixed (very low). The deferred |
553 | * timer might skip some samples if idle/sleeping as needed. | |
554 | */ | |
4471a34f | 555 | od_dbs_data.min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; |
cef9615a TR |
556 | } else { |
557 | /* For correct statistics, we need 10 ticks for each measure */ | |
4471a34f VK |
558 | od_dbs_data.min_sampling_rate = MIN_SAMPLING_RATE_RATIO * |
559 | jiffies_to_usecs(10); | |
80800913 | 560 | } |
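/*
* For illustration, assuming MIN_SAMPLING_RATE_RATIO is 2 (as defined in
* cpufreq_governor.h): without idle micro accounting and HZ = 250 this
* gives min_sampling_rate = 2 * jiffies_to_usecs(10) = 80000 us (80 ms),
* while with micro accounting it is fixed at 10000 us (10 ms).
*/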
888a794c | 561 | |
57df5573 | 562 | return cpufreq_register_governor(&cpufreq_gov_ondemand); |
1da177e4 LT |
563 | } |
564 | ||
565 | static void __exit cpufreq_gov_dbs_exit(void) | |
566 | { | |
1c256245 | 567 | cpufreq_unregister_governor(&cpufreq_gov_ondemand); |
1da177e4 LT |
568 | } |
569 | ||
ffac80e9 VP |
570 | MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>"); |
571 | MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>"); | |
572 | MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for " | |
2b03f891 | 573 | "Low Latency Frequency Transition capable processors"); |
ffac80e9 | 574 | MODULE_LICENSE("GPL"); |
1da177e4 | 575 | |
6915719b JW |
576 | #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND |
577 | fs_initcall(cpufreq_gov_dbs_init); | |
578 | #else | |
1da177e4 | 579 | module_init(cpufreq_gov_dbs_init); |
6915719b | 580 | #endif |
1da177e4 | 581 | module_exit(cpufreq_gov_dbs_exit); |