#endif
EXPORT_SYMBOL_GPL(nr_threads);
EXPORT_SYMBOL_GPL(nr_running);
-EXPORT_SYMBOL_GPL(nr_iowait);
+EXPORT_SYMBOL_GPL(nr_iowait_acct);
os_data->nr_threads = nr_threads;
os_data->nr_running = nr_running();
- os_data->nr_iowait = nr_iowait();
+ os_data->nr_iowait = nr_iowait_acct();
os_data->avenrun[0] = avenrun[0] + (FIXED_1/200);
os_data->avenrun[1] = avenrun[1] + (FIXED_1/200);
os_data->avenrun[2] = avenrun[2] + (FIXED_1/200);
(unsigned long long)boottime.tv_sec,
total_forks,
nr_running(),
- nr_iowait());
+ nr_iowait_acct());
seq_put_decimal_ull(p, "softirq ", (unsigned long long)sum_softirq);
/* Bit to tell TOMOYO we're in execve(): */
unsigned in_execve:1;
+ /* task is in iowait */
unsigned in_iowait:1;
+	/*
+	 * Task is in iowait and should be accounted as such. Can only be
+	 * set if ->in_iowait is also set.
+	 */
+ unsigned in_iowait_acct:1;
#ifndef TIF_RESTORE_SIGMASK
unsigned restore_sigmask:1;
#endif
extern int nr_processes(void);
extern unsigned int nr_running(void);
extern bool single_task_running(void);
-extern unsigned int nr_iowait(void);
-extern unsigned int nr_iowait_cpu(int cpu);
+extern unsigned int nr_iowait_acct(void);
+extern unsigned int nr_iowait_acct_cpu(int cpu);
+extern unsigned int nr_iowait_cpu(int cpu);
static inline int sched_info_on(void)
{
#endif /* !CONFIG_SMP */
+/* Shift by half the bit width of atomic_long_t (sizeof() is in bytes) */
+#define IOWAIT_SHIFT (4 * sizeof(atomic_long_t))
+
+/*
+ * Store iowait and iowait_acct state in the same variable. The lower bits
+ * hold the iowait state, and the upper bits hold the iowait_acct state.
+ */
static void task_iowait_inc(struct task_struct *p)
{
-	atomic_long_inc(&task_rq(p)->nr_iowait);
+	/* Low half counts iowait tasks, high half counts iowait_acct tasks */
+	long val = 1 + ((long) p->in_iowait_acct << IOWAIT_SHIFT);
+
+	atomic_long_add(val, &task_rq(p)->nr_iowait);
}
static void task_iowait_dec(struct task_struct *p)
{
-	atomic_long_dec(&task_rq(p)->nr_iowait);
+	/* Must mirror task_iowait_inc(): same packed value, subtracted */
+	long val = 1 + ((long) p->in_iowait_acct << IOWAIT_SHIFT);
+
+	atomic_long_sub(val, &task_rq(p)->nr_iowait);
}
int rq_iowait(struct rq *rq)
{
-	return atomic_long_read(&rq->nr_iowait);
+	/* Low half of the packed counter holds the plain iowait count */
+	return atomic_long_read(&rq->nr_iowait) & ((1UL << IOWAIT_SHIFT) - 1);
+}
+
+int rq_iowait_acct(struct rq *rq)
+{
+	/*
+	 * High half of the packed counter holds the iowait_acct count.
+	 *
+	 * NOTE(review): if the low half ever transiently underflows (dec on
+	 * a different rq than the inc), the borrow would corrupt this half —
+	 * confirm inc/dec always pair on the same rq.
+	 */
+	return atomic_long_read(&rq->nr_iowait) >> IOWAIT_SHIFT;
}
static void
* it does become runnable.
*/
-unsigned int nr_iowait_cpu(int cpu)
+unsigned int nr_iowait_acct_cpu(int cpu)
+{
+	return rq_iowait_acct(cpu_rq(cpu));
+}
+
+unsigned int nr_iowait_cpu(int cpu)
{
	return rq_iowait(cpu_rq(cpu));
}
* Task CPU affinities can make all that even more 'interesting'.
*/
-unsigned int nr_iowait(void)
+unsigned int nr_iowait_acct(void)
{
	unsigned int i, sum = 0;

+	/* Sum the accounted-iowait halves over every possible CPU */
	for_each_possible_cpu(i)
-		sum += nr_iowait_cpu(i);
+		sum += nr_iowait_acct_cpu(i);
	return sum;
}
#endif /* CONFIG_PREEMPT_DYNAMIC */
+/*
+ * Returns a token which is comprised of the two bits of iowait wait state -
+ * one is whether we're marking ourselves as in iowait for cpufreq reasons,
+ * and the other is if the task should be accounted as such.
+ */
long io_schedule_prepare(void)
{
-	long old_iowait = current->in_iowait;
-
+	long token = current->in_iowait;
+
+	token |= (long) current->in_iowait_acct << IOWAIT_SHIFT;
	current->in_iowait = 1;
+	current->in_iowait_acct = 1;
	blk_flush_plug(current->plug, true);
-	return old_iowait;
+	return token;
}
void io_schedule_finish(long token)
{
-	current->in_iowait = token;
+	/* Restore both iowait state bits packed by io_schedule_prepare() */
+	current->in_iowait = token & 0x01;
+	current->in_iowait_acct = (token >> IOWAIT_SHIFT) & 0x01;
}
/*
extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se);
int rq_iowait(struct rq *rq);
+int rq_iowait_acct(struct rq *rq);
#ifdef CONFIG_RT_MUTEXES
delta = ktime_sub(now, ts->idle_entrytime);
write_seqcount_begin(&ts->idle_sleeptime_seq);
- if (nr_iowait_cpu(smp_processor_id()) > 0)
+ if (nr_iowait_acct_cpu(smp_processor_id()) > 0)
ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
else
ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
return get_cpu_sleep_time_us(ts, &ts->idle_sleeptime,
- !nr_iowait_cpu(cpu), last_update_time);
+ !nr_iowait_acct_cpu(cpu), last_update_time);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
return get_cpu_sleep_time_us(ts, &ts->iowait_sleeptime,
- nr_iowait_cpu(cpu), last_update_time);
+ nr_iowait_acct_cpu(cpu), last_update_time);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);