#endif
EXPORT_SYMBOL_GPL(nr_threads);
EXPORT_SYMBOL_GPL(nr_running);
-EXPORT_SYMBOL_GPL(nr_iowait);
+EXPORT_SYMBOL_GPL(nr_iowait_acct);
os_data->nr_threads = nr_threads;
os_data->nr_running = nr_running();
- os_data->nr_iowait = nr_iowait();
+ os_data->nr_iowait = nr_iowait_acct();
os_data->avenrun[0] = avenrun[0] + (FIXED_1/200);
os_data->avenrun[1] = avenrun[1] + (FIXED_1/200);
os_data->avenrun[2] = avenrun[2] + (FIXED_1/200);
(unsigned long long)boottime.tv_sec,
total_forks,
nr_running(),
- nr_iowait());
+ nr_iowait_acct());
seq_put_decimal_ull(p, "softirq ", (unsigned long long)sum_softirq);
/* Bit to tell TOMOYO we're in execve(): */
unsigned in_execve:1;
+ /* task is in iowait */
unsigned in_iowait:1;
+ /*
+ * Task is in iowait and should be accounted as such. Can only be set
+ * if ->in_iowait is also set.
+ */
+ unsigned in_iowait_acct:1;
#ifndef TIF_RESTORE_SIGMASK
unsigned restore_sigmask:1;
#endif
extern int nr_processes(void);
extern unsigned int nr_running(void);
extern bool single_task_running(void);
-extern unsigned int nr_iowait(void);
-extern unsigned int nr_iowait_cpu(int cpu);
+extern unsigned int nr_iowait_acct(void);
+extern unsigned int nr_iowait_acct_cpu(int cpu);
+unsigned int nr_iowait_cpu(int cpu);
+
+enum {
+ TASK_IOWAIT = 1,
+ TASK_IOWAIT_ACCT = 2,
+};
static inline int sched_info_on(void)
{
if (p->in_iowait) {
delayacct_blkio_end(p);
task_rq(p)->nr_iowait--;
+ if (p->in_iowait_acct)
+ task_rq(p)->nr_iowait_acct--;
}
activate_task(rq, p, en_flags);
delayacct_blkio_end(p);
atomic_inc(&__rq->nr_iowait_remote);
+ if (p->in_iowait_acct)
+ atomic_inc(&__rq->nr_iowait_acct_remote);
}
wake_flags |= WF_MIGRATED;
* it does become runnable.
*/
-unsigned int nr_iowait_cpu(int cpu)
+unsigned int nr_iowait_acct_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
- return rq->nr_iowait - atomic_read(&rq->nr_iowait_remote);
+ return rq->nr_iowait_acct - atomic_read(&rq->nr_iowait_acct_remote);
}
/*
* Task CPU affinities can make all that even more 'interesting'.
*/
-unsigned int nr_iowait(void)
+unsigned int nr_iowait_acct(void)
{
unsigned int i, sum = 0;
for_each_possible_cpu(i)
- sum += nr_iowait_cpu(i);
+ sum += nr_iowait_acct_cpu(i);
return sum;
}
+unsigned int nr_iowait_cpu(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ return rq->nr_iowait - atomic_read(&rq->nr_iowait_remote);
+}
+
#ifdef CONFIG_SMP
/*
if (prev->in_iowait) {
rq->nr_iowait++;
+ if (prev->in_iowait_acct)
+ rq->nr_iowait_acct++;
delayacct_blkio_start();
}
}
}
EXPORT_SYMBOL_GPL(yield_to);
+/*
+ * Returns a token which is composed of the two bits of iowait state -
+ * one is whether we're marking ourselves as in iowait for cpufreq reasons,
+ * and the other is whether the task should be accounted as such.
+ */
int io_schedule_prepare(void)
{
- int old_iowait = current->in_iowait;
+ int old_wait_flags = 0;
+
+ if (current->in_iowait)
+ old_wait_flags |= TASK_IOWAIT;
+ if (current->in_iowait_acct)
+ old_wait_flags |= TASK_IOWAIT_ACCT;
current->in_iowait = 1;
+ current->in_iowait_acct = 1;
blk_flush_plug(current->plug, true);
- return old_iowait;
+ return old_wait_flags;
}
-void io_schedule_finish(int token)
+void io_schedule_finish(int old_wait_flags)
{
- current->in_iowait = token;
+ if (!(old_wait_flags & TASK_IOWAIT))
+ current->in_iowait = 0;
+ if (!(old_wait_flags & TASK_IOWAIT_ACCT))
+ current->in_iowait_acct = 0;
}
/*
#endif
#endif /* CONFIG_SMP */
hrtick_rq_init(rq);
+ rq->nr_iowait_acct = 0;
+ atomic_set(&rq->nr_iowait_acct_remote, 0);
rq->nr_iowait = 0;
atomic_set(&rq->nr_iowait_remote, 0);
* modified under the rq lock (nr_iowait), and if we don't have the rq
* lock, then nr_iowait_remote is used.
*/
+ unsigned int nr_iowait_acct;
+ atomic_t nr_iowait_acct_remote;
unsigned int nr_iowait;
atomic_t nr_iowait_remote;
delta = ktime_sub(now, ts->idle_entrytime);
write_seqcount_begin(&ts->idle_sleeptime_seq);
- if (nr_iowait_cpu(smp_processor_id()) > 0)
+ if (nr_iowait_acct_cpu(smp_processor_id()) > 0)
ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
else
ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
return get_cpu_sleep_time_us(ts, &ts->idle_sleeptime,
- !nr_iowait_cpu(cpu), last_update_time);
+ !nr_iowait_acct_cpu(cpu), last_update_time);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
return get_cpu_sleep_time_us(ts, &ts->iowait_sleeptime,
- nr_iowait_cpu(cpu), last_update_time);
+ nr_iowait_acct_cpu(cpu), last_update_time);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);