Merge branch 'perfcounters-fixes-for-linus' of git://git.kernel.org/pub/scm/linux...
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 990b188803ced00f8b3554bb1fe570b14b716c69..ecc637a0d591b74ced8c17eea315631296b0b205 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -710,31 +710,28 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
        if (initial && sched_feat(START_DEBIT))
                vruntime += sched_vslice(cfs_rq, se);
 
-       if (!initial) {
-               /* sleeps upto a single latency don't count. */
-               if (sched_feat(FAIR_SLEEPERS)) {
-                       unsigned long thresh = sysctl_sched_latency;
+       /* sleeps up to a single latency don't count. */
+       if (!initial && sched_feat(FAIR_SLEEPERS)) {
+               unsigned long thresh = sysctl_sched_latency;
 
-                       /*
-                        * Convert the sleeper threshold into virtual time.
-                        * SCHED_IDLE is a special sub-class.  We care about
-                        * fairness only relative to other SCHED_IDLE tasks,
-                        * all of which have the same weight.
-                        */
-                       if (sched_feat(NORMALIZED_SLEEPER) &&
-                                       (!entity_is_task(se) ||
-                                        task_of(se)->policy != SCHED_IDLE))
-                               thresh = calc_delta_fair(thresh, se);
+               /*
+                * Convert the sleeper threshold into virtual time.
+                * SCHED_IDLE is a special sub-class.  We care about
+                * fairness only relative to other SCHED_IDLE tasks,
+                * all of which have the same weight.
+                */
+               if (sched_feat(NORMALIZED_SLEEPER) && (!entity_is_task(se) ||
+                                task_of(se)->policy != SCHED_IDLE))
+                       thresh = calc_delta_fair(thresh, se);
 
-                       /*
-                        * Halve their sleep time's effect, to allow
-                        * for a gentler effect of sleepers:
-                        */
-                       if (sched_feat(GENTLE_FAIR_SLEEPERS))
-                               thresh >>= 1;
+               /*
+                * Halve their sleep time's effect, to allow
+                * for a gentler effect of sleepers:
+                */
+               if (sched_feat(GENTLE_FAIR_SLEEPERS))
+                       thresh >>= 1;
 
-                       vruntime -= thresh;
-               }
+               vruntime -= thresh;
        }
 
        /* ensure we never gain time by being placed backwards. */
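[Note: the rewritten branch above folds the nested checks into one condition and computes a single sleeper credit that is subtracted from vruntime. A minimal userspace sketch of that arithmetic, assuming an illustrative 20 ms latency, a unit weight of 1024, and a simplified stand-in for calc_delta_fair() that scales by NICE_0_LOAD/weight; all constants and helper names here are assumptions of the sketch, not taken from this diff:

#include <stdio.h>

#define NICE_0_LOAD    1024UL       /* assumed unit weight */
#define SCHED_LATENCY  20000000UL   /* assumed 20 ms default, in ns */

/* model of the sleeper credit computed in place_entity() above */
static unsigned long sleeper_credit(unsigned long weight,
                                    int normalized, int gentle)
{
        unsigned long thresh = SCHED_LATENCY;

        /* stand-in for calc_delta_fair(): wall time -> virtual time */
        if (normalized)
                thresh = thresh * NICE_0_LOAD / weight;

        /* GENTLE_FAIR_SLEEPERS halves the credit */
        if (gentle)
                thresh >>= 1;

        return thresh;
}

int main(void)
{
        /* a nice-0 task with both features on gets half a latency */
        printf("credit: %lu ns\n", sleeper_credit(NICE_0_LOAD, 1, 1));
        return 0;
}
]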
@@ -1343,7 +1340,8 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
        int sync = wake_flags & WF_SYNC;
 
        if (sd_flag & SD_BALANCE_WAKE) {
-               if (sched_feat(AFFINE_WAKEUPS))
+               if (sched_feat(AFFINE_WAKEUPS) &&
+                   cpumask_test_cpu(cpu, &p->cpus_allowed))
                        want_affine = 1;
                new_cpu = prev_cpu;
        }
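[Note: the added cpumask_test_cpu() check means want_affine is only set when the waking CPU is actually permitted by the task's affinity mask, rather than unconditionally whenever AFFINE_WAKEUPS is enabled. A hedged userspace model of that guard, using a plain bitmask in place of struct cpumask (an assumption of the sketch):

#include <stdbool.h>
#include <stdio.h>

/* stand-in for cpumask_test_cpu() on a flat bitmask */
static bool cpu_allowed(unsigned long cpus_allowed, int cpu)
{
        return cpus_allowed & (1UL << cpu);
}

int main(void)
{
        unsigned long mask = 0x5;   /* CPUs 0 and 2 allowed */
        int waking_cpu = 1;

        /* only consider an affine wakeup on a permitted CPU */
        int want_affine = cpu_allowed(mask, waking_cpu) ? 1 : 0;

        printf("want_affine = %d\n", want_affine);
        return 0;
}
]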
@@ -1941,6 +1939,25 @@ static void moved_group_fair(struct task_struct *p)
 }
 #endif
 
+unsigned int get_rr_interval_fair(struct task_struct *task)
+{
+       struct sched_entity *se = &task->se;
+       unsigned long flags;
+       struct rq *rq;
+       unsigned int rr_interval = 0;
+
+       /*
+        * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
+        * idle runqueue:
+        */
+       rq = task_rq_lock(task, &flags);
+       if (rq->cfs.load.weight)
+               rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
+       task_rq_unlock(rq, &flags);
+
+       return rr_interval;
+}
+
 /*
  * All the scheduling class methods:
  */
@@ -1969,6 +1986,8 @@ static const struct sched_class fair_sched_class = {
        .prio_changed           = prio_changed_fair,
        .switched_to            = switched_to_fair,
 
+       .get_rr_interval        = get_rr_interval_fair,
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
        .moved_group            = moved_group_fair,
 #endif
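[Note: the new .get_rr_interval hook lets generic code ask a scheduling class for a task's round-robin interval without knowing the class; sched_slice() returns nanoseconds, which get_rr_interval_fair() converts with NS_TO_JIFFIES(). A minimal userspace sketch of that dispatch pattern, with all names, the fixed slice, and the HZ=250 conversion invented for illustration (the real caller, e.g. sys_sched_rr_get_interval(), is outside this diff):

#include <stdio.h>

struct task;                               /* opaque stand-in */

/* model of a sched_class carrying the newly added hook */
struct class_model {
        unsigned int (*get_rr_interval)(struct task *t);
};

static unsigned int fair_rr_interval(struct task *t)
{
        unsigned long slice_ns = 12000000UL;     /* assumed sched_slice() result */
        unsigned long ns_per_jiffy = 4000000UL;  /* HZ=250 assumed */

        (void)t;
        return slice_ns / ns_per_jiffy;          /* NS_TO_JIFFIES() model */
}

static const struct class_model fair_class = {
        .get_rr_interval = fair_rr_interval,
};

int main(void)
{
        /* caller dispatches through the class, as the syscall would */
        printf("rr interval: %u jiffies\n",
               fair_class.get_rr_interval(NULL));
        return 0;
}
]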