Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
authorLinus Torvalds <torvalds@linux-foundation.org>
Sat, 13 Mar 2010 22:46:18 +0000 (14:46 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 13 Mar 2010 22:46:18 +0000 (14:46 -0800)
* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: Fix pick_next_highest_task_rt() for cgroups
  sched: Cleanup: remove unused variable in try_to_wake_up()
  x86: Fix sched_clock_cpu for systems with unsynchronized TSC

1  2 
kernel/sched.c
kernel/sched_rt.c

diff --combined kernel/sched.c
index 150b6988de49f4b63dd7226479cec6846205e9b2,2c1db81f80eb87ee12f68e0a020aa61d287e6cec..9ab3cd7858d38001cc4ed0586bf187438e685cd7
@@@ -1521,7 -1521,7 +1521,7 @@@ static unsigned long cpu_avg_load_per_t
  
  #ifdef CONFIG_FAIR_GROUP_SCHED
  
 -static __read_mostly unsigned long *update_shares_data;
 +static __read_mostly unsigned long __percpu *update_shares_data;
  
  static void __set_se_shares(struct sched_entity *se, unsigned long shares);
  
@@@ -2359,7 -2359,7 +2359,7 @@@ static int try_to_wake_up(struct task_s
  {
        int cpu, orig_cpu, this_cpu, success = 0;
        unsigned long flags;
-       struct rq *rq, *orig_rq;
+       struct rq *rq;
  
        if (!sched_feat(SYNC_WAKEUPS))
                wake_flags &= ~WF_SYNC;
        this_cpu = get_cpu();
  
        smp_wmb();
-       rq = orig_rq = task_rq_lock(p, &flags);
+       rq = task_rq_lock(p, &flags);
        update_rq_clock(rq);
        if (!(p->state & state))
                goto out;
@@@ -4353,7 -4353,7 +4353,7 @@@ int can_nice(const struct task_struct *
        /* convert nice value [19,-20] to rlimit style value [1,40] */
        int nice_rlim = 20 - nice;
  
 -      return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
 +      return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
                capable(CAP_SYS_NICE));
  }
  
@@@ -4530,7 -4530,7 +4530,7 @@@ recheck
  
                        if (!lock_task_sighand(p, &flags))
                                return -ESRCH;
 -                      rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
 +                      rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
                        unlock_task_sighand(p, &flags);
  
                        /* can't set/change the rt policy */
@@@ -7406,13 -7406,11 +7406,13 @@@ static ssize_t sched_power_savings_stor
  
  #ifdef CONFIG_SCHED_MC
  static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
 +                                         struct sysdev_class_attribute *attr,
                                           char *page)
  {
        return sprintf(page, "%u\n", sched_mc_power_savings);
  }
  static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
 +                                          struct sysdev_class_attribute *attr,
                                            const char *buf, size_t count)
  {
        return sched_power_savings_store(buf, count, 0);
@@@ -7424,13 -7422,11 +7424,13 @@@ static SYSDEV_CLASS_ATTR(sched_mc_power
  
  #ifdef CONFIG_SCHED_SMT
  static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
 +                                          struct sysdev_class_attribute *attr,
                                            char *page)
  {
        return sprintf(page, "%u\n", sched_smt_power_savings);
  }
  static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
 +                                           struct sysdev_class_attribute *attr,
                                             const char *buf, size_t count)
  {
        return sched_power_savings_store(buf, count, 1);
@@@ -8817,7 -8813,7 +8817,7 @@@ struct cgroup_subsys cpu_cgroup_subsys 
  struct cpuacct {
        struct cgroup_subsys_state css;
        /* cpuusage holds pointer to a u64-type object on every cpu */
 -      u64 *cpuusage;
 +      u64 __percpu *cpuusage;
        struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
        struct cpuacct *parent;
  };
diff --combined kernel/sched_rt.c
index 5a6ed1f0990a5cd5f128d4f61cd8522a40ff3466,c4fb42a66cab2afd9972ba17beeb110be8e283b3..b5b920ae2ea7fe83ca17d2c94d0a7b638574144c
@@@ -1146,7 -1146,12 +1146,12 @@@ static struct task_struct *pick_next_hi
                if (next && next->prio < idx)
                        continue;
                list_for_each_entry(rt_se, array->queue + idx, run_list) {
-                       struct task_struct *p = rt_task_of(rt_se);
+                       struct task_struct *p;
+                       if (!rt_entity_is_task(rt_se))
+                               continue;
+                       p = rt_task_of(rt_se);
                        if (pick_rt_task(rq, p, cpu)) {
                                next = p;
                                break;
@@@ -1662,9 -1667,8 +1667,9 @@@ static void watchdog(struct rq *rq, str
        if (!p->signal)
                return;
  
 -      soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
 -      hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
 +      /* max may change after cur was read, this will be fixed next tick */
 +      soft = task_rlimit(p, RLIMIT_RTTIME);
 +      hard = task_rlimit_max(p, RLIMIT_RTTIME);
  
        if (soft != RLIM_INFINITY) {
                unsigned long next;