Merge branch 'sched-hrtimers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 24 Jun 2015 22:09:40 +0000 (15:09 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 24 Jun 2015 22:09:40 +0000 (15:09 -0700)
Pull scheduler updates from Thomas Gleixner:
 "This series of scheduler updates depends on sched/core and timers/core
  branches, which are already in your tree:

   - Scheduler balancing overhaul to plug a hard to trigger race which
     causes an oops in the balancer (Peter Zijlstra)

   - Lockdep updates which are related to the balancing updates (Peter
     Zijlstra)"

* 'sched-hrtimers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched,lockdep: Employ lock pinning
  lockdep: Implement lock pinning
  lockdep: Simplify lock_release()
  sched: Streamline the task migration locking a little
  sched: Move code around
  sched,dl: Fix sched class hopping CBS hole
  sched, dl: Convert switched_{from, to}_dl() / prio_changed_dl() to balance callbacks
  sched,dl: Remove return value from pull_dl_task()
  sched, rt: Convert switched_{from, to}_rt() / prio_changed_rt() to balance callbacks
  sched,rt: Remove return value from pull_rt_task()
  sched: Allow balance callbacks for check_class_changed()
  sched: Replace normalize_task() with __sched_setscheduler()
  sched: Replace post_schedule with a balance callback list

include/linux/lockdep.h
kernel/locking/lockdep.c
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/sched.h

Simple merge
Simple merge
index c86935a7f1f813664476d311ec43d115efdd2a30,c74191aa4e6acdec19409c73dcb71758b5238b2c..b803e1b8ab0cf7eb381600a32855b822be3c8c89
@@@ -2372,8 -2556,10 +2573,9 @@@ context_switch(struct rq *rq, struct ta
         * of the scheduler it's an obvious special-case), so we
         * do an early lockdep release here:
         */
+       lockdep_unpin_lock(&rq->lock);
        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
  
 -      context_tracking_task_switch(prev, next);
        /* Here we just switch the register state and the stack. */
        switch_to(prev, next, prev);
        barrier();
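
For context on the hunk above: lock pinning is a lockdep debugging annotation. After schedule() takes rq->lock it pins the lock, and lockdep will then warn if anything releases the lock while it is still pinned; code that legitimately gives the lock up early, such as the lockdep-only spin_release() in context_switch(), must unpin it first, which is what the added lockdep_unpin_lock() call does. Below is a toy user-space model of the idea, assuming a pthread mutex and a plain pin counter; it is illustrative only, not how lockdep implements pinning.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

/* Toy model: a lock that can be "pinned" to assert that nothing drops it
 * while a code region still relies on it being held. */
struct pinned_lock {
    pthread_mutex_t mutex;
    int pin_count;
};

static void pl_lock(struct pinned_lock *l)
{
    pthread_mutex_lock(&l->mutex);
}

static void pl_unlock(struct pinned_lock *l)
{
    assert(l->pin_count == 0);  /* releasing a pinned lock is a bug */
    pthread_mutex_unlock(&l->mutex);
}

static void pl_pin(struct pinned_lock *l)
{
    l->pin_count++;
}

static void pl_unpin(struct pinned_lock *l)
{
    assert(l->pin_count > 0);
    l->pin_count--;
}

int main(void)
{
    struct pinned_lock rq_lock = { PTHREAD_MUTEX_INITIALIZER, 0 };

    pl_lock(&rq_lock);
    pl_pin(&rq_lock);    /* like schedule() pinning rq->lock */

    /* ... region that must not drop the lock ... */

    pl_unpin(&rq_lock);  /* like lockdep_unpin_lock() in the hunk above ... */
    pl_unlock(&rq_lock); /* ... before the early spin_release() */

    printf("pin balanced; lock released without tripping the assert\n");
    return 0;
}
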
index eac20c557a55cc83f8e9d7e62578868ba9436aff,e8146415a688ff49c10543e8c033b5f15333ca12..0a17af35670a6d4ba3fb69404f29d1ade28fdfe6
@@@ -676,10 -737,10 +737,10 @@@ static void update_curr_dl(struct rq *r
        sched_rt_avg_update(rq, delta_exec);
  
        dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
 -      if (dl_runtime_exceeded(rq, dl_se)) {
 +      if (dl_runtime_exceeded(dl_se)) {
                dl_se->dl_throttled = 1;
                __dequeue_task_dl(rq, curr, 0);
-               if (unlikely(!start_dl_timer(dl_se, curr->dl.dl_boosted)))
+               if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
                        enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
  
                if (!is_leftmost(curr, &rq->dl))
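
Reading the resolved hunk above: once a deadline task has used up its runtime it is marked throttled and dequeued, and the replenishment timer is armed via start_dl_timer(), which now takes the task itself. The dl_boosted test has moved out of start_dl_timer() into the caller, so a boosted task, or one whose timer cannot be armed, is replenished and re-enqueued straight away. The snippet below is a simplified user-space model of that decision; dl_task, account_runtime and the condition inside start_dl_timer are stand-ins invented for illustration, not the kernel's code.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the scheduling state of one deadline task. */
struct dl_task {
    long long runtime;  /* remaining budget in this period (ns) */
    bool dl_boosted;    /* runs with inherited deadline parameters */
    bool dl_throttled;
};

/* Stand-in for arming the per-task replenishment hrtimer: returns false
 * when the timer cannot be armed. */
static bool start_dl_timer(struct dl_task *p)
{
    return p->runtime > -1000000;  /* arbitrary illustrative condition */
}

static void account_runtime(struct dl_task *p, long long delta_exec)
{
    p->runtime -= delta_exec;
    if (p->runtime > 0)
        return;

    /* Budget exhausted: throttle and take the task off the runqueue. */
    p->dl_throttled = true;
    printf("throttled\n");

    /* Boosted tasks skip the timer; so does any task whose timer cannot
     * be armed -- both get their budget replenished immediately. */
    if (p->dl_boosted || !start_dl_timer(p))
        printf("replenish and re-enqueue now\n");
    else
        printf("sleep until the replenishment timer fires\n");
}

int main(void)
{
    struct dl_task p = { .runtime = 500000 };

    account_runtime(&p, 600000);  /* overruns the remaining budget */
    return 0;
}
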
Simple merge
Simple merge