Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Mon, 30 Jul 2018 19:13:56 +0000 (12:13 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 30 Jul 2018 19:13:56 +0000 (12:13 -0700)
Pull scheduler fixes from Ingo Molnar:
 "Misc fixes:

   - a deadline scheduler bug fix that triggered a kernel warning

   - an RT_RUNTIME_SHARE fix

   - a stop_machine preemption fix

   - a fix for a potential NULL pointer dereference in
     sched_domain_debug_one()"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/rt: Restore rt_runtime after disabling RT_RUNTIME_SHARE
  sched/deadline: Update rq_clock of later_rq when pushing a task
  stop_machine: Disable preemption after queueing stopper threads
  sched/topology: Check variable group before dereferencing it

kernel/sched/deadline.c
kernel/sched/rt.c
kernel/sched/topology.c
kernel/stop_machine.c

kernel/sched/deadline.c
index 10c7b51c0d1fd4be1399f802d1c82e40f60709b5..b5fbdde6afa937096234959351947633694bba3e 100644
@@ -2090,8 +2090,14 @@ retry:
        sub_rq_bw(&next_task->dl, &rq->dl);
        set_task_cpu(next_task, later_rq->cpu);
        add_rq_bw(&next_task->dl, &later_rq->dl);
+
+       /*
+        * Update the later_rq clock here, because the clock is used
+        * by the cpufreq_update_util() inside __add_running_bw().
+        */
+       update_rq_clock(later_rq);
        add_running_bw(&next_task->dl, &later_rq->dl);
-       activate_task(later_rq, next_task, 0);
+       activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
        ret = 1;
 
        resched_curr(later_rq);
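
The added comment boils down to a simple ordering rule: update the runqueue clock once, before the bandwidth accounting that reads it, then pass ENQUEUE_NOCLOCK so activate_task() does not update it again. For illustration only, a small user-space sketch of that "refresh once, then skip" idea; the names (refresh_clock, SKIP_CLOCK, enqueue) are made up for this sketch and are not the kernel APIs.

/*
 * Toy stand-ins: refresh_clock() plays the role of update_rq_clock(),
 * SKIP_CLOCK the role of ENQUEUE_NOCLOCK. Not kernel code.
 */
#include <stdio.h>
#include <time.h>

struct runqueue { struct timespec clock; };

enum { SKIP_CLOCK = 1 };

static void refresh_clock(struct runqueue *rq)
{
        clock_gettime(CLOCK_MONOTONIC, &rq->clock);
}

/* A consumer that relies on the clock being fresh. */
static void account_bandwidth(const struct runqueue *rq)
{
        printf("accounting at %lld.%09ld\n",
               (long long)rq->clock.tv_sec, (long)rq->clock.tv_nsec);
}

static void enqueue(struct runqueue *rq, int flags)
{
        if (!(flags & SKIP_CLOCK))
                refresh_clock(rq);      /* would be a redundant second update */
        printf("task enqueued\n");
}

int main(void)
{
        struct runqueue rq;

        refresh_clock(&rq);             /* update the clock once, up front */
        account_bandwidth(&rq);         /* the reader sees a fresh clock */
        enqueue(&rq, SKIP_CLOCK);       /* and we do not refresh it twice */
        return 0;
}
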
kernel/sched/rt.c
index 572567078b60b59dd14e3a608c6e9207f1c7e024..eaaec8364f96f65854e8ee3e3e8aa2033297c67b 100644
@@ -836,6 +836,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
                 * can be time-consuming. Try to avoid it when possible.
                 */
                raw_spin_lock(&rt_rq->rt_runtime_lock);
+               if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
+                       rt_rq->rt_runtime = rt_b->rt_runtime;
                skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                if (skip)
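
For context, an illustration that is not part of the commit: with RT_RUNTIME_SHARE enabled, an rt_rq can lend most of its budget to another CPU, so once the feature is switched off each runqueue has to be put back to the per-bandwidth default, which is what the added assignment does. A toy model of that restore, with made-up names and the usual 950000/1000000 us defaults:

/*
 * Toy model only: toy_rt_rq stands in for struct rt_rq, and
 * default_runtime for rt_b->rt_runtime. Not kernel code.
 */
#include <stdio.h>

#define NR_TOY_RQ 2

struct toy_rt_rq { long rt_runtime; };  /* per-CPU RT budget, in us */

int main(void)
{
        const long default_runtime = 950000;    /* 950000 us of each 1000000 us period */
        struct toy_rt_rq rq[NR_TOY_RQ] = {
                { .rt_runtime = 0 },            /* lent its whole budget away */
                { .rt_runtime = 1900000 },      /* borrowed the other CPU's share */
        };

        /* Sharing has been disabled: restore every runqueue to the default. */
        for (int i = 0; i < NR_TOY_RQ; i++) {
                rq[i].rt_runtime = default_runtime;
                printf("rq[%d].rt_runtime = %ld us\n", i, rq[i].rt_runtime);
        }
        return 0;
}
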
kernel/sched/topology.c
index 05a831427bc741e8d6290a6189bedbfda2457ded..56a0fed30c0a8786a47c600e90c4016140b1fb58 100644
@@ -47,7 +47,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
        if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
                printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
        }
-       if (!cpumask_test_cpu(cpu, sched_group_span(group))) {
+       if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
                printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
        }
 
kernel/stop_machine.c
index 1ff523dae6e2b7c0980162549db02bdd350620cc..e190d1ef3a23b005279fb3e950c0db355895eeec 100644
@@ -260,6 +260,15 @@ retry:
        err = 0;
        __cpu_stop_queue_work(stopper1, work1, &wakeq);
        __cpu_stop_queue_work(stopper2, work2, &wakeq);
+       /*
+        * The waking up of stopper threads has to happen
+        * in the same scheduling context as the queueing.
+        * Otherwise, there is a possibility of one of the
+        * above stoppers being woken up by another CPU,
+        * and preempting us. This will cause us to not
+        * wake up the other stopper forever.
+        */
+       preempt_disable();
 unlock:
        raw_spin_unlock(&stopper2->lock);
        raw_spin_unlock_irq(&stopper1->lock);
@@ -271,7 +280,6 @@ unlock:
        }
 
        if (!err) {
-               preempt_disable();
                wake_up_q(&wakeq);
                preempt_enable();
        }
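
Taken together, the two hunks make the success path: queue both works under the stopper locks, disable preemption while the locks are still held, drop the locks, wake both stoppers, and only then re-enable preemption. A stub-based sketch of that ordering; every function below is a local stand-in that just prints a trace, not the kernel primitive of the same name.

#include <stdio.h>

/* Local stand-ins that only print a trace; not the kernel APIs. */
static void lock(const char *n)          { printf("lock   %s\n", n); }
static void unlock(const char *n)        { printf("unlock %s\n", n); }
static void queue_work(const char *n)    { printf("queue  work on %s\n", n); }
static void preempt_disable(void)        { printf("preempt_disable\n"); }
static void preempt_enable(void)         { printf("preempt_enable\n"); }
static void wake_queued_stoppers(void)   { printf("wake both stoppers\n"); }

int main(void)
{
        lock("stopper1");
        lock("stopper2");
        queue_work("stopper1");
        queue_work("stopper2");
        preempt_disable();              /* still under the locks: we cannot be run off the CPU */
        unlock("stopper2");
        unlock("stopper1");
        wake_queued_stoppers();         /* both wakeups happen in this scheduling context */
        preempt_enable();
        return 0;
}
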