hrtimer: reduce calls to hrtimer_get_softirq_time()
author	Dimitri Sivanich <sivanich@sgi.com>
Fri, 18 Apr 2008 20:39:00 +0000 (13:39 -0700)
committer	Thomas Gleixner <tglx@linutronix.de>
Mon, 21 Apr 2008 05:59:51 +0000 (07:59 +0200)
hrtimer_run_queues() calls hrtimer_get_softirq_time() more often than it
needs to: it runs from hardirq context on every jiffy and fetches the time
unconditionally, even when no timer is pending on any of its clock bases.
This can cause frequent contention on systems with large numbers of
processors/cores.

With this patch, hrtimer_run_queues() calls hrtimer_get_softirq_time() only
if there is a pending timer in one of the hrtimer clock bases, and then at
most once per invocation.

It also merges hrtimer_run_queues() and the inline helper
run_hrtimer_queue() into a single function; the lazy-fetch pattern this
introduces is sketched below.
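
The core of the change is a simple lazy-fetch pattern: defer the shared
time read until the first clock base that actually has a pending timer,
then reuse the cached value for the rest of the scan.  A minimal
standalone userspace sketch of that pattern follows; the names in it
(struct queue, fetch_time, scan_queues) are illustrative stand-ins, not
kernel API.

	#define _POSIX_C_SOURCE 199309L	/* for clock_gettime() in strict C99 */
	#include <stdio.h>
	#include <time.h>

	#define NQUEUES 2

	struct queue {
		int pending;		/* does this queue hold expired work? */
	};

	static struct timespec now;	/* cached time base, like softirq_time */

	/* Stands in for hrtimer_get_softirq_time(): the read we want to avoid. */
	static void fetch_time(void)
	{
		clock_gettime(CLOCK_MONOTONIC, &now);
		puts("time base refreshed");	/* at most once per scan */
	}

	static void scan_queues(struct queue *q)
	{
		int i, gettime = 1;	/* the lazy flag the patch introduces */

		for (i = 0; i < NQUEUES; i++) {
			if (!q[i].pending)
				continue;	/* empty queue: time not needed */

			if (gettime) {		/* first non-empty queue: fetch once */
				fetch_time();
				gettime = 0;
			}
			/* ... expire entries of q[i] against 'now' here ... */
		}
	}

	int main(void)
	{
		struct queue q[NQUEUES] = { { .pending = 1 }, { .pending = 1 } };

		scan_queues(q);		/* "time base refreshed" prints once */
		return 0;
	}

With both queues pending the time is still fetched only once; with none
pending it is never fetched at all, which is exactly the saving the patch
is after.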

[ tglx@linutronix.de: coding style ]

Signed-off-by: Dimitri Sivanich <sivanich@sgi.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/hrtimer.c

index c642ef75069f28f4b230bea197dcbf86e3ea4b95..70d4adc74639f56b039f0dcfdacd7eda284784da 100644
@@ -1238,51 +1238,51 @@ void hrtimer_run_pending(void)
 /*
  * Called from hardirq context every jiffy
  */
-static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
-                                    int index)
+void hrtimer_run_queues(void)
 {
        struct rb_node *node;
-       struct hrtimer_clock_base *base = &cpu_base->clock_base[index];
+       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+       struct hrtimer_clock_base *base;
+       int index, gettime = 1;
 
-       if (!base->first)
+       if (hrtimer_hres_active())
                return;
 
-       if (base->get_softirq_time)
-               base->softirq_time = base->get_softirq_time();
-
-       spin_lock(&cpu_base->lock);
+       for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
+               base = &cpu_base->clock_base[index];
 
-       while ((node = base->first)) {
-               struct hrtimer *timer;
-
-               timer = rb_entry(node, struct hrtimer, node);
-               if (base->softirq_time.tv64 <= timer->expires.tv64)
-                       break;
-
-               if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
-                       __remove_hrtimer(timer, base, HRTIMER_STATE_PENDING, 0);
-                       list_add_tail(&timer->cb_entry,
-                                       &base->cpu_base->cb_pending);
+               if (!base->first)
                        continue;
+
+               if (gettime) {
+                       hrtimer_get_softirq_time(cpu_base);
+                       gettime = 0;
                }
 
-               __run_hrtimer(timer);
-       }
-       spin_unlock(&cpu_base->lock);
-}
+               if (base->get_softirq_time)
+                       base->softirq_time = base->get_softirq_time();
 
-void hrtimer_run_queues(void)
-{
-       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-       int i;
+               spin_lock(&cpu_base->lock);
 
-       if (hrtimer_hres_active())
-               return;
+               while ((node = base->first)) {
+                       struct hrtimer *timer;
 
-       hrtimer_get_softirq_time(cpu_base);
+                       timer = rb_entry(node, struct hrtimer, node);
+                       if (base->softirq_time.tv64 <= timer->expires.tv64)
+                               break;
 
-       for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
-               run_hrtimer_queue(cpu_base, i);
+                       if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
+                               __remove_hrtimer(timer, base,
+                                       HRTIMER_STATE_PENDING, 0);
+                               list_add_tail(&timer->cb_entry,
+                                       &base->cpu_base->cb_pending);
+                               continue;
+                       }
+
+                       __run_hrtimer(timer);
+               }
+               spin_unlock(&cpu_base->lock);
+       }
 }
 
 /*
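
For reference, this is what hrtimer_run_queues() looks like once the hunk
is applied, reassembled from the added and context lines above:

	void hrtimer_run_queues(void)
	{
		struct rb_node *node;
		struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
		struct hrtimer_clock_base *base;
		int index, gettime = 1;

		/* In high-resolution mode expiry is driven elsewhere. */
		if (hrtimer_hres_active())
			return;

		for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
			base = &cpu_base->clock_base[index];

			if (!base->first)
				continue;

			/* First populated base: fetch the time base once. */
			if (gettime) {
				hrtimer_get_softirq_time(cpu_base);
				gettime = 0;
			}

			if (base->get_softirq_time)
				base->softirq_time = base->get_softirq_time();

			spin_lock(&cpu_base->lock);

			while ((node = base->first)) {
				struct hrtimer *timer;

				timer = rb_entry(node, struct hrtimer, node);
				if (base->softirq_time.tv64 <= timer->expires.tv64)
					break;

				if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
					__remove_hrtimer(timer, base,
						HRTIMER_STATE_PENDING, 0);
					list_add_tail(&timer->cb_entry,
						&base->cpu_base->cb_pending);
					continue;
				}

				__run_hrtimer(timer);
			}
			spin_unlock(&cpu_base->lock);
		}
	}

Note that only the first populated base triggers
hrtimer_get_softirq_time(); a base that provides its own
get_softirq_time() hook still refreshes its softirq_time individually.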