ACPI: acpi_idle: touch TS_POLLING only in the non-MWAIT case
author	Len Brown <len.brown@intel.com>
Mon, 24 May 2010 18:27:44 +0000 (14:27 -0400)
committer	Len Brown <len.brown@intel.com>
Fri, 28 May 2010 17:58:57 +0000 (13:58 -0400)
commit d306ebc28649b89877a22158fe0076f06cc46f60
(ACPI: Be in TS_POLLING state during mwait based C-state entry)
fixed an important power & performance issue where the ACPI C2 and C3 C-states
were clearing TS_POLLING even when using MWAIT (ACPI_CSTATE_FFH).
That bug had been causing us to receive redundant scheduling
interrupts after we had already been woken up by MONITOR/MWAIT.

Following up on that...

In the MWAIT case, we don't have to subsequently
check need_resched(), as that check was there
for the TS_POLLING-clearing case.

Note that not only does the cpuidle calling function
already check need_resched() before calling us, the
low-level entry into monitor/mwait checks it twice,
guaranteeing that a write to the trigger address
cannot go unnoticed.
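
For reference, a minimal sketch of that double check, modeled on the
kernel's MONITOR/MWAIT idle-entry pattern (mwait_idle_sketch and its
argument names are illustrative, not the actual helper):

	static inline void mwait_idle_sketch(unsigned long eax, unsigned long ecx)
	{
		/* first check: skip everything if work is already pending */
		if (!need_resched()) {
			/* arm MONITOR on this CPU's thread_info flags */
			__monitor((void *)&current_thread_info()->flags, 0, 0);
			smp_mb();
			/*
			 * second check: a wakeup that raced in between wrote the
			 * monitored line, so re-checking here means we never sit
			 * in MWAIT with NEED_RESCHED already set.
			 */
			if (!need_resched())
				__mwait(eax, ecx);
		}
	}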

Also, in this case, we don't have to set TS_POLLING
when we wake, because we never cleared it.
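
Condensed, the flow that the hunks below produce in both
acpi_idle_enter_simple() and acpi_idle_enter_bm() looks like this
(a summary of the diff, nothing beyond it):

	local_irq_disable();
	if (cx->entry_method != ACPI_CSTATE_FFH) {
		/* halt-based entry: drop TS_POLLING, then re-check for work */
		current_thread_info()->status &= ~TS_POLLING;
		smp_mb();	/* status write visible before testing NEED_RESCHED */
		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return 0;
		}
	}

	/* ... enter the C-state ... */

	local_irq_enable();
	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;	/* restore only what we cleared */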

Signed-off-by: Len Brown <len.brown@intel.com>
Acked-by: Venkatesh Pallipadi <venki@google.com>
drivers/acpi/processor_idle.c

index 5939e7f7d8e9004ea794408c61b6665d52f92851..a4166e2abb92817af05b8ed6fa1347008497a0e2 100644
@@ -881,6 +881,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
                return(acpi_idle_enter_c1(dev, state));
 
        local_irq_disable();
+
        if (cx->entry_method != ACPI_CSTATE_FFH) {
                current_thread_info()->status &= ~TS_POLLING;
                /*
@@ -888,12 +889,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
                 * NEED_RESCHED:
                 */
                smp_mb();
-       }
 
-       if (unlikely(need_resched())) {
-               current_thread_info()->status |= TS_POLLING;
-               local_irq_enable();
-               return 0;
+               if (unlikely(need_resched())) {
+                       current_thread_info()->status |= TS_POLLING;
+                       local_irq_enable();
+                       return 0;
+               }
        }
 
        /*
@@ -918,7 +919,8 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
        sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
 
        local_irq_enable();
-       current_thread_info()->status |= TS_POLLING;
+       if (cx->entry_method != ACPI_CSTATE_FFH)
+               current_thread_info()->status |= TS_POLLING;
 
        cx->usage++;
 
@@ -968,6 +970,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
        }
 
        local_irq_disable();
+
        if (cx->entry_method != ACPI_CSTATE_FFH) {
                current_thread_info()->status &= ~TS_POLLING;
                /*
@@ -975,12 +978,12 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
                 * NEED_RESCHED:
                 */
                smp_mb();
-       }
 
-       if (unlikely(need_resched())) {
-               current_thread_info()->status |= TS_POLLING;
-               local_irq_enable();
-               return 0;
+               if (unlikely(need_resched())) {
+                       current_thread_info()->status |= TS_POLLING;
+                       local_irq_enable();
+                       return 0;
+               }
        }
 
        acpi_unlazy_tlb(smp_processor_id());
@@ -1032,7 +1035,8 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
        sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
 
        local_irq_enable();
-       current_thread_info()->status |= TS_POLLING;
+       if (cx->entry_method != ACPI_CSTATE_FFH)
+               current_thread_info()->status |= TS_POLLING;
 
        cx->usage++;