Merge branch 'timers-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
authorLinus Torvalds <torvalds@linux-foundation.org>
Fri, 26 Mar 2010 22:10:38 +0000 (15:10 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 26 Mar 2010 22:10:38 +0000 (15:10 -0700)
* 'timers-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  time: Fix accumulation bug triggered by long delay.
  posix-cpu-timers: Reset expire cache when no timer is running
  timer stats: Fix del_timer_sync() and try_to_del_timer_sync()
  clockevents: Sanitize min_delta_ns adjustment and prevent overflows

1  2 
kernel/posix-cpu-timers.c

index 1a22dfd42df9b4089e9630f2635a425a26d4959b,edec25a68e9ebe4126302cd1620d11f77386a5f2..bc7704b3a4431434e15bacb3127cfb9a5e1517a9
@@@ -982,7 -982,6 +982,7 @@@ static void check_thread_timers(struct 
        int maxfire;
        struct list_head *timers = tsk->cpu_timers;
        struct signal_struct *const sig = tsk->signal;
 +      unsigned long soft;
  
        maxfire = 20;
        tsk->cputime_expires.prof_exp = cputime_zero;
        /*
         * Check for the special case thread timers.
         */
 -      if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
 -              unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
 -              unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;
 +      soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
 +      if (soft != RLIM_INFINITY) {
 +              unsigned long hard =
 +                      ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
  
                if (hard != RLIM_INFINITY &&
                    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
 -              if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
 +              if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
 -                      if (sig->rlim[RLIMIT_RTTIME].rlim_cur
 -                          < sig->rlim[RLIMIT_RTTIME].rlim_max) {
 -                              sig->rlim[RLIMIT_RTTIME].rlim_cur +=
 -                                                              USEC_PER_SEC;
 +                      if (soft < hard) {
 +                              soft += USEC_PER_SEC;
 +                              sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
                        }
                        printk(KERN_INFO
                                "RT Watchdog Timeout: %s[%d]\n",
        }
  }
  
- static void stop_process_timers(struct task_struct *tsk)
+ static void stop_process_timers(struct signal_struct *sig)
  {
-       struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+       struct thread_group_cputimer *cputimer = &sig->cputimer;
        unsigned long flags;
  
        if (!cputimer->running)
        spin_lock_irqsave(&cputimer->lock, flags);
        cputimer->running = 0;
        spin_unlock_irqrestore(&cputimer->lock, flags);
+       sig->cputime_expires.prof_exp = cputime_zero;
+       sig->cputime_expires.virt_exp = cputime_zero;
+       sig->cputime_expires.sched_exp = 0;
  }
  
  static u32 onecputick;
@@@ -1122,7 -1125,6 +1126,7 @@@ static void check_process_timers(struc
        unsigned long long sum_sched_runtime, sched_expires;
        struct list_head *timers = sig->cpu_timers;
        struct task_cputime cputime;
 +      unsigned long soft;
  
        /*
         * Don't sample the current process CPU clocks if there are no timers.
            list_empty(&timers[CPUCLOCK_VIRT]) &&
            cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) &&
            list_empty(&timers[CPUCLOCK_SCHED])) {
-               stop_process_timers(tsk);
+               stop_process_timers(sig);
                return;
        }
  
                         SIGPROF);
        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
                         SIGVTALRM);
 -
 -      if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
 +      soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
 +      if (soft != RLIM_INFINITY) {
                unsigned long psecs = cputime_to_secs(ptime);
 +              unsigned long hard =
 +                      ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
                cputime_t x;
 -              if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
 +              if (psecs >= hard) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
 -              if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
 +              if (psecs >= soft) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
 -                      if (sig->rlim[RLIMIT_CPU].rlim_cur
 -                          < sig->rlim[RLIMIT_CPU].rlim_max) {
 -                              sig->rlim[RLIMIT_CPU].rlim_cur++;
 +                      if (soft < hard) {
 +                              soft++;
 +                              sig->rlim[RLIMIT_CPU].rlim_cur = soft;
                        }
                }
 -              x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
 +              x = secs_to_cputime(soft);
                if (cputime_eq(prof_expires, cputime_zero) ||
                    cputime_lt(x, prof_expires)) {
                        prof_expires = x;