Merge tag 'v6.8-rc5' into timers/core, to resolve conflict
author Ingo Molnar <mingo@kernel.org>
Mon, 19 Feb 2024 20:13:31 +0000 (21:13 +0100)
committer Ingo Molnar <mingo@kernel.org>
Mon, 19 Feb 2024 21:27:57 +0000 (22:27 +0100)
There's a conflict between this recent upstream fix:

  dad6a09f3148 ("hrtimer: Report offline hrtimer enqueue")

and a pending commit in the timers tree:

  1a4729ecafc2 ("hrtimers: Move hrtimer base related definitions into hrtimer_defs.h")

Resolve it by applying the upstream fix to the new <linux/hrtimer_defs.h> header.
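
For context: dad6a09f3148 adds an @online bit to struct hrtimer_cpu_base and
warns when a timer is enqueued on an offline base. Since 1a4729ecafc2 moves
that structure into <linux/hrtimer_defs.h>, the resolved field ends up in the
new header (visible in the hrtimer_defs.h diff below). The enqueue-side check
amounts to roughly this (a sketch of the upstream fix, not verbatim):

    /* in enqueue_hrtimer(), kernel/time/hrtimer.c */
    WARN_ON_ONCE(!base->cpu_base->online);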

 Conflict:
	include/linux/hrtimer.h

 Semantic conflict:
	include/linux/hrtimer_defs.h

Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/hrtimer.h
include/linux/hrtimer_defs.h
include/linux/jiffies.h
kernel/time/clockevents.c
kernel/time/clocksource.c
kernel/time/hrtimer.c
kernel/time/tick-sched.c
kernel/time/timekeeping.c
kernel/time/timer.c

diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 641c4567cfa7aee830f8ad0b52abb24bcbe353a8..aa1e65ccb6158414fed519c984e8b0ca8dc11dcf 100644
 #include <linux/list.h>
 #include <linux/percpu-defs.h>
 #include <linux/rbtree.h>
-#include <linux/seqlock.h>
 #include <linux/timer.h>
 
-struct hrtimer_clock_base;
-struct hrtimer_cpu_base;
-
 /*
  * Mode arguments of xxx_hrtimer functions:
  *
@@ -98,107 +94,6 @@ struct hrtimer_sleeper {
        struct task_struct *task;
 };
 
-#ifdef CONFIG_64BIT
-# define __hrtimer_clock_base_align    ____cacheline_aligned
-#else
-# define __hrtimer_clock_base_align
-#endif
-
-/**
- * struct hrtimer_clock_base - the timer base for a specific clock
- * @cpu_base:          per cpu clock base
- * @index:             clock type index for per_cpu support when moving a
- *                     timer to a base on another cpu.
- * @clockid:           clock id for per_cpu support
- * @seq:               seqcount around __run_hrtimer
- * @running:           pointer to the currently running hrtimer
- * @active:            red black tree root node for the active timers
- * @get_time:          function to retrieve the current time of the clock
- * @offset:            offset of this clock to the monotonic base
- */
-struct hrtimer_clock_base {
-       struct hrtimer_cpu_base *cpu_base;
-       unsigned int            index;
-       clockid_t               clockid;
-       seqcount_raw_spinlock_t seq;
-       struct hrtimer          *running;
-       struct timerqueue_head  active;
-       ktime_t                 (*get_time)(void);
-       ktime_t                 offset;
-} __hrtimer_clock_base_align;
-
-enum  hrtimer_base_type {
-       HRTIMER_BASE_MONOTONIC,
-       HRTIMER_BASE_REALTIME,
-       HRTIMER_BASE_BOOTTIME,
-       HRTIMER_BASE_TAI,
-       HRTIMER_BASE_MONOTONIC_SOFT,
-       HRTIMER_BASE_REALTIME_SOFT,
-       HRTIMER_BASE_BOOTTIME_SOFT,
-       HRTIMER_BASE_TAI_SOFT,
-       HRTIMER_MAX_CLOCK_BASES,
-};
-
-/**
- * struct hrtimer_cpu_base - the per cpu clock bases
- * @lock:              lock protecting the base and associated clock bases
- *                     and timers
- * @cpu:               cpu number
- * @active_bases:      Bitfield to mark bases with active timers
- * @clock_was_set_seq: Sequence counter of clock was set events
- * @hres_active:       State of high resolution mode
- * @in_hrtirq:         hrtimer_interrupt() is currently executing
- * @hang_detected:     The last hrtimer interrupt detected a hang
- * @softirq_activated: displays, if the softirq is raised - update of softirq
- *                     related settings is not required then.
- * @nr_events:         Total number of hrtimer interrupt events
- * @nr_retries:                Total number of hrtimer interrupt retries
- * @nr_hangs:          Total number of hrtimer interrupt hangs
- * @max_hang_time:     Maximum time spent in hrtimer_interrupt
- * @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are
- *                      expired
- * @online:            CPU is online from an hrtimers point of view
- * @timer_waiters:     A hrtimer_cancel() invocation waits for the timer
- *                     callback to finish.
- * @expires_next:      absolute time of the next event, is required for remote
- *                     hrtimer enqueue; it is the total first expiry time (hard
- *                     and soft hrtimer are taken into account)
- * @next_timer:                Pointer to the first expiring timer
- * @softirq_expires_next: Time to check, if soft queues needs also to be expired
- * @softirq_next_timer: Pointer to the first expiring softirq based timer
- * @clock_base:                array of clock bases for this cpu
- *
- * Note: next_timer is just an optimization for __remove_hrtimer().
- *      Do not dereference the pointer because it is not reliable on
- *      cross cpu removals.
- */
-struct hrtimer_cpu_base {
-       raw_spinlock_t                  lock;
-       unsigned int                    cpu;
-       unsigned int                    active_bases;
-       unsigned int                    clock_was_set_seq;
-       unsigned int                    hres_active             : 1,
-                                       in_hrtirq               : 1,
-                                       hang_detected           : 1,
-                                       softirq_activated       : 1,
-                                       online                  : 1;
-#ifdef CONFIG_HIGH_RES_TIMERS
-       unsigned int                    nr_events;
-       unsigned short                  nr_retries;
-       unsigned short                  nr_hangs;
-       unsigned int                    max_hang_time;
-#endif
-#ifdef CONFIG_PREEMPT_RT
-       spinlock_t                      softirq_expiry_lock;
-       atomic_t                        timer_waiters;
-#endif
-       ktime_t                         expires_next;
-       struct hrtimer                  *next_timer;
-       ktime_t                         softirq_expires_next;
-       struct hrtimer                  *softirq_next_timer;
-       struct hrtimer_clock_base       clock_base[HRTIMER_MAX_CLOCK_BASES];
-} ____cacheline_aligned;
-
 static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
 {
        timer->node.expires = time;
@@ -447,20 +342,12 @@ extern u64
 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
 
 /**
- * hrtimer_forward_now - forward the timer expiry so it expires after now
+ * hrtimer_forward_now() - forward the timer expiry so it expires after now
  * @timer:     hrtimer to forward
  * @interval:  the interval to forward
  *
- * Forward the timer expiry so it will expire after the current time
- * of the hrtimer clock base. Returns the number of overruns.
- *
- * Can be safely called from the callback function of @timer. If
- * called from other contexts @timer must neither be enqueued nor
- * running the callback and the caller needs to take care of
- * serialization.
- *
- * Note: This only updates the timer expiry value and does not requeue
- * the timer.
+ * It is a variant of hrtimer_forward(). The timer will expire after the current
+ * time of the hrtimer clock base. See hrtimer_forward() for details.
  */
 static inline u64 hrtimer_forward_now(struct hrtimer *timer,
                                      ktime_t interval)
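
The shortened hrtimer_forward_now() kernel-doc above now defers to
hrtimer_forward() for the details. Its canonical use is re-arming a periodic
timer from its own callback; a minimal sketch against the real API (the
callback name and the 100ms period are made up):

    static enum hrtimer_restart my_periodic_cb(struct hrtimer *t)
    {
            /* Advance the expiry past the current time by one period;
             * safe here because we run in the timer's own callback. */
            hrtimer_forward_now(t, ms_to_ktime(100));
            return HRTIMER_RESTART;     /* requeue with the new expiry */
    }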
diff --git a/include/linux/hrtimer_defs.h b/include/linux/hrtimer_defs.h
index 2d3e3c5fb94662cd497171e758b05364475d2ca8..c3b4b7ed7c163fa4cbcba9168266566357f5865e 100644
@@ -3,6 +3,8 @@
 #define _LINUX_HRTIMER_DEFS_H
 
 #include <linux/ktime.h>
+#include <linux/timerqueue.h>
+#include <linux/seqlock.h>
 
 #ifdef CONFIG_HIGH_RES_TIMERS
 
 
 #endif
 
+#ifdef CONFIG_64BIT
+# define __hrtimer_clock_base_align    ____cacheline_aligned
+#else
+# define __hrtimer_clock_base_align
+#endif
+
+/**
+ * struct hrtimer_clock_base - the timer base for a specific clock
+ * @cpu_base:          per cpu clock base
+ * @index:             clock type index for per_cpu support when moving a
+ *                     timer to a base on another cpu.
+ * @clockid:           clock id for per_cpu support
+ * @seq:               seqcount around __run_hrtimer
+ * @running:           pointer to the currently running hrtimer
+ * @active:            red black tree root node for the active timers
+ * @get_time:          function to retrieve the current time of the clock
+ * @offset:            offset of this clock to the monotonic base
+ */
+struct hrtimer_clock_base {
+       struct hrtimer_cpu_base *cpu_base;
+       unsigned int            index;
+       clockid_t               clockid;
+       seqcount_raw_spinlock_t seq;
+       struct hrtimer          *running;
+       struct timerqueue_head  active;
+       ktime_t                 (*get_time)(void);
+       ktime_t                 offset;
+} __hrtimer_clock_base_align;
+
+enum  hrtimer_base_type {
+       HRTIMER_BASE_MONOTONIC,
+       HRTIMER_BASE_REALTIME,
+       HRTIMER_BASE_BOOTTIME,
+       HRTIMER_BASE_TAI,
+       HRTIMER_BASE_MONOTONIC_SOFT,
+       HRTIMER_BASE_REALTIME_SOFT,
+       HRTIMER_BASE_BOOTTIME_SOFT,
+       HRTIMER_BASE_TAI_SOFT,
+       HRTIMER_MAX_CLOCK_BASES,
+};
+
+/**
+ * struct hrtimer_cpu_base - the per cpu clock bases
+ * @lock:              lock protecting the base and associated clock bases
+ *                     and timers
+ * @cpu:               cpu number
+ * @active_bases:      Bitfield to mark bases with active timers
+ * @clock_was_set_seq: Sequence counter of clock was set events
+ * @hres_active:       State of high resolution mode
+ * @in_hrtirq:         hrtimer_interrupt() is currently executing
+ * @hang_detected:     The last hrtimer interrupt detected a hang
+ * @softirq_activated: displays, if the softirq is raised - update of softirq
+ *                     related settings is not required then.
+ * @nr_events:         Total number of hrtimer interrupt events
+ * @nr_retries:                Total number of hrtimer interrupt retries
+ * @nr_hangs:          Total number of hrtimer interrupt hangs
+ * @max_hang_time:     Maximum time spent in hrtimer_interrupt
+ * @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are
+ *                      expired
+ * @online:            CPU is online from an hrtimers point of view
+ * @timer_waiters:     A hrtimer_cancel() invocation waits for the timer
+ *                     callback to finish.
+ * @expires_next:      absolute time of the next event, is required for remote
+ *                     hrtimer enqueue; it is the total first expiry time (hard
+ *                     and soft hrtimer are taken into account)
+ * @next_timer:                Pointer to the first expiring timer
+ * @softirq_expires_next: Time to check, if soft queues needs also to be expired
+ * @softirq_next_timer: Pointer to the first expiring softirq based timer
+ * @clock_base:                array of clock bases for this cpu
+ *
+ * Note: next_timer is just an optimization for __remove_hrtimer().
+ *      Do not dereference the pointer because it is not reliable on
+ *      cross cpu removals.
+ */
+struct hrtimer_cpu_base {
+       raw_spinlock_t                  lock;
+       unsigned int                    cpu;
+       unsigned int                    active_bases;
+       unsigned int                    clock_was_set_seq;
+       unsigned int                    hres_active             : 1,
+                                       in_hrtirq               : 1,
+                                       hang_detected           : 1,
+                                       softirq_activated       : 1,
+                                       online                  : 1;
+#ifdef CONFIG_HIGH_RES_TIMERS
+       unsigned int                    nr_events;
+       unsigned short                  nr_retries;
+       unsigned short                  nr_hangs;
+       unsigned int                    max_hang_time;
+#endif
+#ifdef CONFIG_PREEMPT_RT
+       spinlock_t                      softirq_expiry_lock;
+       atomic_t                        timer_waiters;
+#endif
+       ktime_t                         expires_next;
+       struct hrtimer                  *next_timer;
+       ktime_t                         softirq_expires_next;
+       struct hrtimer                  *softirq_next_timer;
+       struct hrtimer_clock_base       clock_base[HRTIMER_MAX_CLOCK_BASES];
+} ____cacheline_aligned;
+
+
 #endif
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index e0ae2a43e0ebdd22d17680477c2bb152b6ba767b..d9f1435a5a13cfe4d74bfb945265bad8ab7cb772 100644
@@ -102,12 +102,15 @@ static inline u64 get_jiffies_64(void)
 }
 #endif
 
-/*
- *     These inlines deal with timer wrapping correctly. You are
- *     strongly encouraged to use them:
- *     1. Because people otherwise forget
- *     2. Because if the timer wrap changes in future you won't have to
- *        alter your driver code.
+/**
+ * DOC: General information about time_* inlines
+ *
+ * These inlines deal with timer wrapping correctly. You are strongly encouraged
+ * to use them:
+ *
+ * #. Because people otherwise forget
+ * #. Because if the timer wrap changes in future you won't have to alter your
+ *    driver code.
  */
 
 /**
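
The new DOC: block covers the time_after()/time_before() family; the wrap-safe
pattern it recommends looks like this in driver code (illustrative only,
done() stands in for whatever condition is being polled):

    unsigned long timeout = jiffies + msecs_to_jiffies(500);

    while (!done()) {
            if (time_after(jiffies, timeout))
                    return -ETIMEDOUT;  /* correct even across a jiffies wrap */
            cpu_relax();
    }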
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 960143b183cdb38c088936664937c3d98d9e92a5..a7ca458cdd9cd6ebf138ca466b3f197d9e4e619f 100644
@@ -659,7 +659,7 @@ void tick_cleanup_dead_cpu(int cpu)
 #endif
 
 #ifdef CONFIG_SYSFS
-static struct bus_type clockevents_subsys = {
+static const struct bus_type clockevents_subsys = {
        .name           = "clockevents",
        .dev_name       = "clockevent",
 };
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 3052b1f1168e29c4432ba3b068488af11029018d..4ef06651ad079f7bd28fb19cabfcb3b477dab53f 100644
@@ -1468,7 +1468,7 @@ static struct attribute *clocksource_attrs[] = {
 };
 ATTRIBUTE_GROUPS(clocksource);
 
-static struct bus_type clocksource_subsys = {
+static const struct bus_type clocksource_subsys = {
        .name = "clocksource",
        .dev_name = "clocksource",
 };
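
Both bus_type constifications here rely on the driver core accepting const
pointers, presumably via the v6.8 constification of subsys_system_register()
and friends. A subsystem definition can then live in rodata; a minimal sketch
(example_subsys is a made-up name):

    static const struct bus_type example_subsys = {
            .name           = "example",
            .dev_name       = "example",
    };

    static int __init example_subsys_init(void)
    {
            return subsys_system_register(&example_subsys, NULL);
    }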
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index edb0f821dceaa1720ac94fc53f4002a1e5f7bdd3..5a98b35b05767ff1c780a4864ec07326be499046 100644
@@ -1021,21 +1021,23 @@ void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 }
 
 /**
- * hrtimer_forward - forward the timer expiry
+ * hrtimer_forward() - forward the timer expiry
  * @timer:     hrtimer to forward
  * @now:       forward past this time
  * @interval:  the interval to forward
  *
  * Forward the timer expiry so it will expire in the future.
- * Returns the number of overruns.
  *
- * Can be safely called from the callback function of @timer. If
- * called from other contexts @timer must neither be enqueued nor
- * running the callback and the caller needs to take care of
- * serialization.
+ * .. note::
+ *  This only updates the timer expiry value and does not requeue the timer.
  *
- * Note: This only updates the timer expiry value and does not requeue
- * the timer.
+ * There is also a variant of the function hrtimer_forward_now().
+ *
+ * Context: Can be safely called from the callback function of @timer. If called
+ *          from other contexts @timer must neither be enqueued nor running the
+ *          callback and the caller needs to take care of serialization.
+ *
+ * Return: The number of overruns is returned.
  */
 u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
 {
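
The clarified Return: section is worth illustrating: the overrun count,
returned by hrtimer_forward() and its hrtimer_forward_now() variant alike,
tells a periodic user whether it fell behind. A sketch (my_timer and the 1ms
period are hypothetical):

    u64 overruns = hrtimer_forward_now(&my_timer, ns_to_ktime(NSEC_PER_MSEC));

    /* 1 == advanced by exactly one period; more means periods were missed */
    if (overruns > 1)
            pr_warn("fell behind by %llu period(s)\n", overruns - 1);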
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 01fb50c1b17e4f1b33285ae2ce2690f0747f8ee8..7c9efe3d9d5620eecf353c1cf2c5327c47eb5404 100644
@@ -799,6 +799,16 @@ static inline bool local_timer_softirq_pending(void)
        return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
 }
 
+/**
+ * tick_nohz_next_event() - return the clock monotonic based next event
+ * @ts:                pointer to tick_sched struct
+ * @cpu:       CPU number
+ *
+ * Return:
+ * *%0         - When the next event is a maximum of TICK_NSEC in the future
+ *               and the tick is not stopped yet
+ * *%next_event        - Next event based on clock monotonic
+ */
 static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
 {
        u64 basemono, next_tick, delta, expires;
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 266d02809dbb1dceabfb2a05276385c6a3be9b0c..8aab7ed414907e8b11d89bba99fe85bf534725a0 100644
@@ -1180,13 +1180,15 @@ static int adjust_historical_crosststamp(struct system_time_snapshot *history,
 }
 
 /*
- * cycle_between - true if test occurs chronologically between before and after
+ * timestamp_in_interval - true if ts is chronologically in [start, end]
+ *
+ * True if ts occurs chronologically at or after start, and before or at end.
  */
-static bool cycle_between(u64 before, u64 test, u64 after)
+static bool timestamp_in_interval(u64 start, u64 end, u64 ts)
 {
-       if (test > before && test < after)
+       if (ts >= start && ts <= end)
                return true;
-       if (test < before && before > after)
+       if (start > end && (ts >= start || ts <= end))
                return true;
        return false;
 }
@@ -1246,7 +1248,7 @@ int get_device_system_crosststamp(int (*get_time_fn)
                 */
                now = tk_clock_read(&tk->tkr_mono);
                interval_start = tk->tkr_mono.cycle_last;
-               if (!cycle_between(interval_start, cycles, now)) {
+               if (!timestamp_in_interval(interval_start, now, cycles)) {
                        clock_was_set_seq = tk->clock_was_set_seq;
                        cs_was_changed_seq = tk->cs_was_changed_seq;
                        cycles = interval_start;
@@ -1259,10 +1261,8 @@ int get_device_system_crosststamp(int (*get_time_fn)
                                      tk_core.timekeeper.offs_real);
                base_raw = tk->tkr_raw.base;
 
-               nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
-                                                    system_counterval.cycles);
-               nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
-                                                   system_counterval.cycles);
+               nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, cycles);
+               nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, cycles);
        } while (read_seqcount_retry(&tk_core.seq, seq));
 
        xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
@@ -1277,13 +1277,13 @@ int get_device_system_crosststamp(int (*get_time_fn)
                bool discontinuity;
 
                /*
-                * Check that the counter value occurs after the provided
+                * Check that the counter value is not before the provided
                 * history reference and that the history doesn't cross a
                 * clocksource change
                 */
                if (!history_begin ||
-                   !cycle_between(history_begin->cycles,
-                                  system_counterval.cycles, cycles) ||
+                   !timestamp_in_interval(history_begin->cycles,
+                                          cycles, system_counterval.cycles) ||
                    history_begin->cs_was_changed_seq != cs_was_changed_seq)
                        return -EINVAL;
                partial_history_cycles = cycles - system_counterval.cycles;
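
Beyond the rename, timestamp_in_interval() also changes semantics: the old
cycle_between() excluded both endpoints, while the new helper is inclusive and
still copes with counter wrap-around. A standalone mirror of the logic, for
illustration:

    #include <stdbool.h>
    #include <stdint.h>

    /* true if ts lies within [start, end], treating the u64 space as circular */
    static bool timestamp_in_interval(uint64_t start, uint64_t end, uint64_t ts)
    {
            if (ts >= start && ts <= end)
                    return true;
            /* the interval wraps past the maximum counter value */
            if (start > end && (ts >= start || ts <= end))
                    return true;
            return false;
    }

For example, timestamp_in_interval(10, 20, 10) is true where
cycle_between(10, 10, 20) was false, and
timestamp_in_interval(UINT64_MAX - 5, 10, 3) is true because the interval
wraps.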
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 352b161113cda6856a6fac08e9cc5eb01818a056..d44dba1d4af0ddc1ef7e89eef810cf86423aee43 100644
@@ -196,6 +196,51 @@ EXPORT_SYMBOL(jiffies_64);
 # define BASE_DEF      0
 #endif
 
+/**
+ * struct timer_base - Per CPU timer base (number of base depends on config)
+ * @lock:              Lock protecting the timer_base
+ * @running_timer:     When expiring timers, the lock is dropped. To make
+ *                     sure not to race agains deleting/modifying a
+ *                     sure not to race against deleting/modifying a
+ *                     timer, which expires at the moment. If no timer is
+ *                     running, the pointer is NULL.
+ * @expiry_lock:       PREEMPT_RT only: Lock is taken in softirq around
+ *                     timer expiry callback execution and when trying to
+ *                     delete a running timer and it wasn't successful in
+ *                     the first glance. It prevents priority inversion
+ *                     when callback was preempted on a remote CPU and a
+ *                     caller tries to delete the running timer. It also
+ *                     prevents a live lock, when the task which tries to
+ *                     delete a timer preempted the softirq thread which
+ *                     is running the timer callback function.
+ * @timer_waiters:     PREEMPT_RT only: Tells, if there is a waiter
+ *                     waiting for the end of the timer callback function
+ *                     execution.
+ * @clk:               clock of the timer base; is updated before enqueue
+ *                     of a timer; during expiry, it is 1 offset ahead of
+ *                     jiffies to avoid endless requeuing to current
+ *                     jiffies
+ * @next_expiry:       expiry value of the first timer; it is updated when
+ *                     finding the next timer and during enqueue; the
+ *                     value is not valid, when next_expiry_recalc is set
+ * @cpu:               Number of CPU the timer base belongs to
+ * @next_expiry_recalc: States, whether a recalculation of next_expiry is
+ *                     required. Value is set true, when a timer was
+ *                     deleted.
+ * @is_idle:           Is set, when timer_base is idle. It is triggered by NOHZ
+ *                     code. This state is only used in standard
+ *                     base. Deferrable timers, which are enqueued remotely
+ *                     never wake up an idle CPU. So no matter of supporting it
+ *                     for this base.
+ * @timers_pending:    Is set, when a timer is pending in the base. It is only
+ *                     reliable when next_expiry_recalc is not set.
+ * @pending_map:       bitmap of the timer wheel; each bit reflects a
+ *                     bucket of the wheel. When a bit is set, at least a
+ *                     single timer is enqueued in the related bucket.
+ * @vectors:           Array of lists; Each array member reflects a bucket
+ *                     of the timer wheel. The list contains all timers
+ *                     which are enqueued into a specific bucket.
+ */
 struct timer_base {
        raw_spinlock_t          lock;
        struct timer_list       *running_timer;
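
The @running_timer and @expiry_lock machinery documented above is what makes
synchronous deletion safe against a concurrently executing callback; from the
API side this is simply (usage sketch, my_timer and my_timer_fn are
hypothetical):

    static struct timer_list my_timer;

    timer_setup(&my_timer, my_timer_fn, 0);
    mod_timer(&my_timer, jiffies + HZ);
    /* ... */
    timer_delete_sync(&my_timer);       /* waits for a running callback to finish */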