sched/fair: Remove cfs_rq::nr_spread_over and cfs_rq::exec_clock
authorChuyi Zhou <zhouchuyi@bytedance.com>
Wed, 17 Jul 2024 14:33:42 +0000 (22:33 +0800)
committerPeter Zijlstra <peterz@infradead.org>
Mon, 29 Jul 2024 10:22:34 +0000 (12:22 +0200)
nr_spread_over tracks the number of instances where the difference
between a scheduling entity's virtual runtime and the minimum virtual
runtime in the runqueue exceeds three times the scheduler latency,
indicating significant disparity in task scheduling.
Its usage was removed by commit 5e963f2bd ("sched/fair: Commit to EEVDF").

cfs_rq->exec_clock was used to account for time spent executing tasks.
Its usage was removed by commit 5d69eca542ee1 ("sched: Unify runtime
accounting across classes").

cfs_rq::nr_spread_over and cfs_rq::exec_clock are no longer used now that
the scheduler has moved to EEVDF. Remove them from struct cfs_rq.

Signed-off-by: Chuyi Zhou <zhouchuyi@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev>
Reviewed-by: K Prateek Nayak <kprateek.nayak@amd.com>
Acked-by: Vishal Chourasia <vishalc@linux.ibm.com>
Link: https://lore.kernel.org/r/20240717143342.593262-1-zhouchuyi@bytedance.com
kernel/sched/debug.c
kernel/sched/sched.h

index c1eb9a1afd13e647d0eb5c31908f4725fa4d0042..90c4a9998377c743afc40d623974c1b059461bf4 100644 (file)
@@ -641,8 +641,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
        SEQ_printf(m, "\n");
        SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
 #endif
-       SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
-                       SPLIT_NS(cfs_rq->exec_clock));
 
        raw_spin_rq_lock_irqsave(rq, flags);
        root = __pick_root_entity(cfs_rq);
@@ -669,8 +667,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
                        SPLIT_NS(right_vruntime));
        spread = right_vruntime - left_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
-       SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
-                       cfs_rq->nr_spread_over);
        SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
        SEQ_printf(m, "  .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
        SEQ_printf(m, "  .%-30s: %d\n", "idle_nr_running",
index 4c36cc680361720cf070b612be523b8f8ed09f01..8a071022bdec87e4d417e139bd83680d9ba24725 100644 (file)
@@ -599,7 +599,6 @@ struct cfs_rq {
        s64                     avg_vruntime;
        u64                     avg_load;
 
-       u64                     exec_clock;
        u64                     min_vruntime;
 #ifdef CONFIG_SCHED_CORE
        unsigned int            forceidle_seq;
@@ -619,10 +618,6 @@ struct cfs_rq {
        struct sched_entity     *curr;
        struct sched_entity     *next;
 
-#ifdef CONFIG_SCHED_DEBUG
-       unsigned int            nr_spread_over;
-#endif
-
 #ifdef CONFIG_SMP
        /*
         * CFS load tracking
@@ -1158,7 +1153,6 @@ struct rq {
        /* latency stats */
        struct sched_info       rq_sched_info;
        unsigned long long      rq_cpu_time;
-       /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
 
        /* sys_sched_yield() stats */
        unsigned int            yld_count;