sched_ext: Add an event, SCX_EV_DISPATCH_KEEP_LAST
author	Changwoo Min <changwoo@igalia.com>
Fri, 31 Jan 2025 07:09:31 +0000 (16:09 +0900)
committer	Tejun Heo <tj@kernel.org>
Sun, 2 Feb 2025 17:23:18 +0000 (07:23 -1000)
Add a core event, SCX_EV_DISPATCH_KEEP_LAST, which counts how many
times a task keeps running without ops.enqueue() being called when
SCX_OPS_ENQ_LAST is not set.
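
For example, a sched_ext BPF scheduler can read this counter through the
scx_bpf_events() kfunc touched in the diff below. A minimal sketch, assuming
the usual tools/sched_ext BPF headers; the helper name read_keep_last_count()
is illustrative:

	#include <scx/common.bpf.h>

	/* Return the system-wide count of how many times a task was kept
	 * running because no other task was runnable on its CPU while
	 * SCX_OPS_ENQ_LAST was not set. */
	static u64 read_keep_last_count(void)
	{
		struct scx_event_stats events;

		scx_bpf_events(&events, sizeof(events));
		return events.SCX_EV_DISPATCH_KEEP_LAST;
	}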

__scx_add_event() is used since the caller holds an rq lock, so
preemption is already disabled.
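
The distinction between the two helpers is only whether they may be called
with preemption enabled. A rough sketch of the intent, not the exact macros
in ext.c, assuming the counters live in the per-CPU event_stats_cpu storage
seen in scx_bpf_events() below:

	/* Safe with preemption enabled; uses preemption-safe per-CPU ops. */
	#define scx_add_event(name, cnt)				\
		this_cpu_add(event_stats_cpu.name, (cnt))

	/* Caller must already have preemption disabled, e.g. via an rq lock. */
	#define __scx_add_event(name, cnt)				\
		__this_cpu_add(event_stats_cpu.name, (cnt))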

Signed-off-by: Changwoo Min <changwoo@igalia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
kernel/sched/ext.c

index d1447f18381ee40e4636a3d46bd7d51ee9d99786..cbb3d059072fb649eccd737dc30125fb28fd338a 100644 (file)
@@ -1455,6 +1455,12 @@ struct scx_event_stats {
         * the meantime. In this case, the task is bounced to the global DSQ.
         */
        u64             SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE;
+
+       /*
+        * If SCX_OPS_ENQ_LAST is not set, the number of times that a task
+        * continued to run because there were no other tasks on the CPU.
+        */
+       u64             SCX_EV_DISPATCH_KEEP_LAST;
 };
 
 /*
@@ -2907,6 +2913,7 @@ no_tasks:
        if (prev_on_rq && (!static_branch_unlikely(&scx_ops_enq_last) ||
             scx_rq_bypassing(rq))) {
                rq->scx.flags |= SCX_RQ_BAL_KEEP;
+               __scx_add_event(SCX_EV_DISPATCH_KEEP_LAST, 1);
                goto has_tasks;
        }
        rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
@@ -4977,6 +4984,7 @@ static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
        scx_bpf_events(&events, sizeof(events));
        scx_dump_event(s, &events, SCX_EV_SELECT_CPU_FALLBACK);
        scx_dump_event(s, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
+       scx_dump_event(s, &events, SCX_EV_DISPATCH_KEEP_LAST);
 
        if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
                memcpy(ei->dump + dump_len - sizeof(trunc_marker),
@@ -7112,6 +7120,7 @@ __bpf_kfunc void scx_bpf_events(struct scx_event_stats *events,
                e_cpu = per_cpu_ptr(&event_stats_cpu, cpu);
                scx_agg_event(&e_sys, e_cpu, SCX_EV_SELECT_CPU_FALLBACK);
                scx_agg_event(&e_sys, e_cpu, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
+               scx_agg_event(&e_sys, e_cpu, SCX_EV_DISPATCH_KEEP_LAST);
        }
 
        /*