sched_ext: Replace bpf_ktime_get_ns() with scx_bpf_now()
authorChangwoo Min <changwoo@igalia.com>
Thu, 9 Jan 2025 13:14:55 +0000 (22:14 +0900)
committerTejun Heo <tj@kernel.org>
Fri, 10 Jan 2025 18:04:40 +0000 (08:04 -1000)
In the BPF schedulers that use bpf_ktime_get_ns() -- scx_central and
scx_flatcg -- replace the bpf_ktime_get_ns() calls with scx_bpf_now().

Signed-off-by: Changwoo Min <changwoo@igalia.com>
Acked-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
tools/sched_ext/scx_central.bpf.c
tools/sched_ext/scx_flatcg.bpf.c

index 2907df78241e7f16433c52efd62547a453579e40..4239034ad5933f54e6b999346900f073902379bb 100644 (file)
@@ -245,7 +245,7 @@ void BPF_STRUCT_OPS(central_running, struct task_struct *p)
        s32 cpu = scx_bpf_task_cpu(p);
        u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
        if (started_at)
-               *started_at = bpf_ktime_get_ns() ?: 1;  /* 0 indicates idle */
+               *started_at = scx_bpf_now() ?: 1;       /* 0 indicates idle */
 }
 
 void BPF_STRUCT_OPS(central_stopping, struct task_struct *p, bool runnable)
@@ -258,7 +258,7 @@ void BPF_STRUCT_OPS(central_stopping, struct task_struct *p, bool runnable)
 
 static int central_timerfn(void *map, int *key, struct bpf_timer *timer)
 {
-       u64 now = bpf_ktime_get_ns();
+       u64 now = scx_bpf_now();
        u64 nr_to_kick = nr_queued;
        s32 i, curr_cpu;
 
index 3dbfa82883be7edb51d33a50b99b978fb164b0f3..5f588963fb2fc17598df6e200649337fa0337eb2 100644 (file)
@@ -734,7 +734,7 @@ void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev)
        struct fcg_cpu_ctx *cpuc;
        struct fcg_cgrp_ctx *cgc;
        struct cgroup *cgrp;
-       u64 now = bpf_ktime_get_ns();
+       u64 now = scx_bpf_now();
        bool picked_next = false;
 
        cpuc = find_cpu_ctx();