	__type(value, struct central_timer);
} central_timer SEC(".maps");
-static bool vtime_before(u64 a, u64 b)
-{
-	return (s64)(a - b) < 0;
-}
-
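For reference, the replacement for the deleted helper is presumably the shared time_before() from the schedulers' common header (scx common.bpf.h in recent trees) rather than a per-scheduler copy; the same open-coded vtime_before() is also removed from scx_flatcg and scx_simple further down. A minimal sketch of the assumed definition, using the same wrap-safe signed-difference idiom the deleted helper used:

static inline bool time_before(u64 a, u64 b)
{
	/* wrap-safe: true iff @a is earlier than @b, even if the
	 * u64 clock wrapped between the two timestamps */
	return (s64)(a - b) < 0;
}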
s32 BPF_STRUCT_OPS(central_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
		/* kick iff the current one exhausted its slice */
		started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
		if (started_at && *started_at &&
-		    vtime_before(now, *started_at + slice_ns))
+		    time_before(now, *started_at + slice_ns))
			continue;
		/* and there's something pending */
	return (dividend + divisor - 1) / divisor;
}
-static bool vtime_before(u64 a, u64 b)
-{
-	return (s64)(a - b) < 0;
-}
-
static bool cgv_node_less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct cgv_node *cgc_a, *cgc_b;
	 */
	max_budget = (cgrp_slice_ns * nr_cpus * cgc->hweight) /
		     (2 * FCG_HWEIGHT_ONE);
-	if (vtime_before(cvtime, cvtime_now - max_budget))
+	if (time_before(cvtime, cvtime_now - max_budget))
		cvtime = cvtime_now - max_budget;
	cgv_node->cvtime = cvtime;
		 * Limit the amount of budget that an idling task can accumulate
		 * to one slice.
		 */
-		if (vtime_before(tvtime, cgc->tvtime_now - SCX_SLICE_DFL))
+		if (time_before(tvtime, cgc->tvtime_now - SCX_SLICE_DFL))
			tvtime = cgc->tvtime_now - SCX_SLICE_DFL;
		scx_bpf_dsq_insert_vtime(p, cgrp->kn->id, SCX_SLICE_DFL,
		 * from multiple CPUs and thus racy. Any error should be
		 * contained and temporary. Let's just live with it.
		 */
-		if (vtime_before(cgc->tvtime_now, p->scx.dsq_vtime))
+		if (time_before(cgc->tvtime_now, p->scx.dsq_vtime))
			cgc->tvtime_now = p->scx.dsq_vtime;
	}
	bpf_cgroup_release(cgrp);
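The one-slice clamp in the enqueue path above is worth a worked example (illustrative only, not part of the patch; the 20ms value for SCX_SLICE_DFL is an assumption, as is the local time_before()). Without the clamp, a task that idled for a long time would wake with a vtime far behind the cgroup's clock and be dispatched ahead of everything else until its deficit burned off:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* same wrap-safe idiom as the helpers in the patch */
static bool time_before(u64 a, u64 b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	const u64 slice = 20ULL * 1000 * 1000;	/* assumed 20ms default slice */
	u64 tvtime_now = 1000 * slice;	/* cgroup clock advanced while task slept */
	u64 tvtime = 0;			/* sleeper's vtime is 1000 slices behind */

	if (time_before(tvtime, tvtime_now - slice))
		tvtime = tvtime_now - slice;	/* cap accumulated credit at one slice */

	printf("credit after clamp: %llu slice(s)\n",
	       (unsigned long long)((tvtime_now - tvtime) / slice));	/* prints 1 */
	return 0;
}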
	cgv_node = container_of(rb_node, struct cgv_node, rb_node);
	cgid = cgv_node->cgid;
-	if (vtime_before(cvtime_now, cgv_node->cvtime))
+	if (time_before(cvtime_now, cgv_node->cvtime))
		cvtime_now = cgv_node->cvtime;
	/*
	if (!cpuc->cur_cgid)
		goto pick_next_cgroup;
-	if (vtime_before(now, cpuc->cur_at + cgrp_slice_ns)) {
+	if (time_before(now, cpuc->cur_at + cgrp_slice_ns)) {
		if (scx_bpf_dsq_move_to_local(cpuc->cur_cgid)) {
			stat_inc(FCG_STAT_CNS_KEEP);
			return;
		    struct cgroup *from, struct cgroup *to)
{
	struct fcg_cgrp_ctx *from_cgc, *to_cgc;
-	s64 vtime_delta;
+	s64 delta;
	/* find_cgrp_ctx() triggers scx_ops_error() on lookup failures */
	if (!(from_cgc = find_cgrp_ctx(from)) || !(to_cgc = find_cgrp_ctx(to)))
		return;
-	vtime_delta = p->scx.dsq_vtime - from_cgc->tvtime_now;
-	p->scx.dsq_vtime = to_cgc->tvtime_now + vtime_delta;
+	delta = time_delta(p->scx.dsq_vtime, from_cgc->tvtime_now);
+	p->scx.dsq_vtime = to_cgc->tvtime_now + delta;
}
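time_delta() is the other shared helper this hunk leans on. A plausible sketch, assuming it returns the wrap-safe difference clamped to be non-negative; if the real helper instead returns the raw signed difference, the hunk behaves exactly like the removed open-coded subtraction:

static inline s64 time_delta(u64 after, u64 before)
{
	/* signed difference survives u64 wraparound; clamping so a
	 * stale @after can't produce a negative delta is an assumption */
	s64 delta = (s64)(after - before);

	return delta > 0 ? delta : 0;
}

Either way, the intent of the hunk is unchanged: on migration, the task's dsq_vtime is rebased from the source cgroup's clock onto the destination's, preserving its relative lag.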
s32 BPF_STRUCT_OPS_SLEEPABLE(fcg_init)
		(*cnt_p)++;
}
-static inline bool vtime_before(u64 a, u64 b)
-{
-	return (s64)(a - b) < 0;
-}
-
s32 BPF_STRUCT_OPS(simple_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags)
{
	bool is_idle = false;
		 * Limit the amount of budget that an idling task can accumulate
		 * to one slice.
		 */
-		if (vtime_before(vtime, vtime_now - SCX_SLICE_DFL))
+		if (time_before(vtime, vtime_now - SCX_SLICE_DFL))
			vtime = vtime_now - SCX_SLICE_DFL;
		scx_bpf_dsq_insert_vtime(p, SHARED_DSQ, SCX_SLICE_DFL, vtime,
	 * thus racy. Any error should be contained and temporary. Let's just
	 * live with it.
	 */
-	if (vtime_before(vtime_now, p->scx.dsq_vtime))
+	if (time_before(vtime_now, p->scx.dsq_vtime))
		vtime_now = p->scx.dsq_vtime;
}
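As a sanity check of why all of these comparisons use the signed-difference idiom instead of a plain `<`, here is a small self-contained program (illustrative only, not part of the patch) showing that the idiom keeps ordering correct across u64 wraparound:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* the wrap-safe comparison idiom used throughout the patch */
static bool time_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	uint64_t a = UINT64_MAX - 100;	/* timestamp taken just before wraparound */
	uint64_t b = a + 200;		/* 200 ticks later; wraps to the small value 99 */

	printf("a < b            -> %d\n", a < b);		/* 0: plain compare is fooled */
	printf("time_before(a,b) -> %d\n", time_before(a, b));	/* 1: ordering preserved */
	return 0;
}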