scx_layered: Add helper for layer slice duration #918

Merged
merged 1 commit into from Nov 11, 2024
35 changes: 20 additions & 15 deletions scheds/rust/scx_layered/src/bpf/main.bpf.c
@@ -92,6 +92,11 @@ static struct layer *lookup_layer(int idx)
return &layers[idx];
}

+static __always_inline u64 layer_slice_ns(struct layer *layer)
+{
+	return layer->slice_ns > 0 ? layer->slice_ns : slice_ns;
+}
+
static __always_inline
int rotate_layer_id(u32 base_layer_id, u32 rotation)
{
@@ -776,8 +781,8 @@ s32 BPF_STRUCT_OPS(layered_select_cpu, struct task_struct *p, s32 prev_cpu, u64

if (cpu >= 0) {
lstat_inc(LSTAT_SEL_LOCAL, layer, cctx);
-u64 layer_slice_ns = layer->slice_ns > 0 ? layer->slice_ns : slice_ns;
-scx_bpf_dispatch(p, SCX_DSQ_LOCAL, layer_slice_ns, 0);
+u64 slice_ns = layer_slice_ns(layer);
+scx_bpf_dispatch(p, SCX_DSQ_LOCAL, slice_ns, 0);
return cpu;
} else {
return prev_cpu;
@@ -1066,7 +1071,7 @@ void BPF_STRUCT_OPS(layered_enqueue, struct task_struct *p, u64 enq_flags)

try_preempt_first = cctx->try_preempt_first;
cctx->try_preempt_first = false;
-u64 layer_slice_ns = layer->slice_ns > 0 ? layer->slice_ns : slice_ns;
+u64 slice_ns = layer_slice_ns(layer);

if (cctx->yielding) {
lstat_inc(LSTAT_YIELD, layer, cctx);
@@ -1086,8 +1091,8 @@ void BPF_STRUCT_OPS(layered_enqueue, struct task_struct *p, u64 enq_flags)
* Limit the amount of budget that an idling task can accumulate
* to one slice.
*/
-if (vtime_before(vtime, layer->vtime_now - layer_slice_ns))
-vtime = layer->vtime_now - layer_slice_ns;
+if (vtime_before(vtime, layer->vtime_now - slice_ns))
+vtime = layer->vtime_now - slice_ns;

/*
* Special-case per-cpu kthreads which aren't in a preempting layer so
Comment from the Contributor Author:
I think this could potentially change the slice duration here, as the slice_ns variable is reused for kthreads. However, it should get set to the appropriate layer's slice_ns rather than the default, which I think is reasonable.
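
To make the shadowing concern concrete, here is a minimal standalone C sketch. It is not the scheduler's actual enqueue path: enqueue_like(), the 20 ms default, and the 5 ms override are all illustrative stand-ins; only the layer_slice_ns() fallback logic mirrors the helper added in this PR. The point is that the local slice_ns declaration shadows the global default for the rest of the function, so later uses (such as a per-CPU kthread dispatch) see the layer's slice when one is configured.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

/* Stand-in for the scheduler's global default slice; the 20 ms value is assumed. */
static u64 slice_ns = 20000000;

struct layer {
	u64 slice_ns;	/* 0 means "no per-layer override" */
};

/* Same fallback logic as the layer_slice_ns() helper added in this PR. */
static inline u64 layer_slice_ns(struct layer *layer)
{
	return layer->slice_ns > 0 ? layer->slice_ns : slice_ns;
}

/* Illustrative stand-in for the enqueue path, not the real layered_enqueue(). */
static void enqueue_like(struct layer *layer)
{
	/*
	 * This local declaration shadows the global slice_ns for the rest of
	 * the function, so any later use -- e.g. a per-CPU kthread dispatch --
	 * now sees the layer's slice (when configured) instead of the default.
	 */
	u64 slice_ns = layer_slice_ns(layer);

	printf("slice used below this point: %llu ns\n",
	       (unsigned long long)slice_ns);
}

int main(void)
{
	struct layer configured = { .slice_ns = 5000000 };	/* 5 ms override */
	struct layer defaulted = { .slice_ns = 0 };		/* falls back to the global */

	enqueue_like(&configured);	/* prints 5000000 */
	enqueue_like(&defaulted);	/* prints 20000000 */
	return 0;
}

Because the helper falls back to the global slice_ns when the layer has no override, the kthread path only changes when a layer explicitly configures a slice, which is the behavior the comment above calls reasonable.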

@@ -1138,13 +1143,13 @@ void BPF_STRUCT_OPS(layered_enqueue, struct task_struct *p, u64 enq_flags)

if (disable_topology) {
tctx->last_dsq = tctx->layer;
-scx_bpf_dispatch_vtime(p, tctx->layer, layer_slice_ns, vtime, enq_flags);
+scx_bpf_dispatch_vtime(p, tctx->layer, slice_ns, vtime, enq_flags);
} else {
u32 llc_id = cpu_to_llc_id(tctx->last_cpu >= 0 ? tctx->last_cpu :
bpf_get_smp_processor_id());
idx = layer_dsq_id(layer->idx, llc_id);
tctx->last_dsq = idx;
-scx_bpf_dispatch_vtime(p, idx, layer_slice_ns, vtime, enq_flags);
+scx_bpf_dispatch_vtime(p, idx, slice_ns, vtime, enq_flags);
}

preempt:
@@ -1166,9 +1171,9 @@ static bool keep_running(struct cpu_ctx *cctx, struct task_struct *p)
if (!(tctx = lookup_task_ctx(p)) || !(layer = lookup_layer(tctx->layer)))
goto no;

-u64 layer_slice_ns = layer->slice_ns > 0 ? layer->slice_ns : slice_ns;
+u64 slice_ns = layer_slice_ns(layer);
/* @p has fully consumed its slice and still wants to run */
-cctx->ran_current_for += layer_slice_ns;
+cctx->ran_current_for += slice_ns;

/*
* There wasn't anything in the local or global DSQ, but there may be
@@ -1193,7 +1198,7 @@ static bool keep_running(struct cpu_ctx *cctx, struct task_struct *p)
*/
if (disable_topology) {
if (!scx_bpf_dsq_nr_queued(layer->idx)) {
-p->scx.slice = layer_slice_ns;
+p->scx.slice = slice_ns;
lstat_inc(LSTAT_KEEP, layer, cctx);
return true;
}
@@ -1202,7 +1207,7 @@ static bool keep_running(struct cpu_ctx *cctx, struct task_struct *p)
tctx->last_cpu :
bpf_get_smp_processor_id());
if (!scx_bpf_dsq_nr_queued(dsq_id)) {
-p->scx.slice = layer_slice_ns;
+p->scx.slice = slice_ns;
lstat_inc(LSTAT_KEEP, layer, cctx);
return true;
}
@@ -1229,7 +1234,7 @@ static bool keep_running(struct cpu_ctx *cctx, struct task_struct *p)
scx_bpf_put_idle_cpumask(idle_cpumask);

if (has_idle) {
-p->scx.slice = layer_slice_ns;
+p->scx.slice = slice_ns;
lstat_inc(LSTAT_KEEP, layer, cctx);
return true;
}
@@ -2059,11 +2064,11 @@ void BPF_STRUCT_OPS(layered_stopping, struct task_struct *p, bool runnable)
cctx->current_preempt = false;
cctx->prev_exclusive = cctx->current_exclusive;
cctx->current_exclusive = false;
-u64 layer_slice_ns = layer->slice_ns > 0 ? layer->slice_ns : slice_ns;
+u64 slice_ns = layer_slice_ns(layer);

/* scale the execution time by the inverse of the weight and charge */
-if (cctx->yielding && used < layer_slice_ns)
-used = layer_slice_ns;
+if (cctx->yielding && used < slice_ns)
+used = slice_ns;
p->scx.dsq_vtime += used * 100 / p->scx.weight;
cctx->maybe_idle = true;
}