scx_layered: Add flag to control llc iteration on dispatch
Signed-off-by: Daniel Hodges <[email protected]>
hodgesds committed Nov 13, 2024
1 parent 775d09a commit 4fc0509
Showing 2 changed files with 97 additions and 20 deletions.
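
As the diff below shows, the new local_llc_iteration flag selects the DSQ consumption order used by consume_preempting(), consume_non_open(), and consume_open_no_preempt(): when set, each of these walks LLCs starting from the CPU's local LLC and tries every layer's DSQ on that LLC before moving to the next LLC (the order the removed lines used unconditionally); when clear, layers are walked in preference order and each layer's DSQs are checked across all LLCs before falling to the next layer. A matching command-line option plumbs the value into the BPF rodata from the Rust loader.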
112 changes: 92 additions & 20 deletions scheds/rust/scx_layered/src/bpf/main.bpf.c
@@ -37,6 +37,7 @@ const volatile bool smt_enabled = true;
 const volatile bool has_little_cores = true;
 const volatile bool disable_topology = false;
 const volatile bool xnuma_preemption = false;
+const volatile bool local_llc_iteration = true;
 const volatile s32 __sibling_cpu[MAX_CPUS];
 const volatile bool monitor_disable = false;
 const volatile unsigned char all_cpus[MAX_CPUS_U8];
@@ -1462,20 +1463,40 @@ __weak int consume_preempting(struct cost *costc, u32 my_llc_id)
 	if (!costc)
 		return -EINVAL;
 
-	bpf_for(llc_idx, 0, nr_llcs) {
-		u32 llc_id = rotate_llc_id(my_llc_id, llc_idx);
+	if (local_llc_iteration) {
+		bpf_for(llc_idx, 0, nr_llcs) {
+			u32 llc_id = rotate_llc_id(my_llc_id, llc_idx);
+			bpf_for(idx, 0, nr_layers) {
+				layer_idx = rotate_layer_id(costc->pref_layer, idx);
+				if (layer_idx >= nr_layers) {
+					scx_bpf_error("can't happen");
+					return -EINVAL;
+				}
+				layer = MEMBER_VPTR(layers, [layer_idx]);
+				if (has_budget(costc, layer) == 0)
+					continue;
+				dsq_id = layer_dsq_id(layer_idx, llc_id);
+				if (scx_bpf_consume(dsq_id))
+					return 0;
+			}
+		}
+	} else {
 		bpf_for(idx, 0, nr_layers) {
 			layer_idx = rotate_layer_id(costc->pref_layer, idx);
 			if (layer_idx >= nr_layers) {
 				scx_bpf_error("can't happen");
 				return -EINVAL;
 			}
 			layer = MEMBER_VPTR(layers, [layer_idx]);
-			if (has_budget(costc, layer) == 0)
+			if (!layer->preempt || has_budget(costc, layer) == 0)
 				continue;
-			dsq_id = layer_dsq_id(layer_idx, llc_id);
-			if (scx_bpf_consume(dsq_id))
-				return 0;
+
+			bpf_for(llc_idx, 0, nr_llcs) {
+				u32 llc_id = rotate_llc_id(my_llc_id, llc_idx);
+				dsq_id = layer_dsq_id(layer_idx, llc_id);
+				if (scx_bpf_consume(dsq_id))
+					return 0;
+			}
 		}
 	}
 
@@ -1491,8 +1512,35 @@ static __noinline int consume_non_open(struct cost *costc, s32 cpu, u32 my_llc_id)
 	if (!costc)
 		return -EINVAL;
 
-	bpf_for(llc_idx, 0, nr_llcs) {
-		u32 llc_id = rotate_llc_id(my_llc_id, llc_idx);
+	if (local_llc_iteration) {
+		bpf_for(llc_idx, 0, nr_llcs) {
+			u32 llc_id = rotate_llc_id(my_llc_id, llc_idx);
+			bpf_for(idx, 0, nr_layers) {
+				layer_idx = rotate_layer_id(costc->pref_layer, idx);
+				if (layer_idx >= nr_layers) {
+					scx_bpf_error("can't happen");
+					return -EINVAL;
+				}
+				layer = MEMBER_VPTR(layers, [layer_idx]);
+				if (has_budget(costc, layer) == 0)
+					continue;
+
+				struct cpumask *layer_cpumask;
+				if (!(layer_cpumask = lookup_layer_cpumask(layer_idx)))
+					return -ENOENT;
+
+				if (!bpf_cpumask_test_cpu(cpu, layer_cpumask) &&
+				    (cpu > nr_possible_cpus ||
+				     cpu != fallback_cpu ||
+				     layer->nr_cpus != 0))
+					continue;
+
+				dsq_id = layer_dsq_id(layer_idx, llc_id);
+				if (scx_bpf_consume(dsq_id))
+					return 0;
+			}
+		}
+	} else {
 		bpf_for(idx, 0, nr_layers) {
 			layer_idx = rotate_layer_id(costc->pref_layer, idx);
 			if (layer_idx >= nr_layers) {
@@ -1506,16 +1554,18 @@ static __noinline int consume_non_open(struct cost *costc, s32 cpu, u32 my_llc_id)
 			struct cpumask *layer_cpumask;
 			if (!(layer_cpumask = lookup_layer_cpumask(layer_idx)))
 				return -ENOENT;
 
 			if (!bpf_cpumask_test_cpu(cpu, layer_cpumask) &&
-			    (cpu > nr_possible_cpus ||
-			     cpu != fallback_cpu ||
-			     layer->nr_cpus != 0))
+			    (cpu > nr_possible_cpus || cpu != fallback_cpu ||
+			     layer->nr_cpus != 0))
 				continue;
-
-			dsq_id = layer_dsq_id(layer_idx, llc_id);
-			if (scx_bpf_consume(dsq_id))
-				return 0;
+			bpf_for(llc_idx, 0, nr_llcs) {
+				u32 llc_id = rotate_llc_id(my_llc_id, llc_idx);
+				dsq_id = layer_dsq_id(layer_idx, llc_id);
+
+				if (scx_bpf_consume(dsq_id))
+					return 0;
+			}
 		}
 	}
 
@@ -1531,8 +1581,27 @@ __weak int consume_open_no_preempt(struct cost *costc, u32 my_llc_id)
 	if (!costc)
 		return -EINVAL;
 
-	bpf_for(llc_idx, 0, nr_llcs) {
-		u32 llc_id = rotate_llc_id(my_llc_id, llc_idx);
+	if (local_llc_iteration) {
+		bpf_for(llc_idx, 0, nr_llcs) {
+			u32 llc_id = rotate_llc_id(my_llc_id, llc_idx);
+			bpf_for(idx, 0, nr_layers) {
+				layer_idx = rotate_layer_id(costc->pref_layer, idx);
+				if (layer_idx >= nr_layers) {
+					scx_bpf_error("can't happen");
+					return -EINVAL;
+				}
+				layer = MEMBER_VPTR(layers, [layer_idx]);
+				if (has_budget(costc, layer) == 0)
+					continue;
+				if (layer->preempt || layer->kind == LAYER_KIND_CONFINED)
+					continue;
+
+				dsq_id = layer_dsq_id(layer_idx, llc_id);
+				if (scx_bpf_consume(dsq_id))
+					return 0;
+			}
+		}
+	} else {
 		bpf_for(idx, 0, nr_layers) {
 			layer_idx = rotate_layer_id(costc->pref_layer, idx);
 			if (layer_idx >= nr_layers) {
@@ -1544,10 +1613,13 @@ __weak int consume_open_no_preempt(struct cost *costc, u32 my_llc_id)
 				continue;
 			if (layer->preempt || layer->kind == LAYER_KIND_CONFINED)
 				continue;
+			bpf_for(llc_idx, 0, nr_llcs) {
+				u32 llc_id = rotate_llc_id(my_llc_id, llc_idx);
+				dsq_id = layer_dsq_id(layer_idx, llc_id);
 
-			dsq_id = layer_dsq_id(layer_idx, llc_id);
-			if (scx_bpf_consume(dsq_id))
-				return 0;
+				if (scx_bpf_consume(dsq_id))
+					return 0;
+			}
 		}
 	}
 
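To make the two orders easier to compare, here is a minimal userspace sketch of the iteration logic. It is illustrative only: rotate(), dispatch_order(), and the fixed NR_LLCS/NR_LAYERS are simplified stand-ins for rotate_llc_id()/rotate_layer_id() and the per-(layer, LLC) DSQ consumption in main.bpf.c.

/*
 * Minimal model of the two iteration orders selected by
 * local_llc_iteration.  Each printf marks where the real code would
 * try scx_bpf_consume() on layer_dsq_id(layer, llc).
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_LLCS   4
#define NR_LAYERS 3

static unsigned int rotate(unsigned int base, unsigned int idx, unsigned int nr)
{
	return (base + idx) % nr;
}

static void dispatch_order(bool local_llc_iteration, unsigned int my_llc,
			   unsigned int pref_layer)
{
	unsigned int llc_idx, idx;

	if (local_llc_iteration) {
		/* LLC-major: drain every layer on the local LLC before
		 * looking at remote LLCs. */
		for (llc_idx = 0; llc_idx < NR_LLCS; llc_idx++)
			for (idx = 0; idx < NR_LAYERS; idx++)
				printf("try llc=%u layer=%u\n",
				       rotate(my_llc, llc_idx, NR_LLCS),
				       rotate(pref_layer, idx, NR_LAYERS));
	} else {
		/* Layer-major: walk layers in preference order and check
		 * each layer across all LLCs before the next layer. */
		for (idx = 0; idx < NR_LAYERS; idx++)
			for (llc_idx = 0; llc_idx < NR_LLCS; llc_idx++)
				printf("try layer=%u llc=%u\n",
				       rotate(pref_layer, idx, NR_LAYERS),
				       rotate(my_llc, llc_idx, NR_LLCS));
	}
}

int main(void)
{
	/* e.g. a CPU whose local LLC is 1, preferred layer 2 */
	dispatch_order(true, 1, 2);
	dispatch_order(false, 1, 2);
	return 0;
}

In short, with local_llc_iteration set, runnable work on the local LLC is preferred regardless of layer; with it clear, higher-preference layers win even when their work sits on a remote LLC.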
5 changes: 5 additions & 0 deletions scheds/rust/scx_layered/src/main.rs
@@ -458,6 +458,10 @@ struct Opts {
     #[clap(long)]
     run_example: bool,
 
+    /// Enables iteration over local LLCs first for dispatch.
+    #[clap(long, default_value = "false")]
+    local_llc_iteration: bool,
+
     /// Disable antistall
     #[clap(long, default_value = "false")]
     disable_antistall: bool,
@@ -1494,6 +1498,7 @@ impl<'a> Scheduler<'a> {
         skel.maps.rodata_data.has_little_cores = topo.has_little_cores();
         skel.maps.rodata_data.disable_topology = disable_topology;
         skel.maps.rodata_data.xnuma_preemption = opts.xnuma_preemption;
+        skel.maps.rodata_data.local_llc_iteration = opts.local_llc_iteration;
         skel.maps.rodata_data.antistall_sec = opts.antistall_sec;
         if opts.monitor_disable {
             skel.maps.rodata_data.monitor_disable = opts.monitor_disable;
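Because the option defaults to false, the loader writes false into skel.maps.rodata_data.local_llc_iteration unless the switch (presumably --local-llc-iteration, the kebab-case name clap derives from the local_llc_iteration field above) is passed on the scx_layered command line; passing it selects the LLC-major dispatch path shown in the BPF diff.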
