lint: enable cargo fmt
JakeHillion committed Sep 10, 2024
1 parent ffe8ca3 commit e94cff4
Showing 24 changed files with 204 additions and 222 deletions.
15 changes: 15 additions & 0 deletions .github/workflows/build-scheds.yml
@@ -2,6 +2,21 @@ name: build-scheds
run-name: ${{ github.actor }} PR run
on: [pull_request]
jobs:
  lint:
    runs-on: ubuntu-22.04
    steps:
    - name: Install deps
      run: |
        curl https://sh.rustup.rs -sSf | RUSTUP_INIT_SKIP_PATH_CHECK=yes sh -s -- -y
        rustup install nightly-2024-09-10
        export PATH="~/.cargo/bin:$PATH"
    - uses: actions/checkout@v4

    # Lint code
    - run: cargo +nightly-2024-09-10 fmt
    - run: git diff --exit-code

  build-schedulers:
    runs-on: ubuntu-22.04
    steps:
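The new lint job amounts to running rustfmt with the pinned nightly toolchain and failing the build if it changes anything. A minimal sketch of the equivalent local check, assuming rustup is already installed (the toolchain name is taken from the workflow above):

    rustup install nightly-2024-09-10     # same pinned toolchain as CI
    cargo +nightly-2024-09-10 fmt         # rewrites files in place
    git diff --exit-code                  # non-zero exit if formatting changed anything

cargo fmt --check achieves a similar result without modifying the working tree; the fmt-then-diff approach mirrors what CI does here.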
3 changes: 1 addition & 2 deletions rust/scx_loader/src/main.rs
@@ -180,8 +180,7 @@ async fn worker_loop(mut receiver: UnboundedReceiver<ScxMessage>) -> Result<()>
// setup channel for scheduler runner
let (runner_tx, runner_rx) = tokio::sync::mpsc::channel::<RunnerMessage>(1);

let run_sched_future =
tokio::spawn(async move { handle_child_process(runner_rx).await });
let run_sched_future = tokio::spawn(async move { handle_child_process(runner_rx).await });

// prepare future for tokio
tokio::pin!(run_sched_future);
2 changes: 1 addition & 1 deletion rust/scx_rustland_core/src/alloc.rs
@@ -26,11 +26,11 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2.

use std::sync::Mutex;
use std::alloc::{GlobalAlloc, Layout};
use std::fs::File;
use std::io::{BufRead, BufReader, Write};
use std::num::ParseIntError;
use std::sync::Mutex;

/// Buddy allocator
///
5 changes: 4 additions & 1 deletion rust/scx_rustland_core/src/rustland_builder.rs
@@ -31,7 +31,10 @@ impl RustLandBuilder {
pub fn build(&mut self) -> Result<()> {
// Embed the BPF source files.
let intf = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/assets/bpf/intf.h"));
let skel = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/assets/bpf/main.bpf.c"));
let skel = include_bytes!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/assets/bpf/main.bpf.c"
));
let bpf = include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/assets/bpf.rs"));

// Generate BPF backend code (C).
8 changes: 3 additions & 5 deletions rust/scx_utils/src/bpf_builder.rs
@@ -590,11 +590,9 @@ mod tests {

println!("vmlinux.h: ver={:?} sha1={:?}", &ver, &sha1,);

assert!(
regex::Regex::new(r"^([1-9][0-9]*\.[1-9][0-9][a-z0-9-]*)$")
.unwrap()
.is_match(&ver)
);
assert!(regex::Regex::new(r"^([1-9][0-9]*\.[1-9][0-9][a-z0-9-]*)$")
.unwrap()
.is_match(&ver));
assert!(regex::Regex::new(r"^[0-9a-z]{12}$")
.unwrap()
.is_match(&sha1));
6 changes: 3 additions & 3 deletions rust/scx_utils/src/cpumask.rs
@@ -95,12 +95,12 @@ impl Cpumask {
"none" => {
let mask = bitvec![u64, Lsb0; 0; *NR_CPU_IDS];
return Ok(Self { mask });
},
}
"all" => {
let mask = bitvec![u64, Lsb0; 1; *NR_CPU_IDS];
return Ok(Self { mask });
},
_ => {},
}
_ => {}
}
let hex_str = {
let mut tmp_str = cpumask
2 changes: 1 addition & 1 deletion rust/scx_utils/src/infeasible.rs
@@ -112,7 +112,7 @@
//!```rust
//! use scx_utils::LoadAggregator;
//! use log::info;
//!
//!
//! let mut aggregator = LoadAggregator::new(32, false);
//! // Create a LoadAggregator object, specifying the number of CPUs on the
//! // system, and whether it should only aggregate duty cycle.
1 change: 0 additions & 1 deletion rust/scx_utils/src/lib.rs
@@ -82,4 +82,3 @@ pub use log_recorder::LogRecorderBuilder
mod misc;
pub use misc::monitor_stats;
pub use misc::set_rlimit_infinity;

6 changes: 3 additions & 3 deletions rust/scx_utils/src/misc.rs
@@ -1,10 +1,10 @@
use anyhow::Result;
use libc;
use log::info;
use scx_stats::prelude::*;
use serde::Deserialize;
use std::thread::sleep;
use std::time::Duration;
use libc;

pub fn monitor_stats<T>(
stats_args: &Vec<(String, String)>,
@@ -59,7 +59,7 @@ where
Ok(())
}

pub fn set_rlimit_infinity(){
pub fn set_rlimit_infinity() {
unsafe {
// Call setrlimit to set the locked-in-memory limit to unlimited.
let new_rlimit = libc::rlimit {
@@ -71,4 +71,4 @@ pub fn set_rlimit_infinity(){
panic!("setrlimit failed with error code: {}", res);
}
};
}
}
12 changes: 7 additions & 5 deletions rust/scx_utils/src/topology.rs
@@ -610,7 +610,9 @@ fn create_gpus() -> BTreeMap<usize, Vec<Gpu>> {
let Ok(nvidia_gpu) = nvml.device_by_index(i) else {
continue;
};
let graphics_boost_clock = nvidia_gpu.max_customer_boost_clock(Clock::Graphics).unwrap_or(0);
let graphics_boost_clock = nvidia_gpu
.max_customer_boost_clock(Clock::Graphics)
.unwrap_or(0);
let sm_boost_clock = nvidia_gpu.max_customer_boost_clock(Clock::SM).unwrap_or(0);
let Ok(memory_info) = nvidia_gpu.memory_info() else {
continue;
@@ -629,8 +631,8 @@ fn create_gpus() -> BTreeMap<usize, Vec<Gpu>> {
let numa_path = format!("/sys/bus/pci/devices/{}/numa_node", fixed_bus_id);
let numa_node = read_file_usize(&Path::new(&numa_path)).unwrap_or(0);

let gpu = Gpu{
index: GpuIndex::Nvidia{nvml_id: index},
let gpu = Gpu {
index: GpuIndex::Nvidia { nvml_id: index },
node_id: numa_node as usize,
max_graphics_clock: graphics_boost_clock as usize,
max_sm_clock: sm_boost_clock as usize,
@@ -661,7 +663,7 @@ fn create_default_node(online_mask: &Cpumask) -> Result<Vec<Node>> {
node_gpus.insert(gpu.index, gpu.clone());
}
}
_ => {},
_ => {}
};

let mut node = Node {
@@ -708,7 +710,7 @@ fn create_numa_nodes(online_mask: &Cpumask) -> Result<Vec<Node>> {
node_gpus.insert(gpu.index, gpu.clone());
}
}
_ => {},
_ => {}
};

let mut node = Node {
79 changes: 28 additions & 51 deletions scheds/rust/scx_bpfland/src/main.rs
@@ -11,8 +11,6 @@ pub mod bpf_intf;
pub use bpf_intf::*;

mod stats;
use stats::Metrics;

use std::collections::HashMap;
use std::ffi::c_int;
use std::fs::File;
@@ -28,30 +26,27 @@ use anyhow::Context;
use anyhow::Result;
use clap::Parser;
use crossbeam::channel::RecvTimeoutError;
use log::info;

use log::warn;

use libbpf_rs::skel::OpenSkel;
use libbpf_rs::skel::Skel;
use libbpf_rs::skel::SkelBuilder;
use libbpf_rs::OpenObject;
use libbpf_rs::ProgramInput;

use log::info;
use log::warn;
use scx_stats::prelude::*;

use scx_utils::build_id;
use scx_utils::scx_ops_attach;
use scx_utils::scx_ops_load;
use scx_utils::scx_ops_open;
use scx_utils::set_rlimit_infinity;
use scx_utils::uei_exited;
use scx_utils::uei_report;
use scx_utils::CoreType;
use scx_utils::Cpumask;
use scx_utils::Topology;
use scx_utils::UserExitInfo;
use scx_utils::NR_CPU_IDS;
use scx_utils::set_rlimit_infinity;
use stats::Metrics;

const SCHEDULER_NAME: &'static str = "scx_bpfland";

@@ -88,7 +83,7 @@ fn get_primary_cpus(mode: Powermode) -> std::io::Result<Vec<usize>> {
// Convert an array of CPUs to the corresponding cpumask of any arbitrary size.
fn cpus_to_cpumask(cpus: &Vec<usize>) -> String {
if cpus.is_empty() {
return String::from("none");
return String::from("none");
}

// Determine the maximum CPU ID to create a sufficiently large byte vector.
@@ -293,8 +288,7 @@ impl<'a> Scheduler<'a> {

// Initialize the primary scheduling domain and the preferred domain.
let energy_profile = Self::read_energy_profile();
if let Err(err) = Self::init_preferred_domain(&mut skel, &opts.preferred_domain)
{
if let Err(err) = Self::init_preferred_domain(&mut skel, &opts.preferred_domain) {
warn!("failed to initialize preferred domain: error {}", err);
}
if let Err(err) = Self::init_energy_domain(&mut skel, &opts.primary_domain, &energy_profile)
@@ -408,17 +402,10 @@ impl<'a> Scheduler<'a> {
Cpumask::from_str(&cpus_to_cpumask(&cpus))
}

fn init_preferred_domain(
skel: &mut BpfSkel<'_>,
preferred_domain: &String,
) -> Result<()> {
fn init_preferred_domain(skel: &mut BpfSkel<'_>, preferred_domain: &String) -> Result<()> {
let domain = match preferred_domain.as_str() {
"auto" => {
Self::epp_to_cpumask(Powermode::Turbo)?
}
&_ => {
Cpumask::from_str(&preferred_domain)?
}
"auto" => Self::epp_to_cpumask(Powermode::Turbo)?,
&_ => Cpumask::from_str(&preferred_domain)?,
};

info!("preferred CPU domain = 0x{:x}", domain);
@@ -430,7 +417,10 @@ impl<'a> Scheduler<'a> {
for cpu in 0..*NR_CPU_IDS {
if domain.test_cpu(cpu) {
if let Err(err) = Self::enable_preferred_cpu(skel, cpu as i32) {
warn!("failed to add CPU {} to preferred domain: error {}", cpu, err);
warn!(
"failed to add CPU {} to preferred domain: error {}",
cpu, err
);
}
}
}
@@ -444,31 +434,19 @@ impl<'a> Scheduler<'a> {
energy_profile: &String,
) -> Result<()> {
let domain = match primary_domain.as_str() {
"powersave" => {
Self::epp_to_cpumask(Powermode::Powersave)?
}
"performance" => {
Self::epp_to_cpumask(Powermode::Performance)?
}
"auto" => {
match energy_profile.as_str() {
"power" | "balance_power" | "powersave" => {
Self::epp_to_cpumask(Powermode::Powersave)?
}
"balance_performance" | "performance" => {
Self::epp_to_cpumask(Powermode::Performance)?
}
&_ => {
Self::epp_to_cpumask(Powermode::Any)?
}
"powersave" => Self::epp_to_cpumask(Powermode::Powersave)?,
"performance" => Self::epp_to_cpumask(Powermode::Performance)?,
"auto" => match energy_profile.as_str() {
"power" | "balance_power" | "powersave" => {
Self::epp_to_cpumask(Powermode::Powersave)?
}
}
"all" => {
Self::epp_to_cpumask(Powermode::Any)?
}
&_ => {
Cpumask::from_str(&primary_domain)?
}
"balance_performance" | "performance" => {
Self::epp_to_cpumask(Powermode::Performance)?
}
&_ => Self::epp_to_cpumask(Powermode::Any)?,
},
"all" => Self::epp_to_cpumask(Powermode::Any)?,
&_ => Cpumask::from_str(&primary_domain)?,
};

info!("primary CPU domain = 0x{:x}", domain);
@@ -526,10 +504,9 @@ impl<'a> Scheduler<'a> {
self.energy_profile = energy_profile.clone();

if self.opts.primary_domain == "auto" {
if let Err(err) = Self::init_preferred_domain(
&mut self.skel,
&self.opts.preferred_domain,
) {
if let Err(err) =
Self::init_preferred_domain(&mut self.skel, &self.opts.preferred_domain)
{
warn!("failed to refresh preferred domain: error {}", err);
}
if let Err(err) = Self::init_energy_domain(
11 changes: 6 additions & 5 deletions scheds/rust/scx_bpfland/src/stats.rs
@@ -1,14 +1,15 @@
use anyhow::Result;
use scx_stats::prelude::*;
use scx_stats_derive::Stats;
use serde::Deserialize;
use serde::Serialize;
use std::io::Write;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::time::Duration;

use anyhow::Result;
use scx_stats::prelude::*;
use scx_stats_derive::Stats;
use serde::Deserialize;
use serde::Serialize;

#[derive(Clone, Debug, Default, Serialize, Deserialize, Stats)]
#[stat(top)]
pub struct Metrics {
