add benchmark for concurrent check
lalitb committed Dec 10, 2024
1 parent 56cd4ef · commit 6f6d08e
Showing 1 changed file with 77 additions and 19 deletions.
opentelemetry-user-events-logs/benches/benchmark.rs
@@ -1,34 +1,92 @@
// Run as the root user, since access to `tracefs` is required, or ensure the current user has the necessary permissions.
// To run benchmarks with root privileges, execute:
// sudo -E /home/<username>/.cargo/bin/cargo bench
// Replace <username> with your actual username.
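// To run only one of the benchmarks below, append its name as a filter (Criterion matches benchmark names), e.g.:
// sudo -E /home/<username>/.cargo/bin/cargo bench -- provider_find_set_single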
//
// System Information:
// Processor: AMD EPYC 7763 64-Core Processor
// CPU Cores: 8
// Logical Processors: 16
// Memory: 64 GB
// time: [4.1429 ns 4.1514 ns 4.1621 ns]

use criterion::{criterion_group, criterion_main, Criterion};
use eventheader_dynamic::{Provider, ProviderOptions};
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};
use std::thread;

/// Benchmark `find_set` with no concurrent threads
fn benchmark_find_set_single(c: &mut Criterion) {
    // Set up the Provider
    let mut options = ProviderOptions::new();
    options = *options.group_name("testprovider");
    let mut provider = Provider::new("testprovider", &options);

    // Register a dummy event set with a specific level and keyword
    let keyword = 0x01; // Example keyword
    let level = 4; // Example level (Informational)
    provider.register_set(eventheader::Level::Informational, keyword);

    // Benchmark the `find_set` method followed by the `enabled` check
    c.bench_function("provider_find_set_single", |b| {
        b.iter(|| {
            if let Some(event_set) = provider.find_set(level.into(), keyword) {
                event_set.enabled(); // Check if the tracepoint is being listened to
            }
        });
    });
}

/// Benchmark `find_set` with a parameterized number of concurrent threads
fn benchmark_find_set_concurrent(c: &mut Criterion) {
    let thread_counts = [2, 4, 8]; // Test with 2, 4, and 8 threads

    for &thread_count in &thread_counts {
        // Set up the Provider
        let mut options = ProviderOptions::new();
        options = *options.group_name("testprovider");
        let mut provider = Provider::new("testprovider", &options);

        // Register a dummy event set with a specific level and keyword
        let keyword = 0x01; // Example keyword
        let level = 4; // Example level (Informational)
        provider.register_set(eventheader::Level::Informational, keyword);

        // Shared Provider and stop flag
        let provider = Arc::new(provider);
        let stop_flag = Arc::new(AtomicBool::new(false));

        // Spawn worker threads
        let mut worker_handles = Vec::new();
        for _ in 0..thread_count {
            let provider_clone = Arc::clone(&provider);
            let stop_flag_clone = Arc::clone(&stop_flag);
            worker_handles.push(thread::spawn(move || {
                while !stop_flag_clone.load(Ordering::Relaxed) {
                    if let Some(event_set) = provider_clone.find_set(level.into(), keyword) {
                        event_set.enabled(); // Check if the tracepoint is being listened to
                    }
                }
            }));
        }

        // Dereference the `Arc` once before the benchmark to reduce overhead
        let provider_ref: &Provider = &provider;

        // Benchmark the `find_set` method followed by the `enabled` check
        let benchmark_name = format!("provider_find_set_concurrent_{}threads", thread_count);
        c.bench_function(&benchmark_name, |b| {
            b.iter(|| {
                if let Some(event_set) = provider_ref.find_set(level.into(), keyword) {
                    event_set.enabled(); // Check if the tracepoint is being listened to
                }
            });
        });

        // Signal worker threads to stop
        stop_flag.store(true, Ordering::Relaxed);

        // Wait for all worker threads to complete
        for handle in worker_handles {
            let _ = handle.join();
        }
    }
}

criterion_group!(
    benches,
    benchmark_find_set_single,
    benchmark_find_set_concurrent
);
criterion_main!(benches);
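
For context, these benchmarks measure the per-record fast path: look up the event set, then check `enabled()` before doing any event-building work. A minimal sketch of how such a gate might be used in calling code (not part of this commit; `maybe_emit` is a hypothetical helper, it assumes the same imports as the benchmark file, and the actual event writing is elided):

// Sketch only: gate event emission on the listener check that the benchmarks above measure.
fn maybe_emit(provider: &Provider, level: eventheader::Level, keyword: u64) -> bool {
    if let Some(event_set) = provider.find_set(level, keyword) {
        if event_set.enabled() {
            // A user_events listener is attached; build and write the event here.
            return true;
        }
    }
    false // No listener attached, so all serialization work is skipped.
}

A caller would pass, for example, `eventheader::Level::Informational` and the keyword registered above.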
