diff --git a/Cargo.lock b/Cargo.lock
index 0949ddb..6aff976 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -119,6 +119,8 @@ checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
 name = "covert_channels_benchmark"
 version = "0.1.0"
 dependencies = [
+ "basic_timing_cache_channel",
+ "cache_utils",
  "covert_channels_evaluation",
  "flush_flush",
  "flush_reload",
diff --git a/basic_timing_cache_channel/src/lib.rs b/basic_timing_cache_channel/src/lib.rs
index 019a21e..96bb332 100644
--- a/basic_timing_cache_channel/src/lib.rs
+++ b/basic_timing_cache_channel/src/lib.rs
@@ -9,7 +9,7 @@
 // Should be used by F+F and non Naive F+R

-use crate::naive::NaiveTimingChannelHandle;
+//use crate::naive::NaiveTimingChannelHandle;
 use cache_side_channel::SideChannelError::AddressNotReady;
 use cache_side_channel::{
     CacheStatus, ChannelFatalError, ChannelHandle, CoreSpec, MultipleAddrCacheSideChannel,
diff --git a/covert_channels_benchmark/Cargo.toml b/covert_channels_benchmark/Cargo.toml
index 7ce0f33..03bad90 100644
--- a/covert_channels_benchmark/Cargo.toml
+++ b/covert_channels_benchmark/Cargo.toml
@@ -11,5 +11,7 @@ covert_channels_evaluation = { path = "../covert_channels_evaluation" }
 flush_flush = { path = "../flush_flush" }
 flush_reload = { path = "../flush_reload" }
 nix = "0.18.0"
+basic_timing_cache_channel = { path = "../basic_timing_cache_channel" }
+cache_utils = { path = "../cache_utils" }
diff --git a/covert_channels_benchmark/src/bin/main-naive.rs b/covert_channels_benchmark/src/bin/main-naive.rs
index c556834..814bed4 100644
--- a/covert_channels_benchmark/src/bin/main-naive.rs
+++ b/covert_channels_benchmark/src/bin/main-naive.rs
@@ -1,6 +1,6 @@
 #![feature(unsafe_block_in_unsafe_fn)]
 #![deny(unsafe_op_in_unsafe_fn)]
-
+/*
 use std::io::{stdout, Write};

 use covert_channels_evaluation::{benchmark_channel, CovertChannel, CovertChannelBenchmarkResult};
@@ -113,8 +113,9 @@ fn run_benchmark(
         var_T,
     }
 }
-
+*/
 fn main() {
+    /*
     let old = sched_getaffinity(Pid::from_raw(0)).unwrap();
     println!(
         "Detailed:Benchmark,Pages,{},C,T",
@@ -147,4 +148,6 @@ fn main() {
             old,
         );
     }
+
+    */
 }
diff --git a/covert_channels_benchmark/src/main.rs b/covert_channels_benchmark/src/main.rs
index cf778f9..723b3a6 100644
--- a/covert_channels_benchmark/src/main.rs
+++ b/covert_channels_benchmark/src/main.rs
@@ -3,12 +3,17 @@

 use std::io::{stdout, Write};

+//use basic_timing_cache_channel::{naive::NaiveTimingChannel, TopologyAwareTimingChannel};
+use basic_timing_cache_channel::{TopologyAwareError, TopologyAwareTimingChannel};
+use cache_utils::calibration::Threshold;
 use covert_channels_evaluation::{benchmark_channel, CovertChannel, CovertChannelBenchmarkResult};
 use flush_flush::naive::NaiveFlushAndFlush;
-use flush_flush::{FlushAndFlush, SingleFlushAndFlush};
-use flush_reload::naive::NaiveFlushAndReload;
+use flush_flush::{FFPrimitives, FlushAndFlush, SingleFlushAndFlush};
+use flush_reload::naive::{NaiveFRPrimitives, NaiveFlushAndReload};
+use nix::sched::{sched_getaffinity, CpuSet};
+use nix::unistd::Pid;

-const NUM_BYTES: usize = 1 << 14; //20
+const NUM_BYTES: usize = 1 << 12;

 const NUM_PAGES: usize = 1;

@@ -16,10 +21,10 @@ const NUM_PAGES_2: usize = 4;

 const NUM_PAGE_MAX: usize = 32;

-const NUM_ITER: usize = 32;
+const NUM_ITER: usize = 16;

 struct BenchmarkStats {
-    raw_res: Vec<CovertChannelBenchmarkResult>,
+    raw_res: Vec<(CovertChannelBenchmarkResult, usize, usize)>,
     average_p: f64,
     var_p: f64,
     average_C: f64,
@@ -30,34 +35,59 @@ fn run_benchmark(
     name: &str,
-    constructor: impl Fn() -> T,
+    constructor: impl Fn(usize, usize) -> (T, usize, usize),
     num_iter: usize,
     num_pages: usize,
+    old: CpuSet,
 ) -> BenchmarkStats {
     let mut results = Vec::new();
     print!("Benchmarking {} with {} pages", name, num_pages);
-    for _ in 0..num_iter {
-        print!(".");
-        stdout().flush().expect("Failed to flush");
-        let channel = constructor();
-        let r = benchmark_channel(channel, num_pages, NUM_BYTES);
-        results.push(r);
+    let mut count = 0;
+    for i in 0..CpuSet::count() {
+        for j in 0..CpuSet::count() {
+            if old.is_set(i).unwrap() && old.is_set(j).unwrap() && i != j {
+                for _ in 0..num_iter {
+                    count += 1;
+                    print!(".");
+                    stdout().flush().expect("Failed to flush");
+                    let (channel, main_core, helper_core) = constructor(i, j);
+                    let r = benchmark_channel(channel, num_pages, NUM_BYTES);
+                    results.push((r, main_core, helper_core));
+                }
+            }
+        }
     }
     println!();
     let mut average_p = 0.0;
     let mut average_C = 0.0;
     let mut average_T = 0.0;
     for result in results.iter() {
-        println!("{:?}", result);
-        println!("C: {}, T: {}", result.capacity(), result.true_capacity());
-        println!("Detailed:\"{}\",{},{},{},{}", name, num_pages, result.csv(), result.capacity(), result.true_capacity());
-        average_p += result.error_rate;
-        average_C += result.capacity();
-        average_T += result.true_capacity()
+        println!(
+            "main: {} helper: {} result: {:?}",
+            result.1, result.2, result.0
+        );
+        println!(
+            "C: {}, T: {}",
+            result.0.capacity(),
+            result.0.true_capacity()
+        );
+        println!(
+            "Detailed:\"{}\",{},{},{},{},{},{}",
+            name,
+            num_pages,
+            result.1,
+            result.2,
+            result.0.csv(),
+            result.0.capacity(),
+            result.0.true_capacity()
+        );
+        average_p += result.0.error_rate;
+        average_C += result.0.capacity();
+        average_T += result.0.true_capacity()
     }
-    average_p /= num_iter as f64;
-    average_C /= num_iter as f64;
-    average_T /= num_iter as f64;
+    average_p /= count as f64;
+    average_C /= count as f64;
+    average_T /= count as f64;
     println!(
         "{} - {} Average p: {} C: {}, T: {}",
         name, num_pages, average_p, average_C, average_T
@@ -66,21 +96,25 @@
     let mut var_C = 0.0;
     let mut var_T = 0.0;
     for result in results.iter() {
-        let p = result.error_rate - average_p;
+        let p = result.0.error_rate - average_p;
         var_p += p * p;
-        let C = result.capacity() - average_C;
+        let C = result.0.capacity() - average_C;
         var_C += C * C;
-        let T = result.true_capacity() - average_T;
+        let T = result.0.true_capacity() - average_T;
         var_T += T * T;
     }
-    var_p /= num_iter as f64;
-    var_C /= num_iter as f64;
-    var_T /= num_iter as f64;
+    var_p /= count as f64;
+    var_C /= count as f64;
+    var_T /= count as f64;
     println!(
         "{} - {} Variance of p: {}, C: {}, T:{}",
         name, num_pages, var_p, var_C, var_T
     );
-    println!("CSV:\"{}\",{},{},{},{},{},{},{}",name,num_pages,average_p, average_C, average_T, var_p, var_C, var_T);
+    println!(
+        "CSV:\"{}\",{},{},{},{},{},{},{}",
+        name, num_pages, average_p, average_C, average_T, var_p, var_C, var_T
+    );
+
     BenchmarkStats {
         raw_res: results,
         average_p,
@@ -93,9 +127,72 @@
 }

 fn main() {
-    println!("Detailed:Benchmark,Pages,{},C,T",CovertChannelBenchmarkResult::csv_header());
-    println!("CSV:Benchmark,Pages,p,C,T,var_p,var_C,var_T");
+    let old = sched_getaffinity(Pid::from_raw(0)).unwrap();
+    println!(
+        "Detailed:Benchmark,Pages,main_core,helper_core,{},C,T",
+        CovertChannelBenchmarkResult::csv_header()
+    );
+    println!("CSV:Benchmark,Pages,main_core,helper_core,p,C,T,var_p,var_C,var_T");
+    for num_pages in 1..=32 {
+        let naive_ff = run_benchmark(
+            "Naive F+F",
+            |i, j| {
+                let mut r = NaiveFlushAndFlush::new(
+                    Threshold {
+                        bucket_index: 202,
+                        miss_faster_than_hit: true,
+                    },
+                    FFPrimitives {},
+                );
+                r.set_cores(i, j);
+                (r, i, j)
+            },
+            NUM_ITER,
+            num_pages,
+            old,
+        );
+
+        let fr = run_benchmark(
+            "F+R",
+            |i, j| {
+                let mut r = NaiveFlushAndReload::new(
+                    Threshold {
+                        bucket_index: 250,
+                        miss_faster_than_hit: false,
+                    },
+                    NaiveFRPrimitives {},
+                );
+                r.set_cores(i, j);
+                (r, i, j)
+            },
+            NUM_ITER,
+            num_pages,
+            old,
+        );
+
+        let ff = run_benchmark(
+            "Better F+F",
+            |i, j| {
+                let (mut r, i, j) = match FlushAndFlush::new_any_two_core(true, FFPrimitives {}) {
+                    Ok((channel, _old, main_core, helper_core)) => {
+                        (channel, main_core, helper_core)
+                    }
+                    Err(e) => {
+                        panic!("{:?}", e);
+                    }
+                };
+                (r, i, j)
+            },
+            1,
+            num_pages,
+            old,
+        );
+    }
+}
+/*
+fn main() {
+    for num_pages in 1..=32 {
         /*println!("Benchmarking F+F");
         for _ in 0..16 {
             // TODO Use the best possible ASV, not best possible AV
@@ -142,3 +239,4 @@
         );
     }
 }
+*/