From 168f81a19e5282b68bfc9803dfc9185a1dfd641f Mon Sep 17 00:00:00 2001
From: Guillaume DIDIER
Date: Tue, 24 Nov 2020 10:25:32 +0100
Subject: [PATCH] Final experiments

Code for final experiments.
---
 Cargo.lock                            |   1 +
 covert_channels_benchmark/Cargo.toml  |   5 +-
 covert_channels_benchmark/src/main.rs | 147 +++++++++++++++++++++++---
 covert_channels_evaluation/src/lib.rs |  22 +++-
 flush_flush/src/lib.rs                |  61 +++++++++++
 flush_flush/src/naive.rs              | 114 ++++++++++++++++++++
 flush_reload/src/naive.rs             |  53 ++++++----
 7 files changed, 363 insertions(+), 40 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 4d711ae..3460145 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -114,6 +114,7 @@ version = "0.1.0"
 dependencies = [
  "covert_channels_evaluation",
  "flush_flush",
+ "flush_reload",
 ]

 [[package]]
diff --git a/covert_channels_benchmark/Cargo.toml b/covert_channels_benchmark/Cargo.toml
index 578b486..0249f0c 100644
--- a/covert_channels_benchmark/Cargo.toml
+++ b/covert_channels_benchmark/Cargo.toml
@@ -7,5 +7,6 @@ edition = "2018"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-covert_channels_evaluation = {path = "../covert_channels_evaluation"}
-flush_flush = {path = "../flush_flush"}
+covert_channels_evaluation = { path = "../covert_channels_evaluation" }
+flush_flush = { path = "../flush_flush" }
+flush_reload = { path = "../flush_reload" }
diff --git a/covert_channels_benchmark/src/main.rs b/covert_channels_benchmark/src/main.rs
index 116e607..cace066 100644
--- a/covert_channels_benchmark/src/main.rs
+++ b/covert_channels_benchmark/src/main.rs
@@ -1,21 +1,140 @@
 #![feature(unsafe_block_in_unsafe_fn)]
 #![deny(unsafe_op_in_unsafe_fn)]

-use covert_channels_evaluation::benchmark_channel;
-use flush_flush::FlushAndFlush;
+use std::io::{stdout, Write};

-fn main() {
-    for _ in 0..16 {
-        //let sender = 0;
-        //let receiver = 2;
-        let (channel, old, receiver, sender) = match FlushAndFlush::new_any_two_core(true) {
-            Err(e) => {
-                panic!("{:?}", e);
-            }
-            Ok(r) => r,
-        };
+use covert_channels_evaluation::{benchmark_channel, CovertChannel, CovertChannelBenchmarkResult};
+use flush_flush::naive::NaiveFlushAndFlush;
+use flush_flush::{FlushAndFlush, SingleFlushAndFlush};
+use flush_reload::naive::NaiveFlushAndReload;

-        let r = benchmark_channel(channel, 1, 1 << 15);
-        println!("{:?}", r);
+const NUM_BYTES: usize = 1 << 14; //20
+
+const NUM_PAGES: usize = 1;
+
+const NUM_PAGES_2: usize = 4;
+
+const NUM_PAGE_MAX: usize = 32;
+
+const NUM_ITER: usize = 32;
+
+struct BenchmarkStats {
+    raw_res: Vec<CovertChannelBenchmarkResult>,
+    average_p: f64,
+    var_p: f64,
+    average_C: f64,
+    var_C: f64,
+    average_T: f64,
+    var_T: f64,
+}
+
+fn run_benchmark<T: CovertChannel + 'static>(
+    name: &str,
+    constructor: impl Fn() -> T,
+    num_iter: usize,
+    num_pages: usize,
+) -> BenchmarkStats {
+    let mut results = Vec::new();
+    print!("Benchmarking {} with {} pages", name, num_pages);
+    for _ in 0..num_iter {
+        print!(".");
+        stdout().flush().expect("Failed to flush");
+        let channel = constructor();
+        let r = benchmark_channel(channel, num_pages, NUM_BYTES);
+        results.push(r);
+    }
+    println!();
+    let mut average_p = 0.0;
+    let mut average_C = 0.0;
+    let mut average_T = 0.0;
+    for result in results.iter() {
+        println!("{:?}", result);
+        println!("C: {}, T: {}", result.capacity(), result.true_capacity());
+        average_p += result.error_rate;
+        average_C += result.capacity();
+        average_T += result.true_capacity()
+    }
+    average_p /= num_iter as f64;
+    average_C /= num_iter as f64;
+    average_T /= num_iter as f64;
+    println!(
+        "{} - {} Average p: {} C: {}, T: {}",
+        name, num_pages, average_p, average_C, average_T
+    );
+    let mut var_p = 0.0;
+    let mut var_C = 0.0;
+    let mut var_T = 0.0;
+    for result in results.iter() {
+        let p = result.error_rate - average_p;
+        var_p += p * p;
+        let C = result.capacity() - average_C;
+        var_C += C * C;
+        let T = result.true_capacity() - average_T;
+        var_T += T * T;
+    }
+    var_p /= num_iter as f64;
+    var_C /= num_iter as f64;
+    var_T /= num_iter as f64;
+    println!(
+        "{} - {} Variance of p: {}, C: {}, T:{}",
+        name, num_pages, var_p, var_C, var_T
+    );
+    BenchmarkStats {
+        raw_res: results,
+        average_p,
+        var_p,
+        average_C,
+        var_C,
+        average_T,
+        var_T,
+    }
+}
+
+fn main() {
+    for num_pages in 1..=32 {
+        /*println!("Benchmarking F+F");
+        for _ in 0..16 {
+            // TODO Use the best possible ASV, not best possible AV
+            let (channel, old, receiver, sender) = match SingleFlushAndFlush::new_any_two_core(true) {
+                Err(e) => {
+                    panic!("{:?}", e);
+                }
+                Ok(r) => r,
+            };
+
+            let r = benchmark_channel(channel, NUM_PAGES, NUM_BYTES);
+            println!("{:?}", r);
+            println!("C: {}, T: {}", r.capacity(), r.true_capacity());
+
+        }*/
+
+        let naive_ff = run_benchmark(
+            "Naive F+F",
+            || NaiveFlushAndFlush::from_threshold(202),
+            NUM_ITER << 4,
+            num_pages,
+        );
+
+        let better_ff = run_benchmark(
+            "Better F+F",
+            || {
+                match FlushAndFlush::new_any_two_core(true) {
+                    Err(e) => {
+                        panic!("{:?}", e);
+                    }
+                    Ok(r) => r,
+                }
+                .0
+            },
+            NUM_ITER,
+            num_pages,
+        );
+
+        let fr = run_benchmark(
+            "F+R",
+            || NaiveFlushAndReload::from_threshold(230),
+            NUM_ITER,
+            num_pages,
+        );
     }
 }
diff --git a/covert_channels_evaluation/src/lib.rs b/covert_channels_evaluation/src/lib.rs
index be1519c..e30d793 100644
--- a/covert_channels_evaluation/src/lib.rs
+++ b/covert_channels_evaluation/src/lib.rs
@@ -43,7 +43,18 @@ pub struct CovertChannelBenchmarkResult {
     pub num_bit_errors: usize,
     pub error_rate: f64,
     pub time_rdtsc: u64,
-    //pub time_seconds: todo
+    pub time_seconds: std::time::Duration,
+}
+
+impl CovertChannelBenchmarkResult {
+    pub fn capacity(&self) -> f64 {
+        (self.num_bytes_transmitted * 8) as f64 / self.time_seconds.as_secs_f64()
+    }
+
+    pub fn true_capacity(&self) -> f64 {
+        let p = self.error_rate;
+        self.capacity() * (1.0 + ((1.0 - p) * f64::log2(1.0 - p) + p * f64::log2(p)))
+    }
 }

 pub struct BitIterator<'a> {
@@ -97,7 +108,7 @@ unsafe impl<T: CovertChannel + ?Sized> Send for CovertChannelParams<T> {
 fn transmit_thread<T: CovertChannel + ?Sized>(
     num_bytes: usize,
     mut params: CovertChannelParams<T>,
-) -> (u64, Vec<u8>) {
+) -> (u64, std::time::Instant, Vec<u8>) {
     let old_affinity = set_affinity(&(*params.covert_channel).helper_core());

     let mut result = Vec::new();
@@ -109,6 +120,7 @@ fn transmit_thread<T: CovertChannel + ?Sized>(
     let mut bit_sent = 0;
     let mut bit_iter = BitIterator::new(&result);

+    let start_time = std::time::Instant::now();
     let start = unsafe { rdtsc_fence() };
     while !bit_iter.atEnd() {
         for page in params.pages.iter_mut() {
@@ -121,7 +133,7 @@ fn transmit_thread<T: CovertChannel + ?Sized>(
             }
         }
     }
-    (start, result)
+    (start, start_time, result)
 }

 pub fn benchmark_channel(
@@ -194,8 +206,9 @@ pub fn benchmark_channel(
     }
     let stop = unsafe { rdtsc_fence() };
+    let stop_time = std::time::Instant::now();
     let r = helper.join();
-    let (start, sent_bytes) = match r {
+    let (start, start_time, sent_bytes) = match r {
         Ok(r) => r,
         Err(e) => panic!("Join Error: {:?#}"),
     };

@@ -216,6 +229,7 @@ pub fn benchmark_channel(
         num_bit_errors: num_bit_error,
         error_rate,
         time_rdtsc: stop - start,
+        time_seconds: stop_time - start_time,
     }
 }

diff --git a/flush_flush/src/lib.rs b/flush_flush/src/lib.rs
index cc0360c..800618d 100644
--- a/flush_flush/src/lib.rs
+++ b/flush_flush/src/lib.rs
@@ -516,6 +516,67 @@ impl MultipleAddrCacheSideChannel for FlushAndFlush {
 unsafe impl Send for FlushAndFlush {}
 unsafe impl Sync for FlushAndFlush {}

+impl CovertChannel for SingleFlushAndFlush {
+    const BIT_PER_PAGE: usize = 1; //PAGE_SHIFT - 6; // FIXME MAGIC cache line size
+
+    unsafe fn transmit<'a>(&self, page: *const u8, bits: &mut BitIterator<'a>) {
+        let mut offset = 0;
+
+        let page = self.0.preferred_address[&page];
+
+        if let Some(b) = bits.next() {
+            //println!("Transmitting {} on page {:p}", b, page);
+            if b {
+                unsafe { only_reload(page) };
+            } else {
+                unsafe { only_flush(page) };
+            }
+        }
+    }
+
+    unsafe fn receive(&self, page: *const u8) -> Vec<bool> {
+        let addresses: Vec<*const u8> = vec![self.0.preferred_address[&page]];
+        let r = unsafe { self.0.test_impl(&mut addresses.iter(), u32::max_value()) };
+        match r {
+            Err(e) => panic!("{:?}", e),
+            Ok(status_vec) => {
+                assert_eq!(status_vec.len(), 1);
+                let received = status_vec[0].1 == Hit;
+                return vec![received];
+            }
+        }
+    }
+
+    unsafe fn ready_page(&mut self, page: *const u8) {
+        let r = unsafe { self.0.calibrate(vec![page].into_iter()) }.unwrap();
+        let mut best_error_rate = 1.0;
+        let mut best_slice = 0;
+        for (sp, threshold_error) in self
+            .0
+            .thresholds
+            .iter()
+            .filter(|kv| kv.0.page == page as VPN)
+        {
+            if threshold_error.error.error_rate() < best_error_rate {
+                best_error_rate = threshold_error.error.error_rate();
+                best_slice = sp.slice;
+            }
+        }
+        for i in 0..PAGE_LEN {
+            let addr = unsafe { page.offset(i as isize) };
+            if self.0.get_slice(addr) == best_slice {
+                self.0.preferred_address.insert(page, addr);
+                let r = unsafe {
+                    self.0
+                        .prepare_impl(&mut vec![addr].iter(), u32::max_value())
+                }
+                .unwrap();
+                break;
+            }
+        }
+    }
+}
+
 impl CovertChannel for FlushAndFlush {
     const BIT_PER_PAGE: usize = 1; //PAGE_SHIFT - 6; // FIXME MAGIC cache line size

diff --git a/flush_flush/src/naive.rs b/flush_flush/src/naive.rs
index e69de29..6f76c70 100644
--- a/flush_flush/src/naive.rs
+++ b/flush_flush/src/naive.rs
@@ -0,0 +1,114 @@
+use cache_side_channel::{
+    CacheStatus, ChannelFatalError, CoreSpec, SideChannelError, SingleAddrCacheSideChannel,
+};
+use cache_utils::calibration::{get_vpn, only_flush, only_reload, VPN};
+use cache_utils::flush;
+use covert_channels_evaluation::{BitIterator, CovertChannel};
+use nix::sched::{sched_getaffinity, CpuSet};
+use nix::unistd::Pid;
+use std::collections::HashMap;
+use std::thread::current;
+
+#[derive(Debug)]
+pub struct NaiveFlushAndFlush {
+    pub threshold: u64,
+    current: HashMap<VPN, *const u8>,
+}
+
+impl NaiveFlushAndFlush {
+    pub fn from_threshold(threshold: u64) -> Self {
+        NaiveFlushAndFlush {
+            threshold,
+            current: Default::default(),
+        }
+    }
+    unsafe fn test_impl(&self, addr: *const u8) -> Result<CacheStatus, SideChannelError> {
+        let vpn = get_vpn(addr);
+        if self.current.get(&vpn) != Some(&addr) {
+            return Err(SideChannelError::AddressNotReady(addr));
+        }
+        let t = unsafe { only_flush(addr) };
+        if t < self.threshold {
+            Ok(CacheStatus::Miss)
+        } else {
+            Ok(CacheStatus::Hit)
+        }
+    }
+}
+
+impl SingleAddrCacheSideChannel for NaiveFlushAndFlush {
+    /// # Safety
+    ///
+    /// addr needs to be a valid pointer
+    unsafe fn test_single(&mut self, addr: *const u8) -> Result<CacheStatus, SideChannelError> {
+        unsafe { self.test_impl(addr) }
+    }
+
+    /// # Safety:
+    ///
+    /// addr needs to be a valid pointer
+    unsafe fn prepare_single(&mut self, addr: *const u8) -> Result<(), SideChannelError> {
+        unsafe { flush(addr) };
+        let vpn = get_vpn(addr);
+        self.current.insert(vpn, addr);
+        Ok(())
+    }
+
+    fn victim_single(&mut self, operation: &dyn Fn()) {
+        operation()
+    }
+
+    /// # Safety
+    ///
+    /// addr needs to be a valid pointer
+    unsafe fn calibrate_single(
+        &mut self,
+        _addresses: impl IntoIterator<Item = *const u8>,
+    ) -> Result<(), ChannelFatalError> {
+        Ok(())
+    }
+}
+
+unsafe impl Send for NaiveFlushAndFlush {}
+unsafe impl Sync for NaiveFlushAndFlush {}
+
+impl CoreSpec for NaiveFlushAndFlush {
+    fn main_core(&self) -> CpuSet {
+        sched_getaffinity(Pid::from_raw(0)).unwrap()
+    }
+
+    fn helper_core(&self) -> CpuSet {
+        sched_getaffinity(Pid::from_raw(0)).unwrap()
+    }
+}
+
+impl CovertChannel for NaiveFlushAndFlush {
+    const BIT_PER_PAGE: usize = 1;
+
+    unsafe fn transmit<'a>(&self, page: *const u8, bits: &mut BitIterator<'a>) {
+        let vpn = get_vpn(page);
+        let addr = self.current.get(&vpn).unwrap();
+        if let Some(b) = bits.next() {
+            if b {
+                unsafe { only_reload(*addr) };
+            } else {
+                unsafe { only_flush(*addr) };
+            }
+        }
+    }
+
+    unsafe fn receive(&self, page: *const u8) -> Vec<bool> {
+        let r = unsafe { self.test_impl(page) };
+        match r {
+            Err(e) => panic!(),
+            Ok(status) => match status {
+                CacheStatus::Hit => vec![true],
+                CacheStatus::Miss => vec![false],
+            },
+        }
+    }
+
+    unsafe fn ready_page(&mut self, page: *const u8) {
+        unsafe { self.prepare_single(page) };
+    }
+}
diff --git a/flush_reload/src/naive.rs b/flush_reload/src/naive.rs
index ed1dff6..deb6f17 100644
--- a/flush_reload/src/naive.rs
+++ b/flush_reload/src/naive.rs
@@ -1,23 +1,38 @@
 use cache_side_channel::{
     CacheStatus, ChannelFatalError, CoreSpec, SideChannelError, SingleAddrCacheSideChannel,
 };
-use cache_utils::calibration::only_reload;
+use cache_utils::calibration::{get_vpn, only_flush, only_reload, VPN};
 use cache_utils::flush;
 use covert_channels_evaluation::{BitIterator, CovertChannel};
 use nix::sched::{sched_getaffinity, CpuSet};
 use nix::unistd::Pid;
+use std::collections::HashMap;
+use std::thread::current;

 #[derive(Debug)]
 pub struct NaiveFlushAndReload {
     pub threshold: u64,
-    current: Option<*const u8>,
+    current: HashMap<VPN, *const u8>,
 }

 impl NaiveFlushAndReload {
     pub fn from_threshold(threshold: u64) -> Self {
         NaiveFlushAndReload {
             threshold,
-            current: None,
+            current: Default::default(),
+        }
+    }
+    unsafe fn test_impl(&self, addr: *const u8) -> Result<CacheStatus, SideChannelError> {
+        let vpn = get_vpn(addr);
+        if self.current.get(&vpn) != Some(&addr) {
+            return Err(SideChannelError::AddressNotReady(addr));
+        }
+        let t = unsafe { only_reload(addr) };
+        unsafe { flush(addr) };
+        if t > self.threshold {
+            Ok(CacheStatus::Miss)
+        } else {
+            Ok(CacheStatus::Hit)
         }
     }
 }
@@ -27,15 +42,7 @@ impl SingleAddrCacheSideChannel for NaiveFlushAndReload {
     ///
     /// addr needs to be a valid pointer
     unsafe fn test_single(&mut self, addr: *const u8) -> Result<CacheStatus, SideChannelError> {
-        if self.current != Some(addr) {
-            return Err(SideChannelError::AddressNotReady(addr));
-        }
-        let t = unsafe { only_reload(addr) };
-        if t > self.threshold {
-            Ok(CacheStatus::Miss)
-        } else {
-            Ok(CacheStatus::Hit)
-        }
+        unsafe { self.test_impl(addr) }
     }

     /// # Safety:
@@ -43,7 +50,8 @@ impl SingleAddrCacheSideChannel for NaiveFlushAndReload {
     /// addr needs to be a valid pointer
     unsafe fn prepare_single(&mut self, addr: *const u8) -> Result<(), SideChannelError> {
         unsafe { flush(addr) };
-        self.current = Some(addr);
+        let vpn = get_vpn(addr);
+        self.current.insert(vpn, addr);
         Ok(())
     }

@@ -79,24 +87,29 @@ impl CovertChannel for NaiveFlushAndReload {
     const BIT_PER_PAGE: usize = 1;

     unsafe fn transmit<'a>(&self, page: *const u8, bits: &mut BitIterator<'a>) {
-        unimplemented!()
+        let vpn = get_vpn(page);
+        let addr = self.current.get(&vpn).unwrap();
+        if let Some(b) = bits.next() {
+            if b {
+                unsafe { only_reload(*addr) };
+            } else {
+                unsafe { only_flush(*addr) };
+            }
+        }
     }

     unsafe fn receive(&self, page: *const u8) -> Vec<bool> {
-        unimplemented!()
-        /*
-        let r = self.test_single(page);
+        let r = unsafe { self.test_impl(page) };
         match r {
-            Err(e) => unimplemented!(),
+            Err(e) => panic!(),
             Ok(status) => match status {
                 CacheStatus::Hit => vec![true],
                 CacheStatus::Miss => vec![false],
             },
         }
-        */
     }

     unsafe fn ready_page(&mut self, page: *const u8) {
-        unimplemented!()
+        unsafe { self.prepare_single(page) };
     }
 }
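The capacity() and true_capacity() methods added in covert_channels_evaluation above model the covert channel as a binary symmetric channel: capacity() is the raw bit rate (bits transmitted divided by wall-clock time), and true_capacity() multiplies it by 1 - H(p), where H is the binary entropy of the measured bit error rate p. A minimal self-contained sketch of that computation follows; the helper names and example numbers are illustrative and not part of the patch, and the p = 0 and p = 1 edge cases are handled explicitly here, whereas the inline formula above yields NaN for them.

/// Binary entropy H(p) in bits, with H(0) = H(1) = 0 by convention.
fn binary_entropy(p: f64) -> f64 {
    if p <= 0.0 || p >= 1.0 {
        0.0
    } else {
        -(p * p.log2() + (1.0 - p) * (1.0 - p).log2())
    }
}

/// Error-free ("true") capacity in bit/s for `bytes` bytes sent in `seconds`
/// seconds with bit error rate `error_rate`, i.e. C * (1 - H(p)) for a
/// binary symmetric channel.
fn true_capacity(bytes: usize, seconds: f64, error_rate: f64) -> f64 {
    let raw = (bytes * 8) as f64 / seconds; // raw bit rate, as in capacity()
    raw * (1.0 - binary_entropy(error_rate))
}

fn main() {
    // Example: 1 << 14 bytes (NUM_BYTES above) in 0.05 s with a 1% error rate
    // gives ~2.62e6 bit/s raw and ~2.41e6 bit/s of error-free capacity.
    println!("{:.3e} bit/s", true_capacity(1 << 14, 0.05, 0.01));
}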