diff --git a/.idea/DendrobatesTinctoriusAzureus.iml b/.idea/DendrobatesTinctoriusAzureus.iml
index 1f78c4a..e13e218 100644
--- a/.idea/DendrobatesTinctoriusAzureus.iml
+++ b/.idea/DendrobatesTinctoriusAzureus.iml
@@ -38,6 +38,7 @@
+
diff --git a/Cargo.lock b/Cargo.lock
index b5872b0..4d711ae 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -76,6 +76,9 @@ dependencies = [
[[package]]
name = "cache_side_channel"
version = "0.1.0"
+dependencies = [
+ "nix",
+]
[[package]]
name = "cache_utils"
@@ -105,12 +108,22 @@ version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
+[[package]]
+name = "covert_channels_benchmark"
+version = "0.1.0"
+dependencies = [
+ "covert_channels_evaluation",
+ "flush_flush",
+]
+
[[package]]
name = "covert_channels_evaluation"
version = "0.1.0"
dependencies = [
"bit_field 0.10.1",
+ "cache_side_channel",
"cache_utils",
+ "nix",
"rand",
"turn_lock",
]
@@ -149,6 +162,7 @@ version = "0.1.0"
dependencies = [
"cache_side_channel",
"cache_utils",
+ "covert_channels_evaluation",
"nix",
]
@@ -158,6 +172,8 @@ version = "0.1.0"
dependencies = [
"cache_side_channel",
"cache_utils",
+ "covert_channels_evaluation",
+ "nix",
]
[[package]]
diff --git a/Cargo.toml b/Cargo.toml
index f6756fc..548d148 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -6,6 +6,7 @@ members = [
"cache_utils",
"cpuid",
"aes-t-tables",
+ "covert_channels_benchmark",
"covert_channels_evaluation",
"cache_side_channel",
"flush_reload",
diff --git a/aes-t-tables/src/lib.rs b/aes-t-tables/src/lib.rs
index 23e6ca3..f3688ed 100644
--- a/aes-t-tables/src/lib.rs
+++ b/aes-t-tables/src/lib.rs
@@ -6,7 +6,7 @@ use openssl::aes;
use crate::CacheStatus::Miss;
use cache_side_channel::table_side_channel::TableCacheSideChannel;
-use cache_side_channel::CacheStatus;
+use cache_side_channel::{restore_affinity, set_affinity, CacheStatus};
use memmap2::Mmap;
use openssl::aes::aes_ige;
use openssl::symm::Mode;
@@ -58,6 +58,8 @@ pub unsafe fn attack_t_tables_poc(
side_channel: &mut impl TableCacheSideChannel,
parameters: AESTTableParams,
) {
+ let old_affinity = set_affinity(&side_channel.main_core());
+
// Note : This function doesn't handle the case where the address space is not shared. (Additionally you have the issue of complicated eviction sets due to complex addressing)
// TODO
@@ -133,4 +135,6 @@ pub unsafe fn attack_t_tables_poc(
}
println!();
}
+
+ restore_affinity(&old_affinity);
}
diff --git a/aes-t-tables/src/main.rs b/aes-t-tables/src/main.rs
index 8aa3ba4..f9d7971 100644
--- a/aes-t-tables/src/main.rs
+++ b/aes-t-tables/src/main.rs
@@ -25,53 +25,62 @@ fn main() {
let openssl_path = Path::new(env!("OPENSSL_DIR")).join("lib/libcrypto.so");
let mut side_channel = NaiveFlushAndReload::from_threshold(220);
let te = TE_CITRON_VERT;
- unsafe {
- attack_t_tables_poc(
- &mut side_channel,
- AESTTableParams {
- num_encryptions: 1 << 12,
- key: [0; 32],
- te: te, // adjust me (should be in decreasing order)
- openssl_path: &openssl_path,
- },
- )
- }; /**/
- unsafe {
- attack_t_tables_poc(
- &mut side_channel,
- AESTTableParams {
- num_encryptions: 1 << 12,
- key: KEY2,
- te: te,
- openssl_path: &openssl_path,
- },
- )
- };
- let (mut side_channel_ff, old, core) = FlushAndFlush::new_any_single_core().unwrap();
- unsafe {
- attack_t_tables_poc(
- &mut side_channel_ff,
- AESTTableParams {
- num_encryptions: 1 << 12,
- key: [0; 32],
- te: te, // adjust me (should be in decreasing order)
- openssl_path: &openssl_path,
- },
- )
- };
+ for i in 0..4 {
+ println!("AES attack with Naive F+R, key 0");
+ unsafe {
+ attack_t_tables_poc(
+ &mut side_channel,
+ AESTTableParams {
+ num_encryptions: 1 << 12,
+ key: [0; 32],
+ te: te, // adjust me (should be in decreasing order)
+ openssl_path: &openssl_path,
+ },
+ )
+ };
+ println!("AES attack with Naive F+R, key 1");
+ unsafe {
+ attack_t_tables_poc(
+ &mut side_channel,
+ AESTTableParams {
+ num_encryptions: 1 << 12,
+ key: KEY2,
+ te: te,
+ openssl_path: &openssl_path,
+ },
+ )
+ };
+ println!("AES attack with Multiple F+F (limit = 3), key 0");
+ {
+ let (mut side_channel_ff, old, core) = FlushAndFlush::new_any_single_core().unwrap();
+ unsafe {
+ attack_t_tables_poc(
+ &mut side_channel_ff,
+ AESTTableParams {
+ num_encryptions: 1 << 12,
+ key: [0; 32],
+ te: te, // adjust me (should be in decreasing order)
+ openssl_path: &openssl_path,
+ },
+ )
+ };
+ }
- sched_setaffinity(Pid::from_raw(0), &old);
- let (mut side_channel_ff, old, core) = SingleFlushAndFlush::new_any_single_core().unwrap();
- unsafe {
- attack_t_tables_poc(
- &mut side_channel_ff,
- AESTTableParams {
- num_encryptions: 1 << 12,
- key: KEY2,
- te: te, // adjust me (should be in decreasing order)
- openssl_path: &openssl_path,
- },
- )
- };
- sched_setaffinity(Pid::from_raw(0), &old);
+ println!("AES attack with Single F+F , key 1");
+ {
+ let (mut side_channel_ff, old, core) =
+ SingleFlushAndFlush::new_any_single_core().unwrap();
+ unsafe {
+ attack_t_tables_poc(
+ &mut side_channel_ff,
+ AESTTableParams {
+ num_encryptions: 1 << 12,
+ key: KEY2,
+ te: te, // adjust me (should be in decreasing order)
+ openssl_path: &openssl_path,
+ },
+ )
+ }
+ }
+ }
}
diff --git a/cache_side_channel/Cargo.toml b/cache_side_channel/Cargo.toml
index 4a75860..551e884 100644
--- a/cache_side_channel/Cargo.toml
+++ b/cache_side_channel/Cargo.toml
@@ -7,3 +7,4 @@ edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
+nix = "0.18.0"
diff --git a/cache_side_channel/src/lib.rs b/cache_side_channel/src/lib.rs
index 2040155..220fa06 100644
--- a/cache_side_channel/src/lib.rs
+++ b/cache_side_channel/src/lib.rs
@@ -2,6 +2,8 @@
#![feature(unsafe_block_in_unsafe_fn)]
#![deny(unsafe_op_in_unsafe_fn)]
+use nix::sched::{sched_getaffinity, sched_setaffinity, CpuSet};
+use nix::unistd::Pid;
use std::fmt::Debug;
pub mod table_side_channel;
@@ -17,6 +19,7 @@ pub enum ChannelFatalError {
Oops,
}
+#[derive(Debug)]
pub enum SideChannelError {
NeedRecalibration,
FatalError(ChannelFatalError),
@@ -24,7 +27,23 @@ pub enum SideChannelError {
AddressNotCalibrated(*const u8),
}
-pub trait SingleAddrCacheSideChannel: Debug {
+pub trait CoreSpec {
+ fn main_core(&self) -> CpuSet;
+ fn helper_core(&self) -> CpuSet;
+}
+
+pub fn restore_affinity(cpu_set: &CpuSet) {
+ sched_setaffinity(Pid::from_raw(0), &cpu_set).unwrap();
+}
+
+#[must_use = "This result must be used to restore affinity"]
+pub fn set_affinity(cpu_set: &CpuSet) -> CpuSet {
+ let old = sched_getaffinity(Pid::from_raw(0)).unwrap();
+ sched_setaffinity(Pid::from_raw(0), &cpu_set).unwrap();
+ old
+}
+
+pub trait SingleAddrCacheSideChannel: CoreSpec + Debug {
//type SingleChannelFatalError: Debug;
/// # Safety
///
@@ -44,7 +63,7 @@ pub trait SingleAddrCacheSideChannel: Debug {
) -> Result<(), ChannelFatalError>;
}
-pub trait MultipleAddrCacheSideChannel: Debug {
+pub trait MultipleAddrCacheSideChannel: CoreSpec + Debug {
const MAX_ADDR: u32;
/// # Safety
///
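Note: `set_affinity` is marked `#[must_use]` on purpose: it returns the previous mask so the caller can undo the pinning with `restore_affinity`. A minimal standalone sketch of the same save/pin/restore pattern against the nix 0.18 API (core 0 is an arbitrary illustrative choice):

use nix::sched::{sched_getaffinity, sched_setaffinity, CpuSet};
use nix::unistd::Pid;

fn main() {
    // Hypothetical target core; CoreSpec::main_core() plays this role in the diff.
    let mut target = CpuSet::new();
    target.set(0).unwrap();
    // set_affinity: remember the old mask, then pin the current thread.
    let old = sched_getaffinity(Pid::from_raw(0)).unwrap();
    sched_setaffinity(Pid::from_raw(0), &target).unwrap();
    // ... pinned measurement would run here ...
    // restore_affinity: hand the saved mask back.
    sched_setaffinity(Pid::from_raw(0), &old).unwrap();
}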
diff --git a/cache_side_channel/src/table_side_channel.rs b/cache_side_channel/src/table_side_channel.rs
index 5538f50..c2dcbd9 100644
--- a/cache_side_channel/src/table_side_channel.rs
+++ b/cache_side_channel/src/table_side_channel.rs
@@ -1,9 +1,10 @@
use crate::{
- CacheStatus, ChannelFatalError, MultipleAddrCacheSideChannel, SideChannelError,
+ CacheStatus, ChannelFatalError, CoreSpec, MultipleAddrCacheSideChannel, SideChannelError,
SingleAddrCacheSideChannel,
};
use std::collections::HashMap;
+use std::fmt::Debug;
pub struct TableAttackResult {
pub addr: *const u8,
@@ -20,7 +21,7 @@ impl TableAttackResult {
}
}
-pub trait TableCacheSideChannel {
+pub trait TableCacheSideChannel: CoreSpec + Debug {
//type ChannelFatalError: Debug;
/// # Safety
///
diff --git a/cache_utils/src/bin/two_thread_cal.rs b/cache_utils/src/bin/two_thread_cal.rs
index 4baa27e..65a52e3 100644
--- a/cache_utils/src/bin/two_thread_cal.rs
+++ b/cache_utils/src/bin/two_thread_cal.rs
@@ -252,7 +252,6 @@ fn main() {
let new_analysis: Result<HashMap<ASVP, ErrorPredictions>, nix::Error> =
calibration_result_to_ASVP(
r,
- pointer,
|cal_1t_res| {
ErrorPredictions::predict_errors(HistogramCumSum::from_calibrate(
cal_1t_res, hit_index, miss_index,
@@ -645,8 +644,8 @@ fn main() {
// Print header
println!(
"AVAnalysis:Attacker,Victim,{},{}",
- error_header("AVSP_Best_AV_"),
- error_header("AV_Best_AV_")
+ error_header("AVSP_AVAverage_"),
+ error_header("AV_AVAverage_")
);
//print lines
@@ -668,9 +667,9 @@ fn main() {
println!(
"AttackerAnalysis:Attacker,{},{},{}",
- error_header("AVSP_Best_A_"),
- error_header("ASP_Best_A_"),
- error_header("AV_Best_A_"),
+ error_header("AVSP_AAverage_"),
+ error_header("ASP_AAverage_"),
+ error_header("AV_AAverage_"),
);
for attacker in keys {
@@ -682,15 +681,4 @@ fn main() {
AV = format_error(&av_best_a_erros[&attacker].0)
);
}
-
- /*
- println!(
- "analysis result: {:?}",
- asvp_threshold_errors.keys().copied().collect::<Vec<ASVP>>()
- );
- println!("Global Analysis: {:#?}", global_threshold_errors);
- println!(
- "Global thrshold total error rate :{}",
- global_threshold_errors.error.error_rate()
- );*/
}
diff --git a/cache_utils/src/calibration.rs b/cache_utils/src/calibration.rs
index 99c52a9..1bed49e 100644
--- a/cache_utils/src/calibration.rs
+++ b/cache_utils/src/calibration.rs
@@ -112,7 +112,8 @@ pub unsafe fn l3_and_reload(p: *const u8) -> u64 {
only_reload(p)
}
-pub const PAGE_LEN: usize = 1 << 12;
+pub const PAGE_SHIFT: usize = 12;
+pub const PAGE_LEN: usize = 1 << PAGE_SHIFT;
pub fn get_vpn<T>(p: *const T) -> usize {
(p as usize) & (!(PAGE_LEN - 1)) // FIXME
@@ -246,6 +247,7 @@ pub fn calibrate_flush(
#[derive(Debug)]
pub struct CalibrateResult {
+ pub page: VPN,
pub offset: isize,
pub histogram: Vec<Vec<u32>>,
pub median: Vec<u64>,
@@ -383,6 +385,7 @@ fn calibrate_impl_fixed_freq(
// TODO add some useful impl to CalibrateResults
let mut calibrate_result = CalibrateResult {
+ page: get_vpn(pointer),
offset: i,
histogram: Vec::new(),
median: vec![0; operations.len()],
@@ -646,12 +649,12 @@ fn calibrate_fixed_freq_2_thread_impl<I: Iterator<Item = (usize, usize)>>(
"Calibration for main_core {}, helper {}.",
main_core, helper_core
);
- }
- eprintln!(
- "Calibration for main_core {}, helper {}.",
- main_core, helper_core
- );
+ eprintln!(
+ "Calibration for main_core {}, helper {}.",
+ main_core, helper_core
+ );
+ }
let mut core = CpuSet::new();
match core.set(main_core) {
@@ -716,6 +719,7 @@ fn calibrate_fixed_freq_2_thread_impl<I: Iterator<Item = (usize, usize)>>(
// TODO add some useful impl to CalibrateResults
let mut calibrate_result = CalibrateResult {
+ page: get_vpn(pointer),
offset: i,
histogram: Vec::new(),
median: vec![0; operations.len()],
@@ -1574,7 +1578,6 @@ impl Threshold {
pub fn calibration_result_to_ASVP<T, Analysis: Fn(CalibrateResult) -> T>(
results: Vec<CalibrateResult2T>,
- base: *const u8,
analysis: Analysis,
slicing: &impl Fn(usize) -> u8,
) -> Result<HashMap<ASVP, T>, nix::Error> {
@@ -1587,8 +1590,8 @@ pub fn calibration_result_to_ASVP<T, Analysis: Fn(CalibrateResult) -> T>(
Ok(calibrate_1t_results) => {
for result_1t in calibrate_1t_results {
let offset = result_1t.offset;
- let addr = unsafe { base.offset(offset) };
- let page = get_vpn(addr); //TODO
+ let page = result_1t.page;
+ let addr = page + offset as usize;
let slice = slicing(addr as usize);
let analysed = analysis(result_1t);
let asvp = ASVP {
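Note: `CalibrateResult` now records its own VPN, so `calibration_result_to_ASVP` can rebuild each measured address as `page + offset` instead of offsetting a base pointer threaded through the call chain. A self-contained sketch of that round trip, reusing the `get_vpn` mask and `PAGE_SHIFT = 12` from this diff:

const PAGE_SHIFT: usize = 12;
const PAGE_LEN: usize = 1 << PAGE_SHIFT;

// Same masking as get_vpn: clear the low page-offset bits.
fn vpn(addr: usize) -> usize {
    addr & !(PAGE_LEN - 1)
}

fn main() {
    let addr: usize = 0x7ffd_1234_5678;
    let page = vpn(addr);
    let offset = addr - page;
    assert_eq!(page + offset, addr); // the reconstruction the new code relies on
    assert!(offset < PAGE_LEN);
    println!("page {:#x}, offset {:#x}", page, offset);
}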
diff --git a/cache_utils/src/mmap.rs b/cache_utils/src/mmap.rs
index 7727c60..d2ed871 100644
--- a/cache_utils/src/mmap.rs
+++ b/cache_utils/src/mmap.rs
@@ -26,7 +26,7 @@ pub struct MMappedMemory<T> {
}
impl<T> MMappedMemory<T> {
- pub fn try_new(size: usize) -> Result<MMappedMemory<T>, nix::Error> {
+ pub fn try_new(size: usize, huge: bool) -> Result<MMappedMemory<T>, nix::Error> {
assert_ne!(size_of::<T>(), 0);
if let Some(p) = unsafe {
let p = mman::mmap(
@@ -35,11 +35,16 @@ impl MMappedMemory {
mman::ProtFlags::PROT_READ | mman::ProtFlags::PROT_WRITE,
mman::MapFlags::MAP_PRIVATE
| mman::MapFlags::MAP_ANONYMOUS
- | mman::MapFlags::MAP_HUGETLB,
+ | if huge {
+ mman::MapFlags::MAP_HUGETLB
+ } else {
+ mman::MapFlags::MAP_ANONYMOUS
+ },
-1,
0,
)?;
- Unique::new(p as *mut T)
+ let pointer_T = p as *mut T;
+ Unique::new(pointer_T)
} {
Ok(MMappedMemory { pointer: p, size })
} else {
@@ -47,8 +52,8 @@ impl MMappedMemory {
}
}
- pub fn new(size: usize) -> MMappedMemory<T> {
- Self::try_new(size).unwrap()
+ pub fn new(size: usize, huge: bool) -> MMappedMemory<T> {
+ Self::try_new(size, huge).unwrap()
}
pub fn slice(&self) -> &[T] {
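Note: when `huge` is false, the new branch ORs in `MAP_ANONYMOUS` a second time, which is a no-op because that flag is already set; `MapFlags::empty()` would state the intent more directly. A sketch of the same conditional mapping against the nix 0.18 API (unwrap error handling for brevity):

use nix::sys::mman::{self, MapFlags, ProtFlags};
use std::ptr::null_mut;

// Map `size` anonymous bytes, optionally requesting huge pages.
fn map_anon(size: usize, huge: bool) -> *mut u8 {
    let flags = MapFlags::MAP_PRIVATE
        | MapFlags::MAP_ANONYMOUS
        | if huge { MapFlags::MAP_HUGETLB } else { MapFlags::empty() };
    let p = unsafe {
        mman::mmap(
            null_mut(),
            size,
            ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
            flags,
            -1,
            0,
        )
    }
    .unwrap();
    p as *mut u8
}

fn main() {
    let p = map_anon(1 << 12, false); // one regular page
    unsafe { p.write(42) };
    println!("mapped at {:p}", p);
}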
diff --git a/covert_channels_benchmark/.cargo/config b/covert_channels_benchmark/.cargo/config
new file mode 100644
index 0000000..2f05654
--- /dev/null
+++ b/covert_channels_benchmark/.cargo/config
@@ -0,0 +1,2 @@
+[build]
+target = "x86_64-unknown-linux-gnu"
diff --git a/covert_channels_benchmark/Cargo.toml b/covert_channels_benchmark/Cargo.toml
new file mode 100644
index 0000000..578b486
--- /dev/null
+++ b/covert_channels_benchmark/Cargo.toml
@@ -0,0 +1,11 @@
+[package]
+name = "covert_channels_benchmark"
+version = "0.1.0"
+authors = ["Guillume DIDIER "]
+edition = "2018"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+covert_channels_evaluation = {path = "../covert_channels_evaluation"}
+flush_flush = {path = "../flush_flush"}
diff --git a/covert_channels_benchmark/src/main.rs b/covert_channels_benchmark/src/main.rs
new file mode 100644
index 0000000..116e607
--- /dev/null
+++ b/covert_channels_benchmark/src/main.rs
@@ -0,0 +1,21 @@
+#![feature(unsafe_block_in_unsafe_fn)]
+#![deny(unsafe_op_in_unsafe_fn)]
+
+use covert_channels_evaluation::benchmark_channel;
+use flush_flush::FlushAndFlush;
+
+fn main() {
+ for _ in 0..16 {
+ //let sender = 0;
+ //let receiver = 2;
+ let (channel, old, receiver, sender) = match FlushAndFlush::new_any_two_core(true) {
+ Err(e) => {
+ panic!("{:?}", e);
+ }
+ Ok(r) => r,
+ };
+
+ let r = benchmark_channel(channel, 1, 1 << 15);
+ println!("{:?}", r);
+ }
+}
diff --git a/covert_channels_evaluation/Cargo.toml b/covert_channels_evaluation/Cargo.toml
index 83c16a7..09bc917 100644
--- a/covert_channels_evaluation/Cargo.toml
+++ b/covert_channels_evaluation/Cargo.toml
@@ -11,3 +11,5 @@ rand = "0.7.3"
bit_field = "0.10.1"
turn_lock = { path = "../turn_lock" }
cache_utils = { path = "../cache_utils" }
+nix = "0.18.0"
+cache_side_channel = { path = "../cache_side_channel" }
diff --git a/covert_channels_evaluation/src/lib.rs b/covert_channels_evaluation/src/lib.rs
index 1bad8ce..be1519c 100644
--- a/covert_channels_evaluation/src/lib.rs
+++ b/covert_channels_evaluation/src/lib.rs
@@ -16,22 +16,28 @@ const PAGE_SIZE: usize = 1 << 12; // FIXME Magic
// Also time in order to determine duration, in rdtsc and seconds.
use bit_field::BitField;
+use cache_side_channel::{restore_affinity, set_affinity, CoreSpec};
use cache_utils::mmap::MMappedMemory;
use cache_utils::rdtsc_fence;
+use nix::sched::sched_getaffinity;
+use nix::unistd::Pid;
use std::any::Any;
use std::collections::VecDeque;
+use std::fmt::Debug;
use std::sync::Arc;
use std::thread;
/**
* Safety considerations: does not ensure thread safety; callers must lock as needed.
*/
-pub trait CovertChannel: Send + Sync {
+pub trait CovertChannel: Send + Sync + CoreSpec + Debug {
const BIT_PER_PAGE: usize;
unsafe fn transmit(&self, page: *const u8, bits: &mut BitIterator);
unsafe fn receive(&self, page: *const u8) -> Vec<bool>;
+ unsafe fn ready_page(&mut self, page: *const u8);
}
+#[derive(Debug)]
pub struct CovertChannelBenchmarkResult {
pub num_bytes_transmitted: usize,
pub num_bit_errors: usize,
@@ -84,7 +90,6 @@ struct CovertChannelPage {
struct CovertChannelParams<T: CovertChannel + ?Sized> {
pages: Vec<CovertChannelPage>,
covert_channel: Arc<T>,
- transmit_core: usize,
}
unsafe impl<T: CovertChannel + ?Sized> Send for CovertChannelParams<T> {}
@@ -93,45 +98,58 @@ fn transmit_thread<T: CovertChannel>(
num_bytes: usize,
mut params: CovertChannelParams<T>,
) -> (u64, Vec<u8>) {
+ let old_affinity = set_affinity(&(*params.covert_channel).helper_core());
+
let mut result = Vec::new();
result.reserve(num_bytes);
for _ in 0..num_bytes {
let byte = rand::random();
result.push(byte);
}
+
+ let mut bit_sent = 0;
let mut bit_iter = BitIterator::new(&result);
let start = unsafe { rdtsc_fence() };
while !bit_iter.atEnd() {
for page in params.pages.iter_mut() {
page.turn.wait();
unsafe { params.covert_channel.transmit(page.addr, &mut bit_iter) };
+ bit_sent += T::BIT_PER_PAGE;
page.turn.next();
+ if bit_iter.atEnd() {
+ break;
+ }
}
}
-
(start, result)
}
pub fn benchmark_channel<T: 'static + Send + CovertChannel>(
- channel: T,
+ mut channel: T,
num_pages: usize,
num_bytes: usize,
- transmit_core: usize,
- receive_core: usize,
) -> CovertChannelBenchmarkResult {
// Allocate pages
+
+ let old_affinity = set_affinity(&channel.main_core());
+
let size = num_pages * PAGE_SIZE;
- let m = MMappedMemory::new(size);
+ let mut m = MMappedMemory::new(size, false);
let mut pages_transmit = Vec::new();
let mut pages_receive = Vec::new();
+ for i in 0..num_pages {
+ m.slice_mut()[i * PAGE_SIZE] = i as u8;
+ }
let array: &[u8] = m.slice();
for i in 0..num_pages {
let addr = &array[i * PAGE_SIZE] as *const u8;
let mut turns = TurnLock::new(2);
let mut t_iter = turns.drain(0..);
- let receive_lock = t_iter.next().unwrap();
let transmit_lock = t_iter.next().unwrap();
+ let receive_lock = t_iter.next().unwrap();
+
assert!(t_iter.next().is_none());
+ unsafe { channel.ready_page(addr) };
pages_transmit.push(CovertChannelPage {
turn: transmit_lock,
addr,
@@ -141,17 +159,13 @@ pub fn benchmark_channel<T: 'static + Send + CovertChannel>(
addr,
});
}
+
let covert_channel_arc = Arc::new(channel);
let params = CovertChannelParams {
pages: pages_transmit,
covert_channel: covert_channel_arc.clone(),
- transmit_core,
};
- if transmit_core == receive_core {
- unimplemented!()
- }
-
let helper = thread::spawn(move || transmit_thread(num_bytes, params));
// Create the thread parameters
let mut received_bytes: Vec<u8> = Vec::new();
@@ -171,10 +185,14 @@ pub fn benchmark_channel(
}
received_bytes.push(byte);
}
+ if received_bytes.len() >= num_bytes {
+ break;
+ }
}
// TODO
// receiver thread
}
+
let stop = unsafe { rdtsc_fence() };
let r = helper.join();
let (start, sent_bytes) = match r {
@@ -184,13 +202,15 @@ pub fn benchmark_channel(
assert_eq!(sent_bytes.len(), received_bytes.len());
assert_eq!(num_bytes, received_bytes.len());
+ restore_affinity(&old_affinity);
+
let mut num_bit_error = 0;
for i in 0..num_bytes {
num_bit_error += (sent_bytes[i] ^ received_bytes[i]).count_ones() as usize;
}
let error_rate = (num_bit_error as f64) / ((num_bytes * u8::BIT_LENGTH) as f64);
- // Create transmit thread
+
CovertChannelBenchmarkResult {
num_bytes_transmitted: num_bytes,
num_bit_errors: num_bit_error,
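Note: swapping the lock order above gives the transmitter the first turn on each page's `TurnLock`, so the receiver only probes a page after it has been written. A sketch of that two-party hand-off using a std condvar as a stand-in for the `turn_lock` crate (an illustration of the protocol, not the `TurnLock` API):

use std::sync::{Arc, Condvar, Mutex};
use std::thread;

// Toy stand-in for TurnLock: two parties alternate on a shared counter.
struct Turn {
    state: Mutex<usize>,
    cv: Condvar,
}

impl Turn {
    fn wait_for(&self, me: usize) {
        let mut turn = self.state.lock().unwrap();
        while *turn % 2 != me {
            turn = self.cv.wait(turn).unwrap();
        }
    }
    fn pass(&self) {
        *self.state.lock().unwrap() += 1;
        self.cv.notify_all();
    }
}

fn main() {
    let turn = Arc::new(Turn { state: Mutex::new(0), cv: Condvar::new() });
    let t = turn.clone();
    // The transmitter owns turn 0 first, matching the new lock order.
    let tx = thread::spawn(move || {
        for &bit in [true, false, true].iter() {
            t.wait_for(0);
            println!("transmit {}", bit); // stands in for covert_channel.transmit
            t.pass();
        }
    });
    for _ in 0..3 {
        turn.wait_for(1);
        println!("receive"); // stands in for covert_channel.receive
        turn.pass();
    }
    tx.join().unwrap();
}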
diff --git a/flush_flush/Cargo.toml b/flush_flush/Cargo.toml
index 66fee6e..41fad20 100644
--- a/flush_flush/Cargo.toml
+++ b/flush_flush/Cargo.toml
@@ -10,3 +10,4 @@ edition = "2018"
cache_utils = { path = "../cache_utils" }
cache_side_channel = { path = "../cache_side_channel" }
nix = "0.18.0"
+covert_channels_evaluation = {path = "../covert_channels_evaluation"}
diff --git a/flush_flush/src/lib.rs b/flush_flush/src/lib.rs
index c0765a9..cc0360c 100644
--- a/flush_flush/src/lib.rs
+++ b/flush_flush/src/lib.rs
@@ -5,13 +5,14 @@ pub mod naive;
use cache_side_channel::SideChannelError::{AddressNotCalibrated, AddressNotReady};
use cache_side_channel::{
- CacheStatus, ChannelFatalError, MultipleAddrCacheSideChannel, SideChannelError,
+ CacheStatus, ChannelFatalError, CoreSpec, MultipleAddrCacheSideChannel, SideChannelError,
SingleAddrCacheSideChannel,
};
use cache_utils::calibration::{
- calibrate_fixed_freq_2_thread, get_cache_slicing, get_vpn, only_flush, CalibrateOperation2T,
- CalibrationOptions, HistParams, Verbosity, CFLUSH_BUCKET_NUMBER, CFLUSH_BUCKET_SIZE,
- CFLUSH_NUM_ITER, PAGE_LEN,
+ accumulate, calibrate_fixed_freq_2_thread, calibration_result_to_ASVP, get_cache_slicing,
+ get_vpn, only_flush, only_reload, CalibrateOperation2T, CalibrationOptions, ErrorPredictions,
+ HistParams, HistogramCumSum, PotentialThresholds, Verbosity, ASVP, CFLUSH_BUCKET_NUMBER,
+ CFLUSH_BUCKET_SIZE, CFLUSH_NUM_ITER, PAGE_LEN, PAGE_SHIFT,
};
use cache_utils::calibration::{ErrorPrediction, Slice, Threshold, ThresholdError, AV, SP, VPN};
use cache_utils::complex_addressing::CacheSlicing;
@@ -28,11 +29,13 @@ pub struct FlushAndFlush {
slicing: CacheSlicing,
attacker_core: usize,
victim_core: usize,
+ preferred_address: HashMap<*const u8, *const u8>,
}
#[derive(Debug)]
pub enum FlushAndFlushError {
NoSlicing,
+ Nix(nix::Error),
}
#[derive(Debug)]
@@ -56,6 +59,16 @@ impl SingleFlushAndFlush {
}
}
+impl CoreSpec for SingleFlushAndFlush {
+ fn main_core(&self) -> CpuSet {
+ self.0.main_core()
+ }
+
+ fn helper_core(&self) -> CpuSet {
+ self.0.helper_core()
+ }
+}
+
impl SingleAddrCacheSideChannel for SingleFlushAndFlush {
unsafe fn test_single(&mut self, addr: *const u8) -> Result<CacheStatus, SideChannelError> {
unsafe { self.0.test_single(addr) }
@@ -90,6 +103,7 @@ impl FlushAndFlush {
slicing,
attacker_core,
victim_core,
+ preferred_address: Default::default(),
};
Ok(ret)
} else {
@@ -126,6 +140,14 @@ impl FlushAndFlush {
let mut calibrate_results2t_vec = Vec::new();
+ let slicing = match get_cache_slicing(core_per_socket) {
+ Some(s) => s,
+ None => {
+ return Err(FlushAndFlushError::NoSlicing);
+ }
+ };
+ let h = |addr: usize| slicing.hash(addr).unwrap();
+
for page in pages {
// FIXME Cache line size is magic
let mut r = unsafe {
@@ -139,7 +161,7 @@ impl FlushAndFlush {
hist_params: HistParams {
bucket_number: CFLUSH_BUCKET_NUMBER,
bucket_size: CFLUSH_BUCKET_SIZE,
- iterations: CFLUSH_NUM_ITER << 1,
+ iterations: CFLUSH_NUM_ITER,
},
verbosity: Verbosity::NoOutput,
optimised_addresses: true,
@@ -149,25 +171,68 @@ impl FlushAndFlush {
};
calibrate_results2t_vec.append(&mut r);
}
- unimplemented!();
+ let analysis: HashMap<ASVP, ThresholdError> = calibration_result_to_ASVP(
+ calibrate_results2t_vec,
+ |cal_1t_res| {
+ let e = ErrorPredictions::predict_errors(HistogramCumSum::from_calibrate(
+ cal_1t_res, HIT_INDEX, MISS_INDEX,
+ ));
+ PotentialThresholds::minimizing_total_error(e)
+ .median()
+ .unwrap()
+ },
+ &h,
+ )
+ .map_err(|e| FlushAndFlushError::Nix(e))?;
+
+ let asvp_best_av_errors: HashMap<AV, (ErrorPrediction, HashMap<SP, ThresholdError>)> =
+ accumulate(
+ analysis,
+ |asvp: ASVP| AV {
+ attacker: asvp.attacker,
+ victim: asvp.victim,
+ },
+ || (ErrorPrediction::default(), HashMap::new()),
+ |acc: &mut (ErrorPrediction, HashMap<SP, ThresholdError>),
+ threshold_error,
+ asvp: ASVP,
+ av| {
+ assert_eq!(av.attacker, asvp.attacker);
+ assert_eq!(av.victim, asvp.victim);
+ let sp = SP {
+ slice: asvp.slice,
+ page: asvp.page,
+ };
+ acc.0 += threshold_error.error;
+ acc.1.insert(sp, threshold_error);
+ },
+ );
+ Ok(asvp_best_av_errors)
}
fn new_with_core_pairs(
core_pairs: impl Iterator<Item = (usize, usize)> + Clone,
) -> Result<(Self, usize, usize), FlushAndFlushError> {
- let m = MMappedMemory::new(PAGE_LEN);
+ let m = MMappedMemory::new(PAGE_LEN, false);
let array: &[u8] = m.slice();
- let res = Self::calibration_for_core_pairs(core_pairs, vec![array].into_iter());
+ let mut res = Self::calibration_for_core_pairs(core_pairs, vec![array].into_iter())?;
- // Call the calibration function on a local page sized buffer.
-
- // Classical analysis flow to generate all ASVP, Threshold, Error.
-
- // Reduction to determine average / max error for each core.
+ let mut best_error_rate = 1.0;
+ let mut best_av = Default::default();
// Select the proper core
- unimplemented!();
+
+ for (av, (global_error_pred, thresholds)) in res.iter() {
+ if global_error_pred.error_rate() < best_error_rate {
+ best_av = *av;
+ best_error_rate = global_error_pred.error_rate();
+ }
+ }
+ Self::new(best_av.attacker, best_av.victim)
+ .map(|this| (this, best_av.attacker, best_av.victim))
+
+ // No thresholds are kept: calibration ran on a local array that is dropped here.
}
pub fn new_any_single_core() -> Result<(Self, CpuSet, usize), FlushAndFlushError> {
@@ -224,7 +289,7 @@ impl FlushAndFlush {
self.slicing.hash(addr as usize).unwrap()
}
- pub fn set_cores(&mut self, attacker: usize, victim: usize) -> Result<(), nix::Error> {
+ pub fn set_cores(&mut self, attacker: usize, victim: usize) -> Result<(), FlushAndFlushError> {
let old_attacker = self.attacker_core;
let old_victim = self.victim_core;
@@ -247,34 +312,41 @@ impl FlushAndFlush {
}
}
- fn recalibrate(&mut self, pages: impl IntoIterator<Item = &[u8]>) -> Result<(), nix::Error> {
+ fn recalibrate(
+ &mut self,
+ pages: impl IntoIterator<Item = &[u8]>,
+ ) -> Result<(), FlushAndFlushError> {
// unset readiness status.
// Call calibration with core pairs with a single core pair
// Use results \o/ (or error out)
- unimplemented!();
+ self.addresses_ready.clear();
+
+ // Fixme refactor in depth core pairs to make explicit main vs helper.
+ let core_pairs = vec![(self.attacker_core, self.victim_core)];
+
+ let pages: HashSet<&[u8]> = self
+ .thresholds
+ .keys()
+ .map(|sp: &SP| unsafe { &*slice_from_raw_parts(sp.page as *const u8, PAGE_LEN) })
+ .collect();
+
+ let mut res = Self::calibration_for_core_pairs(core_pairs.into_iter(), pages.into_iter())?;
+ assert_eq!(res.keys().count(), 1);
+ self.thresholds = res
+ .remove(&AV {
+ attacker: self.attacker_core,
+ victim: self.victim_core,
+ })
+ .unwrap()
+ .1;
+ Ok(())
}
-}
-impl Debug for FlushAndFlush {
- fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
- f.debug_struct("FlushAndFlush")
- .field("thresholds", &self.thresholds)
- .field("addresses_ready", &self.addresses_ready)
- .field("slicing", &self.slicing)
- .finish()
- }
-}
-
-use cache_utils::calibration::cum_sum;
-use cache_utils::mmap::MMappedMemory;
-
-impl MultipleAddrCacheSideChannel for FlushAndFlush {
- const MAX_ADDR: u32 = 3;
-
- unsafe fn test<'a, 'b, 'c>(
- &'a mut self,
+ unsafe fn test_impl<'a, 'b, 'c>(
+ &'a self,
addresses: &'b mut (impl Iterator<Item = &'c *const u8> + Clone),
+ limit: u32,
) -> Result<Vec<(*const u8, CacheStatus)>, SideChannelError> {
let mut result = Vec::new();
let mut tmp = Vec::new();
@@ -283,7 +355,7 @@ impl MultipleAddrCacheSideChannel for FlushAndFlush {
i += 1;
let t = unsafe { only_flush(*addr) };
tmp.push((addr, t));
- if i == Self::MAX_ADDR {
+ if i == limit {
break;
}
}
@@ -304,9 +376,10 @@ impl MultipleAddrCacheSideChannel for FlushAndFlush {
Ok(result)
}
- unsafe fn prepare<'a, 'b, 'c>(
+ unsafe fn prepare_impl<'a, 'b, 'c>(
&'a mut self,
addresses: &'b mut (impl Iterator<Item = &'c *const u8> + Clone),
+ limit: u32,
) -> Result<(), SideChannelError> {
use core::arch::x86_64 as arch_x86;
let mut i = 0;
@@ -321,7 +394,7 @@ impl MultipleAddrCacheSideChannel for FlushAndFlush {
if !self.thresholds.contains_key(&SP { slice, page: vpn }) {
return Err(AddressNotCalibrated(*addr));
}
- if i == Self::MAX_ADDR {
+ if i == limit {
break;
}
}
@@ -329,14 +402,63 @@ impl MultipleAddrCacheSideChannel for FlushAndFlush {
for addr in addresses {
i += 1;
unsafe { flush(*addr) };
+ //println!("{:p}", *addr);
self.addresses_ready.insert(*addr);
- if i == Self::MAX_ADDR {
+ if i == limit {
break;
}
}
unsafe { arch_x86::_mm_mfence() };
Ok(())
}
+}
+
+impl Debug for FlushAndFlush {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ f.debug_struct("FlushAndFlush")
+ .field("thresholds", &self.thresholds)
+ .field("addresses_ready", &self.addresses_ready)
+ .field("slicing", &self.slicing)
+ .finish()
+ }
+}
+
+impl CoreSpec for FlushAndFlush {
+ fn main_core(&self) -> CpuSet {
+ let mut main = CpuSet::new();
+ main.set(self.attacker_core);
+ main
+ }
+
+ fn helper_core(&self) -> CpuSet {
+ let mut helper = CpuSet::new();
+ helper.set(self.victim_core);
+ helper
+ }
+}
+
+use cache_side_channel::CacheStatus::Hit;
+use cache_utils::calibration::cum_sum;
+use cache_utils::mmap::MMappedMemory;
+use covert_channels_evaluation::{BitIterator, CovertChannel};
+use std::ptr::slice_from_raw_parts;
+
+impl MultipleAddrCacheSideChannel for FlushAndFlush {
+ const MAX_ADDR: u32 = 3;
+
+ unsafe fn test<'a, 'b, 'c>(
+ &'a mut self,
+ addresses: &'b mut (impl Iterator<Item = &'c *const u8> + Clone),
+ ) -> Result<Vec<(*const u8, CacheStatus)>, SideChannelError> {
+ unsafe { self.test_impl(addresses, Self::MAX_ADDR) }
+ }
+
+ unsafe fn prepare<'a, 'b, 'c>(
+ &'a mut self,
+ addresses: &'b mut (impl Iterator<Item = &'c *const u8> + Clone),
+ ) -> Result<(), SideChannelError> {
+ unsafe { self.prepare_impl(addresses, Self::MAX_ADDR) }
+ }
fn victim(&mut self, operation: &dyn Fn()) {
operation(); // TODO use a different helper core ?
@@ -357,251 +479,141 @@ impl MultipleAddrCacheSideChannel for FlushAndFlush {
&mut self,
addresses: impl IntoIterator<Item = *const u8> + Clone,
) -> Result<(), ChannelFatalError> {
- unimplemented!()
- /*
- let mut pages = HashMap::<VPN, HashSet<*const u8>>::new();
- for addr in addresses {
- let page = get_vpn(addr);
- pages.entry(page).or_insert_with(HashSet::new).insert(addr);
- }
+ let core_pair = vec![(self.attacker_core, self.victim_core)];
- let core_per_socket = find_core_per_socket();
+ let pages = addresses
+ .into_iter()
+ .map(|addr: *const u8| unsafe {
+ &*slice_from_raw_parts(get_vpn(addr) as *const u8, PAGE_LEN)
+ })
+ .collect::<HashSet<&[u8]>>();
- let operations = [
- CalibrateOperation2T {
- prepare: maccess::<u8>,
- op: only_flush,
- name: "clflush_remote_hit",
- display_name: "clflush remote hit",
- },
- CalibrateOperation2T {
- prepare: noop::<u8>,
- op: only_flush,
- name: "clflush_miss",
- display_name: "clflush miss",
- },
- ];
- const HIT_INDEX: usize = 0;
- const MISS_INDEX: usize = 1;
-
- // Generate core iterator
- let mut core_pairs: Vec<(usize, usize)> = Vec::new();
-
- let old = sched_getaffinity(Pid::from_raw(0)).unwrap();
-
- for i in 0..CpuSet::count() {
- if old.is_set(i).unwrap() {
- core_pairs.push((i, i));
- }
- }
-
- // Probably needs more metadata
- let mut per_core: HashMap<usize, HashMap<VPN, HashMap<Slice, (Threshold, f32)>>> =
- HashMap::new();
-
- let mut core_averages: HashMap<usize, (f32, u32)> = HashMap::new();
-
- for (page, _) in pages {
- let p = page as *const u8;
- let r = unsafe {
- calibrate_fixed_freq_2_thread(
- p,
- 64, // FIXME : MAGIC
- PAGE_LEN as isize, // MAGIC
- &mut core_pairs.clone().into_iter(),
- &operations,
- CalibrationOptions {
- hist_params: HistParams {
- bucket_number: CFLUSH_BUCKET_NUMBER,
- bucket_size: CFLUSH_BUCKET_SIZE,
- iterations: CFLUSH_NUM_ITER << 1,
- },
- verbosity: Verbosity::NoOutput,
- optimised_addresses: true,
- },
- core_per_socket,
- )
- };
-
- /* TODO refactor a good chunk of calibration result analysis to make thresholds in a separate function
- Generating Cumulative Sums and then using that to compute error count for each possible threshold is a recurring joke.
- It might be worth in a second time to refactor this to handle more generic strategies (such as double thresholds)
- What about handling non attributes values (time values that are not attributed as hit or miss)
- */
-
- /*
-
- Non Naive F+F flow
- Vec -> ASVP,Thresholds,Error Does not care as much. Can probably re-use functions to build a single one.
- Add API to query predicted error rate, compare with covert channel result.
- */
-
- for result2t in r {
- if result2t.main_core != result2t.helper_core {
- panic!("Unexpected core numbers");
+ let mut res =
+ match Self::calibration_for_core_pairs(core_pair.into_iter(), pages.into_iter()) {
+ Err(e) => {
+ return Err(ChannelFatalError::Oops);
}
- let core = result2t.main_core;
- match result2t.res {
- Err(e) => panic!("Oops: {:#?}", e),
- Ok(results_1t) => {
- for r1t in results_1t {
- // This will be turned into map_values style functions + Calibration1T -> Reasonable Type
+ Ok(r) => r,
+ };
+ assert_eq!(res.keys().count(), 1);
+ let t = res
+ .remove(&AV {
+ attacker: self.attacker_core,
+ victim: self.victim_core,
+ })
+ .unwrap()
+ .1;
- // Already handled
- let offset = r1t.offset;
- let addr = unsafe { p.offset(offset) };
- let slice = self.get_slice(addr);
+ for (sp, threshold) in t {
+ //println!("Inserting sp: {:?} => Threshold: {:?}", sp, threshold);
+ self.thresholds.insert(sp, threshold);
+ }
- // To Raw histogram
- let miss_hist = &r1t.histogram[MISS_INDEX];
- let hit_hist = &r1t.histogram[HIT_INDEX];
- if miss_hist.len() != hit_hist.len() {
- panic!("Maformed results");
+ Ok(())
+ }
+}
+
+unsafe impl Send for FlushAndFlush {}
+unsafe impl Sync for FlushAndFlush {}
+
+impl CovertChannel for FlushAndFlush {
+ const BIT_PER_PAGE: usize = 1; //PAGE_SHIFT - 6; // FIXME MAGIC cache line size
+
+ unsafe fn transmit<'a>(&self, page: *const u8, bits: &mut BitIterator<'a>) {
+ let mut offset = 0;
+
+ if Self::BIT_PER_PAGE == 1 {
+ let page = self.preferred_address[&page];
+
+ if let Some(b) = bits.next() {
+ //println!("Transmitting {} on page {:p}", b, page);
+ if b {
+ unsafe { only_reload(page) };
+ } else {
+ unsafe { only_flush(page) };
+ }
+ }
+ } else {
+ for i in 0..Self::BIT_PER_PAGE {
+ if let Some(b) = bits.next() {
+ if b {
+ offset += 1 << i + 6; // Magic FIXME cache line size
+ }
+ }
+ }
+ unsafe { maccess(page.offset(offset as isize)) };
+ }
+ }
+
+ unsafe fn receive(&self, page: *const u8) -> Vec<bool> {
+ if Self::BIT_PER_PAGE == 1 {
+ let addresses: Vec<*const u8> = vec![self.preferred_address[&page]];
+ let r = unsafe { self.test_impl(&mut addresses.iter(), u32::max_value()) };
+ match r {
+ Err(e) => panic!("{:?}", e),
+ Ok(status_vec) => {
+ assert_eq!(status_vec.len(), 1);
+ let received = status_vec[0].1 == Hit;
+ //println!("Received {} on page {:p}", received, page);
+ return vec![received];
+ }
+ }
+ } else {
+ let addresses = (0..PAGE_LEN)
+ .step_by(64)
+ .map(|o| unsafe { page.offset(o as isize) })
+ .collect::<Vec<*const u8>>();
+ let r = unsafe { self.test_impl(&mut addresses.iter(), u32::max_value()) };
+ match r {
+ Err(e) => panic!("{:?}", e),
+ Ok(status_vec) => {
+ for (addr, status) in status_vec {
+ if status == Hit {
+ let offset = unsafe { addr.offset_from(page) } >> 6; // Fixme cache line size magic
+ let mut res = Vec::new();
+ for i in 0..Self::BIT_PER_PAGE {
+ res.push((offset & (1 << i)) != 0);
}
- let len = miss_hist.len();
-
- // Cum Sums
- let miss_cum_sum = cum_sum(miss_hist);
- let hit_cum_sum = cum_sum(hit_hist);
- let miss_total = miss_cum_sum[len - 1];
- let hit_total = hit_cum_sum[len - 1];
-
- // Error rate per threshold computations
-
- // Threshold is less than equal => miss, strictly greater than => hit
- let mut error_miss_less_than_hit = vec![0; len - 1];
- // Threshold is less than equal => hit, strictly greater than => miss
- let mut error_hit_less_than_miss = vec![0; len - 1];
-
- let mut min_error_hlm = u32::max_value();
- let mut min_error_mlh = u32::max_value();
-
- for i in 0..(len - 1) {
- error_hit_less_than_miss[i] =
- miss_cum_sum[i] + (hit_total - hit_cum_sum[i]);
- error_miss_less_than_hit[i] =
- hit_cum_sum[i] + (miss_total - miss_cum_sum[i]);
-
- if error_hit_less_than_miss[i] < min_error_hlm {
- min_error_hlm = error_hit_less_than_miss[i];
- }
- if error_miss_less_than_hit[i] < min_error_mlh {
- min_error_mlh = error_miss_less_than_hit[i];
- }
- }
-
- let hlm = min_error_hlm < min_error_mlh;
-
- let (errors, min_error) = if hlm {
- (&error_hit_less_than_miss, min_error_hlm)
- } else {
- (&error_miss_less_than_hit, min_error_mlh)
- };
-
- // Find the min -> gives potetial thresholds with info
- let mut potential_thresholds = Vec::new();
-
- for i in 0..errors.len() {
- if errors[i] == min_error {
- let num_true_hit;
- let num_false_hit;
- let num_true_miss;
- let num_false_miss;
- if hlm {
- num_true_hit = hit_cum_sum[i];
- num_false_hit = miss_cum_sum[i];
- num_true_miss = miss_total - num_false_hit;
- num_false_miss = hit_total - num_true_hit;
- } else {
- num_true_miss = miss_cum_sum[i];
- num_false_miss = hit_cum_sum[i];
- num_true_hit = hit_total - num_false_miss;
- num_false_hit = miss_total - num_true_miss;
- }
- potential_thresholds.push((
- i,
- num_true_hit,
- num_false_hit,
- num_true_miss,
- num_false_miss,
- min_error as f32 / (hit_total + miss_total) as f32,
- ));
- }
- }
-
- let index = (potential_thresholds.len() - 1) / 2;
- let (threshold, _, _, _, _, error_rate) = potential_thresholds[index];
- // insert in per_core
- if per_core
- .entry(core)
- .or_insert_with(HashMap::new)
- .entry(page)
- .or_insert_with(HashMap::new)
- .insert(
- slice,
- (
- Threshold {
- bucket_index: threshold, // FIXME the bucket to time conversion
- miss_faster_than_hit: !hlm,
- },
- error_rate,
- ),
- )
- .is_some()
- {
- panic!("Duplicate slice result");
- }
- let core_average = core_averages.get(&core).unwrap_or(&(0.0, 0));
- let new_core_average =
- (core_average.0 + error_rate, core_average.1 + 1);
- core_averages.insert(core, new_core_average);
+ return res;
}
}
}
}
+ vec![false; Self::BIT_PER_PAGE]
}
+ }
- // We now get ASVP stuff with the correct core(in theory)
-
- // We now have a HashMap associating stuffs to cores, iterate on it and select the best.
- let mut best_core = 0;
-
- let mut best_error_rate = {
- let ca = core_averages[&0];
- ca.0 / ca.1 as f32
- };
- for (core, average) in core_averages {
- let error_rate = average.0 / average.1 as f32;
- if error_rate < best_error_rate {
- best_core = core;
- best_error_rate = error_rate;
+ unsafe fn ready_page(&mut self, page: *const u8) {
+ let r = unsafe { self.calibrate(vec![page].into_iter()) }.unwrap();
+ if Self::BIT_PER_PAGE == 1 {
+ let mut best_error_rate = 1.0;
+ let mut best_slice = 0;
+ for (sp, threshold_error) in
+ self.thresholds.iter().filter(|kv| kv.0.page == page as VPN)
+ {
+ if threshold_error.error.error_rate() < best_error_rate {
+ best_error_rate = threshold_error.error.error_rate();
+ best_slice = sp.slice;
+ }
}
- }
- let mut thresholds = HashMap::new();
- println!("Best core: {}, rate: {}", best_core, best_error_rate);
- let tmp = per_core.remove(&best_core).unwrap();
- for (page, per_page) in tmp {
- let page_entry = thresholds.entry(page).or_insert_with(HashMap::new);
- for (slice, per_slice) in per_page {
- println!(
- "page: {:x}, slice: {}, threshold: {:?}, error_rate: {}",
- page, slice, per_slice.0, per_slice.1
- );
- page_entry.insert(slice, per_slice.0);
+ for i in 0..PAGE_LEN {
+ let addr = unsafe { page.offset(i as isize) };
+ if self.get_slice(addr) == best_slice {
+ self.preferred_address.insert(page, addr);
+ let r = unsafe { self.prepare_impl(&mut vec![addr].iter(), u32::max_value()) }
+ .unwrap();
+
+ break;
+ }
}
+ } else {
+ let addresses = (0..PAGE_LEN)
+ .step_by(64)
+ .map(|o| unsafe { page.offset(o as isize) })
+ .collect::<Vec<*const u8>>();
+ //println!("{:#?}", addresses);
+ let r = unsafe { self.prepare_impl(&mut addresses.iter(), u32::max_value()) }.unwrap();
}
- self.thresholds = thresholds;
- println!("{:#?}", self.thresholds);
-
- // TODO handle error better for affinity setting and other issues.
-
- self.addresses_ready.clear();
-
- let mut cpuset = CpuSet::new();
- cpuset.set(best_core).unwrap();
- sched_setaffinity(Pid::from_raw(0), &cpuset).unwrap();
- Ok(())
- */
}
}
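Note: with `BIT_PER_PAGE == 1`, `ready_page` picks one preferred line per page (the slice with the lowest predicted error), the sender reloads that line to encode a 1 or flushes it to encode a 0, and the receiver decodes by timing a `clflush` as `only_flush` does (Flush+Flush: flushing a cached line is slower). A standalone sketch of that timing asymmetry with `core::arch` intrinsics; no calibrated threshold, just the two raw measurements:

use core::arch::x86_64::{__rdtscp, _mm_clflush, _mm_mfence};

// Time one clflush: it takes longer when the target line is cached.
unsafe fn timed_flush(p: *const u8) -> u64 {
    let mut aux = 0u32;
    _mm_mfence();
    let start = __rdtscp(&mut aux);
    _mm_clflush(p);
    let end = __rdtscp(&mut aux);
    _mm_mfence();
    end - start
}

fn main() {
    let page = vec![0u8; 4096];
    let line = &page[0] as *const u8;
    unsafe { line.read_volatile() }; // sender encodes a 1: reload the line
    let t_one = unsafe { timed_flush(line) };
    // timed_flush left the line flushed, which is exactly what encoding a 0 does.
    let t_zero = unsafe { timed_flush(line) };
    println!("flush after reload: {} cycles, flush after flush: {} cycles", t_one, t_zero);
    // The real channel compares against a calibrated Threshold instead of printing.
}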
diff --git a/flush_reload/.cargo/config b/flush_reload/.cargo/config
new file mode 100644
index 0000000..2f05654
--- /dev/null
+++ b/flush_reload/.cargo/config
@@ -0,0 +1,2 @@
+[build]
+target = "x86_64-unknown-linux-gnu"
diff --git a/flush_reload/Cargo.toml b/flush_reload/Cargo.toml
index 34fa174..953ef54 100644
--- a/flush_reload/Cargo.toml
+++ b/flush_reload/Cargo.toml
@@ -9,3 +9,5 @@ edition = "2018"
[dependencies]
cache_utils = { path = "../cache_utils" }
cache_side_channel = { path = "../cache_side_channel" }
+covert_channels_evaluation = {path = "../covert_channels_evaluation"}
+nix = "0.18.0"
diff --git a/flush_reload/src/naive.rs b/flush_reload/src/naive.rs
index 0db7214..ed1dff6 100644
--- a/flush_reload/src/naive.rs
+++ b/flush_reload/src/naive.rs
@@ -1,8 +1,11 @@
use cache_side_channel::{
- CacheStatus, ChannelFatalError, SideChannelError, SingleAddrCacheSideChannel,
+ CacheStatus, ChannelFatalError, CoreSpec, SideChannelError, SingleAddrCacheSideChannel,
};
use cache_utils::calibration::only_reload;
use cache_utils::flush;
+use covert_channels_evaluation::{BitIterator, CovertChannel};
+use nix::sched::{sched_getaffinity, CpuSet};
+use nix::unistd::Pid;
#[derive(Debug)]
pub struct NaiveFlushAndReload {
@@ -58,3 +61,42 @@ impl SingleAddrCacheSideChannel for NaiveFlushAndReload {
Ok(())
}
}
+
+unsafe impl Send for NaiveFlushAndReload {}
+unsafe impl Sync for NaiveFlushAndReload {}
+
+impl CoreSpec for NaiveFlushAndReload {
+ fn main_core(&self) -> CpuSet {
+ sched_getaffinity(Pid::from_raw(0)).unwrap()
+ }
+
+ fn helper_core(&self) -> CpuSet {
+ sched_getaffinity(Pid::from_raw(0)).unwrap()
+ }
+}
+
+impl CovertChannel for NaiveFlushAndReload {
+ const BIT_PER_PAGE: usize = 1;
+
+ unsafe fn transmit<'a>(&self, page: *const u8, bits: &mut BitIterator<'a>) {
+ unimplemented!()
+ }
+
+ unsafe fn receive(&self, page: *const u8) -> Vec<bool> {
+ unimplemented!()
+ /*
+ let r = self.test_single(page);
+ match r {
+ Err(e) => unimplemented!(),
+ Ok(status) => match status {
+ CacheStatus::Hit => vec![true],
+ CacheStatus::Miss => vec![false],
+ },
+ }
+ */
+ }
+
+ unsafe fn ready_page(&mut self, page: *const u8) {
+ unimplemented!()
+ }
+}
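Note: `NaiveFlushAndReload` answers both `main_core` and `helper_core` with the process's current affinity mask, i.e. it imposes no placement of its own. A small sketch of inspecting that mask with the same nix calls (assumes a Linux target, as the rest of the workspace does):

use nix::sched::{sched_getaffinity, CpuSet};
use nix::unistd::Pid;

fn main() {
    // Same call the CoreSpec impl uses for both roles.
    let mask = sched_getaffinity(Pid::from_raw(0)).unwrap();
    let cores: Vec<usize> = (0..CpuSet::count())
        .filter(|&i| mask.is_set(i).unwrap_or(false))
        .collect();
    println!("channel may run on cores {:?}", cores);
}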