Final update to AES; major update to covert channel benchmarking
parent 7efc28e253, commit 236b8bee48
@@ -38,6 +38,7 @@
     <sourceFolder url="file://$MODULE_DIR$/covert_channels_evaluation/src" isTestSource="false" />
     <sourceFolder url="file://$MODULE_DIR$/basic_timing_cache_channel/src" isTestSource="false" />
     <sourceFolder url="file://$MODULE_DIR$/turn_lock/src" isTestSource="false" />
+    <sourceFolder url="file://$MODULE_DIR$/covert_channels_benchmark/src" isTestSource="false" />
     <excludeFolder url="file://$MODULE_DIR$/cache_info/target" />
     <excludeFolder url="file://$MODULE_DIR$/cache_utils/target" />
     <excludeFolder url="file://$MODULE_DIR$/kernel/target" />
Cargo.lock (generated, 16 changed lines)
@@ -76,6 +76,9 @@ dependencies = [
 [[package]]
 name = "cache_side_channel"
 version = "0.1.0"
+dependencies = [
+ "nix",
+]
 
 [[package]]
 name = "cache_utils"
@@ -105,12 +108,22 @@ version = "0.1.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
 
+[[package]]
+name = "covert_channels_benchmark"
+version = "0.1.0"
+dependencies = [
+ "covert_channels_evaluation",
+ "flush_flush",
+]
+
 [[package]]
 name = "covert_channels_evaluation"
 version = "0.1.0"
 dependencies = [
  "bit_field 0.10.1",
+ "cache_side_channel",
  "cache_utils",
+ "nix",
  "rand",
  "turn_lock",
 ]
@@ -149,6 +162,7 @@ version = "0.1.0"
 dependencies = [
  "cache_side_channel",
  "cache_utils",
+ "covert_channels_evaluation",
  "nix",
 ]
 
@@ -158,6 +172,8 @@ version = "0.1.0"
 dependencies = [
  "cache_side_channel",
  "cache_utils",
+ "covert_channels_evaluation",
+ "nix",
 ]
 
 [[package]]
@@ -6,6 +6,7 @@ members = [
     "cache_utils",
     "cpuid",
     "aes-t-tables",
+    "covert_channels_benchmark",
     "covert_channels_evaluation",
     "cache_side_channel",
     "flush_reload",
@@ -6,7 +6,7 @@ use openssl::aes;
 
 use crate::CacheStatus::Miss;
 use cache_side_channel::table_side_channel::TableCacheSideChannel;
-use cache_side_channel::CacheStatus;
+use cache_side_channel::{restore_affinity, set_affinity, CacheStatus};
 use memmap2::Mmap;
 use openssl::aes::aes_ige;
 use openssl::symm::Mode;
@@ -58,6 +58,8 @@ pub unsafe fn attack_t_tables_poc(
     side_channel: &mut impl TableCacheSideChannel,
     parameters: AESTTableParams,
 ) {
+    let old_affinity = set_affinity(&side_channel.main_core());
+
     // Note : This function doesn't handle the case where the address space is not shared. (Additionally you have the issue of complicated eviction sets due to complex addressing)
     // TODO
 
@@ -133,4 +135,6 @@ pub unsafe fn attack_t_tables_poc(
         }
         println!();
     }
+
+    restore_affinity(&old_affinity);
 }
@@ -25,6 +25,8 @@ fn main() {
     let openssl_path = Path::new(env!("OPENSSL_DIR")).join("lib/libcrypto.so");
     let mut side_channel = NaiveFlushAndReload::from_threshold(220);
     let te = TE_CITRON_VERT;
+    for i in 0..4 {
+    println!("AES attack with Naive F+R, key 0");
     unsafe {
         attack_t_tables_poc(
             &mut side_channel,
@@ -35,7 +37,8 @@ fn main() {
             openssl_path: &openssl_path,
         },
     )
-    }; /**/
+    };
+    println!("AES attack with Naive F+R, key 1");
     unsafe {
         attack_t_tables_poc(
             &mut side_channel,
@@ -47,6 +50,8 @@ fn main() {
             },
         )
     };
+    println!("AES attack with Multiple F+F (limit = 3), key 0");
+    {
     let (mut side_channel_ff, old, core) = FlushAndFlush::new_any_single_core().unwrap();
     unsafe {
         attack_t_tables_poc(
@@ -59,9 +64,12 @@ fn main() {
             },
         )
     };
+    }
 
-    sched_setaffinity(Pid::from_raw(0), &old);
-    let (mut side_channel_ff, old, core) = SingleFlushAndFlush::new_any_single_core().unwrap();
+    println!("AES attack with Single F+F , key 1");
+    {
+        let (mut side_channel_ff, old, core) =
+            SingleFlushAndFlush::new_any_single_core().unwrap();
     unsafe {
         attack_t_tables_poc(
             &mut side_channel_ff,
@@ -72,6 +80,7 @@ fn main() {
             openssl_path: &openssl_path,
         },
     )
-    };
-    sched_setaffinity(Pid::from_raw(0), &old);
+        }
+        }
+    }
 }
@@ -7,3 +7,4 @@ edition = "2018"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
+nix = "0.18.0"
@@ -2,6 +2,8 @@
 #![feature(unsafe_block_in_unsafe_fn)]
 #![deny(unsafe_op_in_unsafe_fn)]
 
+use nix::sched::{sched_getaffinity, sched_setaffinity, CpuSet};
+use nix::unistd::Pid;
 use std::fmt::Debug;
 
 pub mod table_side_channel;
@@ -17,6 +19,7 @@ pub enum ChannelFatalError {
     Oops,
 }
 
+#[derive(Debug)]
 pub enum SideChannelError {
     NeedRecalibration,
     FatalError(ChannelFatalError),
@@ -24,7 +27,23 @@ pub enum SideChannelError {
     AddressNotCalibrated(*const u8),
 }
 
-pub trait SingleAddrCacheSideChannel: Debug {
+pub trait CoreSpec {
+    fn main_core(&self) -> CpuSet;
+    fn helper_core(&self) -> CpuSet;
+}
+
+pub fn restore_affinity(cpu_set: &CpuSet) {
+    sched_setaffinity(Pid::from_raw(0), &cpu_set).unwrap();
+}
+
+#[must_use = "This result must be used to restore affinity"]
+pub fn set_affinity(cpu_set: &CpuSet) -> CpuSet {
+    let old = sched_getaffinity(Pid::from_raw(0)).unwrap();
+    sched_setaffinity(Pid::from_raw(0), &cpu_set).unwrap();
+    old
+}
+
+pub trait SingleAddrCacheSideChannel: CoreSpec + Debug {
     //type SingleChannelFatalError: Debug;
     /// # Safety
     ///
@@ -44,7 +63,7 @@ pub trait SingleAddrCacheSideChannel: Debug {
     ) -> Result<(), ChannelFatalError>;
 }
 
-pub trait MultipleAddrCacheSideChannel: Debug {
+pub trait MultipleAddrCacheSideChannel: CoreSpec + Debug {
     const MAX_ADDR: u32;
     /// # Safety
     ///
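The new CoreSpec trait and the set_affinity / restore_affinity helpers above are meant to be used together: pin the current thread to a channel's main core, run the measurement, then put the old mask back. A minimal sketch of that pattern, reusing only the calls shown in this hunk (the DummyChannel type is hypothetical and stands in for a real side channel):

    use cache_side_channel::{restore_affinity, set_affinity, CoreSpec};
    use nix::sched::{sched_getaffinity, CpuSet};
    use nix::unistd::Pid;

    #[derive(Debug)]
    struct DummyChannel;

    impl CoreSpec for DummyChannel {
        // Run everything on whatever cores the process already owns.
        fn main_core(&self) -> CpuSet {
            sched_getaffinity(Pid::from_raw(0)).unwrap()
        }
        fn helper_core(&self) -> CpuSet {
            sched_getaffinity(Pid::from_raw(0)).unwrap()
        }
    }

    fn run_pinned(channel: &DummyChannel) {
        // set_affinity is #[must_use]: keep the old mask so it can be restored.
        let old = set_affinity(&channel.main_core());
        // ... perform the pinned measurement here ...
        restore_affinity(&old);
    }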
@@ -1,9 +1,10 @@
 use crate::{
-    CacheStatus, ChannelFatalError, MultipleAddrCacheSideChannel, SideChannelError,
+    CacheStatus, ChannelFatalError, CoreSpec, MultipleAddrCacheSideChannel, SideChannelError,
     SingleAddrCacheSideChannel,
 };
 
 use std::collections::HashMap;
+use std::fmt::Debug;
 
 pub struct TableAttackResult {
     pub addr: *const u8,
@@ -20,7 +21,7 @@ impl TableAttackResult {
     }
 }
 
-pub trait TableCacheSideChannel {
+pub trait TableCacheSideChannel: CoreSpec + Debug {
     //type ChannelFatalError: Debug;
     /// # Safety
     ///
@@ -252,7 +252,6 @@ fn main() {
     let new_analysis: Result<HashMap<ASVP, ErrorPredictions>, nix::Error> =
         calibration_result_to_ASVP(
             r,
-            pointer,
             |cal_1t_res| {
                 ErrorPredictions::predict_errors(HistogramCumSum::from_calibrate(
                     cal_1t_res, hit_index, miss_index,
@@ -645,8 +644,8 @@ fn main() {
     // Print header
     println!(
         "AVAnalysis:Attacker,Victim,{},{}",
-        error_header("AVSP_Best_AV_"),
-        error_header("AV_Best_AV_")
+        error_header("AVSP_AVAverage_"),
+        error_header("AV_AVAverage_")
     );
     //print lines
 
@@ -668,9 +667,9 @@ fn main() {
 
     println!(
         "AttackerAnalysis:Attacker,{},{},{}",
-        error_header("AVSP_Best_A_"),
-        error_header("ASP_Best_A_"),
-        error_header("AV_Best_A_"),
+        error_header("AVSP_AAverage_"),
+        error_header("ASP_AAverage_"),
+        error_header("AV_AAverage_"),
     );
 
     for attacker in keys {
@@ -682,15 +681,4 @@ fn main() {
             AV = format_error(&av_best_a_erros[&attacker].0)
         );
     }
-
-    /*
-    println!(
-        "analysis result: {:?}",
-        asvp_threshold_errors.keys().copied().collect::<Vec<ASVP>>()
-    );
-    println!("Global Analysis: {:#?}", global_threshold_errors);
-    println!(
-        "Global thrshold total error rate :{}",
-        global_threshold_errors.error.error_rate()
-    );*/
 }
@@ -112,7 +112,8 @@ pub unsafe fn l3_and_reload(p: *const u8) -> u64 {
     only_reload(p)
 }
 
-pub const PAGE_LEN: usize = 1 << 12;
+pub const PAGE_SHIFT: usize = 12;
+pub const PAGE_LEN: usize = 1 << PAGE_SHIFT;
 
 pub fn get_vpn<T>(p: *const T) -> usize {
     (p as usize) & (!(PAGE_LEN - 1)) // FIXME
@@ -246,6 +247,7 @@ pub fn calibrate_flush(
 
 #[derive(Debug)]
 pub struct CalibrateResult {
+    pub page: VPN,
     pub offset: isize,
     pub histogram: Vec<Vec<u32>>,
     pub median: Vec<u64>,
@@ -383,6 +385,7 @@ fn calibrate_impl_fixed_freq(
 
         // TODO add some useful impl to CalibrateResults
         let mut calibrate_result = CalibrateResult {
+            page: get_vpn(pointer),
            offset: i,
            histogram: Vec::new(),
            median: vec![0; operations.len()],
@@ -646,12 +649,12 @@ fn calibrate_fixed_freq_2_thread_impl<I: Iterator<Item = (usize, usize)>>(
             "Calibration for main_core {}, helper {}.",
             main_core, helper_core
         );
-    }
 
         eprintln!(
             "Calibration for main_core {}, helper {}.",
             main_core, helper_core
         );
+    }
 
     let mut core = CpuSet::new();
     match core.set(main_core) {
@@ -716,6 +719,7 @@ fn calibrate_fixed_freq_2_thread_impl<I: Iterator<Item = (usize, usize)>>(
 
         // TODO add some useful impl to CalibrateResults
         let mut calibrate_result = CalibrateResult {
+            page: get_vpn(pointer),
            offset: i,
            histogram: Vec::new(),
            median: vec![0; operations.len()],
@@ -1574,7 +1578,6 @@ impl Threshold {
 
 pub fn calibration_result_to_ASVP<T, Analysis: Fn(CalibrateResult) -> T>(
     results: Vec<CalibrateResult2T>,
-    base: *const u8,
     analysis: Analysis,
     slicing: &impl Fn(usize) -> u8,
 ) -> Result<HashMap<ASVP, T>, nix::Error> {
@@ -1587,8 +1590,8 @@ pub fn calibration_result_to_ASVP<T, Analysis: Fn(CalibrateResult) -> T>(
             Ok(calibrate_1t_results) => {
                 for result_1t in calibrate_1t_results {
                     let offset = result_1t.offset;
-                    let addr = unsafe { base.offset(offset) };
-                    let page = get_vpn(addr); //TODO
+                    let page = result_1t.page;
+                    let addr = page + offset as usize;
                     let slice = slicing(addr as usize);
                     let analysed = analysis(result_1t);
                     let asvp = ASVP {
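The calibration results carried through these hunks are per-offset hit/miss timing histograms; the analysis side (ErrorPredictions, HistogramCumSum, PotentialThresholds in later hunks) reduces each pair of histograms to a single timing threshold that minimizes misclassification. A self-contained sketch of that cumulative-sum argument, independent of the crate's types, for one of the two possible orientations only (the real code also tries the miss-faster-than-hit orientation; names are illustrative):

    /// Given per-time-bucket counts for hits and misses, return the bucket index
    /// whose "hit if at or below the threshold" split misclassifies the fewest samples.
    fn best_threshold(hit_hist: &[u32], miss_hist: &[u32]) -> (usize, u32) {
        assert_eq!(hit_hist.len(), miss_hist.len());
        let hit_total: u32 = hit_hist.iter().sum();
        let mut hit_below = 0u32; // hits at or below the candidate threshold
        let mut miss_below = 0u32; // misses at or below the candidate threshold
        let mut best = (0usize, u32::MAX);
        for i in 0..hit_hist.len() {
            hit_below += hit_hist[i];
            miss_below += miss_hist[i];
            // Errors = misses wrongly called hits + hits wrongly called misses.
            let errors = miss_below + (hit_total - hit_below);
            if errors < best.1 {
                best = (i, errors);
            }
        }
        best
    }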
@@ -26,7 +26,7 @@ pub struct MMappedMemory<T> {
 }
 
 impl<T> MMappedMemory<T> {
-    pub fn try_new(size: usize) -> Result<MMappedMemory<T>, nix::Error> {
+    pub fn try_new(size: usize, huge: bool) -> Result<MMappedMemory<T>, nix::Error> {
         assert_ne!(size_of::<T>(), 0);
         if let Some(p) = unsafe {
             let p = mman::mmap(
@@ -35,11 +35,16 @@ impl<T> MMappedMemory<T> {
                 mman::ProtFlags::PROT_READ | mman::ProtFlags::PROT_WRITE,
                 mman::MapFlags::MAP_PRIVATE
                     | mman::MapFlags::MAP_ANONYMOUS
-                    | mman::MapFlags::MAP_HUGETLB,
+                    | if huge {
+                        mman::MapFlags::MAP_HUGETLB
+                    } else {
+                        mman::MapFlags::MAP_ANONYMOUS
+                    },
                 -1,
                 0,
             )?;
-            Unique::new(p as *mut T)
+            let pointer_T = p as *mut T;
+            Unique::new(pointer_T)
         } {
             Ok(MMappedMemory { pointer: p, size })
         } else {
@@ -47,8 +52,8 @@ impl<T> MMappedMemory<T> {
         }
     }
 
-    pub fn new(size: usize) -> MMappedMemory<T> {
-        Self::try_new(size).unwrap()
+    pub fn new(size: usize, huge: bool) -> MMappedMemory<T> {
+        Self::try_new(size, huge).unwrap()
     }
 
     pub fn slice(&self) -> &[T] {

covert_channels_benchmark/.cargo/config (new file, 2 lines)
@@ -0,0 +1,2 @@
+[build]
+target = "x86_64-unknown-linux-gnu"

covert_channels_benchmark/Cargo.toml (new file, 11 lines)
@@ -0,0 +1,11 @@
+[package]
+name = "covert_channels_benchmark"
+version = "0.1.0"
+authors = ["Guillume DIDIER <guillaume.didier@inria.fr>"]
+edition = "2018"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+covert_channels_evaluation = {path = "../covert_channels_evaluation"}
+flush_flush = {path = "../flush_flush"}

covert_channels_benchmark/src/main.rs (new file, 21 lines)
@@ -0,0 +1,21 @@
+#![feature(unsafe_block_in_unsafe_fn)]
+#![deny(unsafe_op_in_unsafe_fn)]
+
+use covert_channels_evaluation::benchmark_channel;
+use flush_flush::FlushAndFlush;
+
+fn main() {
+    for _ in 0..16 {
+        //let sender = 0;
+        //let receiver = 2;
+        let (channel, old, receiver, sender) = match FlushAndFlush::new_any_two_core(true) {
+            Err(e) => {
+                panic!("{:?}", e);
+            }
+            Ok(r) => r,
+        };
+
+        let r = benchmark_channel(channel, 1, 1 << 15);
+        println!("{:?}", r);
+    }
+}
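The new benchmark binary prints the raw CovertChannelBenchmarkResult for each of its 16 runs. A hedged sketch of how the two result fields visible in this diff (num_bytes_transmitted, num_bit_errors) translate into a bit error rate; an actual throughput figure would additionally need the rdtsc start/stop values, which are not printed here:

    // Illustrative only: mirrors the error-rate formula used in benchmark_channel.
    fn bit_error_rate(num_bytes_transmitted: usize, num_bit_errors: usize) -> f64 {
        (num_bit_errors as f64) / ((num_bytes_transmitted * 8) as f64)
    }

    fn main() {
        // e.g. one run transmitting 1 << 15 bytes with 37 flipped bits
        println!("p = {}", bit_error_rate(1 << 15, 37));
    }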
@@ -11,3 +11,5 @@ rand = "0.7.3"
 bit_field = "0.10.1"
 turn_lock = { path = "../turn_lock" }
 cache_utils = { path = "../cache_utils" }
+nix = "0.18.0"
+cache_side_channel = { path = "../cache_side_channel" }
@@ -16,22 +16,28 @@ const PAGE_SIZE: usize = 1 << 12; // FIXME Magic
 // Alos time in order to determine duration, in rdtsc and seconds.
 
 use bit_field::BitField;
+use cache_side_channel::{restore_affinity, set_affinity, CoreSpec};
 use cache_utils::mmap::MMappedMemory;
 use cache_utils::rdtsc_fence;
+use nix::sched::sched_getaffinity;
+use nix::unistd::Pid;
 use std::any::Any;
 use std::collections::VecDeque;
+use std::fmt::Debug;
 use std::sync::Arc;
 use std::thread;
 
 /**
  * Safety considerations : Not ensure thread safety, need proper locking as needed.
  */
-pub trait CovertChannel: Send + Sync {
+pub trait CovertChannel: Send + Sync + CoreSpec + Debug {
     const BIT_PER_PAGE: usize;
     unsafe fn transmit(&self, page: *const u8, bits: &mut BitIterator);
     unsafe fn receive(&self, page: *const u8) -> Vec<bool>;
+    unsafe fn ready_page(&mut self, page: *const u8);
 }
 
+#[derive(Debug)]
 pub struct CovertChannelBenchmarkResult {
     pub num_bytes_transmitted: usize,
     pub num_bit_errors: usize,
@@ -84,7 +90,6 @@ struct CovertChannelPage {
 struct CovertChannelParams<T: CovertChannel + Send> {
     pages: Vec<CovertChannelPage>,
     covert_channel: Arc<T>,
-    transmit_core: usize,
 }
 
 unsafe impl<T: 'static + CovertChannel + Send> Send for CovertChannelParams<T> {}
@@ -93,45 +98,58 @@ fn transmit_thread<T: CovertChannel>(
     num_bytes: usize,
     mut params: CovertChannelParams<T>,
 ) -> (u64, Vec<u8>) {
+    let old_affinity = set_affinity(&(*params.covert_channel).helper_core());
+
     let mut result = Vec::new();
     result.reserve(num_bytes);
     for _ in 0..num_bytes {
         let byte = rand::random();
         result.push(byte);
     }
 
+    let mut bit_sent = 0;
     let mut bit_iter = BitIterator::new(&result);
     let start = unsafe { rdtsc_fence() };
     while !bit_iter.atEnd() {
         for page in params.pages.iter_mut() {
             page.turn.wait();
             unsafe { params.covert_channel.transmit(page.addr, &mut bit_iter) };
+            bit_sent += T::BIT_PER_PAGE;
             page.turn.next();
+            if bit_iter.atEnd() {
+                break;
+            }
         }
     }
 
     (start, result)
 }
 
 pub fn benchmark_channel<T: 'static + Send + CovertChannel>(
-    channel: T,
+    mut channel: T,
     num_pages: usize,
     num_bytes: usize,
-    transmit_core: usize,
-    receive_core: usize,
 ) -> CovertChannelBenchmarkResult {
     // Allocate pages
+
+    let old_affinity = set_affinity(&channel.main_core());
+
     let size = num_pages * PAGE_SIZE;
-    let m = MMappedMemory::new(size);
+    let mut m = MMappedMemory::new(size, false);
     let mut pages_transmit = Vec::new();
     let mut pages_receive = Vec::new();
+    for i in 0..num_pages {
+        m.slice_mut()[i * PAGE_SIZE] = i as u8;
+    }
     let array: &[u8] = m.slice();
     for i in 0..num_pages {
         let addr = &array[i * PAGE_SIZE] as *const u8;
         let mut turns = TurnLock::new(2);
         let mut t_iter = turns.drain(0..);
-        let receive_lock = t_iter.next().unwrap();
         let transmit_lock = t_iter.next().unwrap();
+        let receive_lock = t_iter.next().unwrap();
+
         assert!(t_iter.next().is_none());
+        unsafe { channel.ready_page(addr) };
         pages_transmit.push(CovertChannelPage {
             turn: transmit_lock,
             addr,
@@ -141,17 +159,13 @@ pub fn benchmark_channel<T: 'static + Send + CovertChannel>(
             addr,
         });
     }
 
     let covert_channel_arc = Arc::new(channel);
     let params = CovertChannelParams {
         pages: pages_transmit,
         covert_channel: covert_channel_arc.clone(),
-        transmit_core,
     };
 
-    if transmit_core == receive_core {
-        unimplemented!()
-    }
     let helper = thread::spawn(move || transmit_thread(num_bytes, params));
     // Create the thread parameters
     let mut received_bytes: Vec<u8> = Vec::new();
@@ -171,10 +185,14 @@ pub fn benchmark_channel<T: 'static + Send + CovertChannel>(
             }
             received_bytes.push(byte);
         }
+        if received_bytes.len() >= num_bytes {
+            break;
+        }
     }
     // TODO
     // receiver thread
     }
 
     let stop = unsafe { rdtsc_fence() };
     let r = helper.join();
     let (start, sent_bytes) = match r {
@@ -184,13 +202,15 @@ pub fn benchmark_channel<T: 'static + Send + CovertChannel>(
     assert_eq!(sent_bytes.len(), received_bytes.len());
     assert_eq!(num_bytes, received_bytes.len());
 
+    restore_affinity(&old_affinity);
+
     let mut num_bit_error = 0;
     for i in 0..num_bytes {
         num_bit_error += (sent_bytes[i] ^ received_bytes[i]).count_ones() as usize;
     }
 
     let error_rate = (num_bit_error as f64) / ((num_bytes * u8::BIT_LENGTH) as f64);
-    // Create transmit thread
     CovertChannelBenchmarkResult {
         num_bytes_transmitted: num_bytes,
         num_bit_errors: num_bit_error,
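benchmark_channel hands each page back and forth between the spawned transmit thread and the receiving loop through a TurnLock (wait / next in the hunks above). The sketch below reproduces that strict alternation with std channels only; it is an illustration of the hand-off structure, not the turn_lock crate's API:

    use std::sync::mpsc;
    use std::thread;

    fn main() {
        // to_receiver carries "a bit was written", to_sender carries "page consumed".
        let (to_receiver, from_sender) = mpsc::channel::<bool>();
        let (to_sender, from_receiver) = mpsc::channel::<()>();

        let sender = thread::spawn(move || {
            for bit in [true, false, true, true] {
                to_receiver.send(bit).unwrap(); // "transmit" one bit on the shared page
                from_receiver.recv().unwrap(); // wait until the receiver has probed it
            }
        });

        let mut received = Vec::new();
        for _ in 0..4 {
            received.push(from_sender.recv().unwrap()); // "probe" the page
            to_sender.send(()).unwrap(); // give the turn back
        }
        sender.join().unwrap();
        assert_eq!(received, vec![true, false, true, true]);
    }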
@@ -10,3 +10,4 @@ edition = "2018"
 cache_utils = { path = "../cache_utils" }
 cache_side_channel = { path = "../cache_side_channel" }
 nix = "0.18.0"
+covert_channels_evaluation = {path = "../covert_channels_evaluation"}

@@ -5,13 +5,14 @@ pub mod naive;
 
 use cache_side_channel::SideChannelError::{AddressNotCalibrated, AddressNotReady};
 use cache_side_channel::{
-    CacheStatus, ChannelFatalError, MultipleAddrCacheSideChannel, SideChannelError,
+    CacheStatus, ChannelFatalError, CoreSpec, MultipleAddrCacheSideChannel, SideChannelError,
     SingleAddrCacheSideChannel,
 };
 use cache_utils::calibration::{
-    calibrate_fixed_freq_2_thread, get_cache_slicing, get_vpn, only_flush, CalibrateOperation2T,
-    CalibrationOptions, HistParams, Verbosity, CFLUSH_BUCKET_NUMBER, CFLUSH_BUCKET_SIZE,
-    CFLUSH_NUM_ITER, PAGE_LEN,
+    accumulate, calibrate_fixed_freq_2_thread, calibration_result_to_ASVP, get_cache_slicing,
+    get_vpn, only_flush, only_reload, CalibrateOperation2T, CalibrationOptions, ErrorPredictions,
+    HistParams, HistogramCumSum, PotentialThresholds, Verbosity, ASVP, CFLUSH_BUCKET_NUMBER,
+    CFLUSH_BUCKET_SIZE, CFLUSH_NUM_ITER, PAGE_LEN, PAGE_SHIFT,
 };
 use cache_utils::calibration::{ErrorPrediction, Slice, Threshold, ThresholdError, AV, SP, VPN};
 use cache_utils::complex_addressing::CacheSlicing;
@@ -28,11 +29,13 @@ pub struct FlushAndFlush {
     slicing: CacheSlicing,
     attacker_core: usize,
     victim_core: usize,
+    preferred_address: HashMap<*const u8, *const u8>,
 }
 
 #[derive(Debug)]
 pub enum FlushAndFlushError {
     NoSlicing,
+    Nix(nix::Error),
 }
 
 #[derive(Debug)]
@@ -56,6 +59,16 @@ impl SingleFlushAndFlush {
     }
 }
 
+impl CoreSpec for SingleFlushAndFlush {
+    fn main_core(&self) -> CpuSet {
+        self.0.main_core()
+    }
+
+    fn helper_core(&self) -> CpuSet {
+        self.0.helper_core()
+    }
+}
+
 impl SingleAddrCacheSideChannel for SingleFlushAndFlush {
     unsafe fn test_single(&mut self, addr: *const u8) -> Result<CacheStatus, SideChannelError> {
         unsafe { self.0.test_single(addr) }
@@ -90,6 +103,7 @@ impl FlushAndFlush {
             slicing,
             attacker_core,
             victim_core,
+            preferred_address: Default::default(),
         };
         Ok(ret)
     } else {
@@ -126,6 +140,14 @@ impl FlushAndFlush {
 
         let mut calibrate_results2t_vec = Vec::new();
 
+        let slicing = match get_cache_slicing(core_per_socket) {
+            Some(s) => s,
+            None => {
+                return Err(FlushAndFlushError::NoSlicing);
+            }
+        };
+        let h = |addr: usize| slicing.hash(addr).unwrap();
+
         for page in pages {
             // FIXME Cache line size is magic
             let mut r = unsafe {
@@ -139,7 +161,7 @@ impl FlushAndFlush {
                     hist_params: HistParams {
                         bucket_number: CFLUSH_BUCKET_NUMBER,
                         bucket_size: CFLUSH_BUCKET_SIZE,
-                        iterations: CFLUSH_NUM_ITER << 1,
+                        iterations: CFLUSH_NUM_ITER,
                     },
                     verbosity: Verbosity::NoOutput,
                     optimised_addresses: true,
@@ -149,25 +171,68 @@ impl FlushAndFlush {
             };
             calibrate_results2t_vec.append(&mut r);
         }
-        unimplemented!();
+        let analysis: HashMap<ASVP, ThresholdError> = calibration_result_to_ASVP(
+            calibrate_results2t_vec,
+            |cal_1t_res| {
+                let e = ErrorPredictions::predict_errors(HistogramCumSum::from_calibrate(
+                    cal_1t_res, HIT_INDEX, MISS_INDEX,
+                ));
+                PotentialThresholds::minimizing_total_error(e)
+                    .median()
+                    .unwrap()
+            },
+            &h,
+        )
+        .map_err(|e| FlushAndFlushError::Nix(e))?;
+
+        let asvp_best_av_errors: HashMap<AV, (ErrorPrediction, HashMap<SP, ThresholdError>)> =
+            accumulate(
+                analysis,
+                |asvp: ASVP| AV {
+                    attacker: asvp.attacker,
+                    victim: asvp.victim,
+                },
+                || (ErrorPrediction::default(), HashMap::new()),
+                |acc: &mut (ErrorPrediction, HashMap<SP, ThresholdError>),
+                 threshold_error,
+                 asvp: ASVP,
+                 av| {
+                    assert_eq!(av.attacker, asvp.attacker);
+                    assert_eq!(av.victim, asvp.victim);
+                    let sp = SP {
+                        slice: asvp.slice,
+                        page: asvp.page,
+                    };
+                    acc.0 += threshold_error.error;
+                    acc.1.insert(sp, threshold_error);
+                },
+            );
+        Ok(asvp_best_av_errors)
     }
 
     fn new_with_core_pairs(
         core_pairs: impl Iterator<Item = (usize, usize)> + Clone,
     ) -> Result<(Self, usize, usize), FlushAndFlushError> {
-        let m = MMappedMemory::new(PAGE_LEN);
+        let m = MMappedMemory::new(PAGE_LEN, false);
         let array: &[u8] = m.slice();
 
-        let res = Self::calibration_for_core_pairs(core_pairs, vec![array].into_iter());
+        let mut res = Self::calibration_for_core_pairs(core_pairs, vec![array].into_iter())?;
 
-        // Call the calibration function on a local page sized buffer.
-        // Classical analysis flow to generate all ASVP, Threshold, Error.
-        // Reduction to determine average / max error for each core.
+        let mut best_error_rate = 1.0;
+        let mut best_av = Default::default();
 
         // Select the proper core
-        unimplemented!();
+        for (av, (global_error_pred, thresholds)) in res.iter() {
+            if global_error_pred.error_rate() < best_error_rate {
+                best_av = *av;
+                best_error_rate = global_error_pred.error_rate();
+            }
+        }
+        Self::new(best_av.attacker, best_av.victim)
+            .map(|this| (this, best_av.attacker, best_av.victim))
+
+        // Set no threshold as calibrated on local array that will get dropped.
     }
 
     pub fn new_any_single_core() -> Result<(Self, CpuSet, usize), FlushAndFlushError> {
@@ -224,7 +289,7 @@ impl FlushAndFlush {
         self.slicing.hash(addr as usize).unwrap()
     }
 
-    pub fn set_cores(&mut self, attacker: usize, victim: usize) -> Result<(), nix::Error> {
+    pub fn set_cores(&mut self, attacker: usize, victim: usize) -> Result<(), FlushAndFlushError> {
         let old_attacker = self.attacker_core;
         let old_victim = self.victim_core;
 
@@ -247,34 +312,41 @@ impl FlushAndFlush {
         }
     }
 
-    fn recalibrate(&mut self, pages: impl IntoIterator<Item = VPN>) -> Result<(), nix::Error> {
+    fn recalibrate(
+        &mut self,
+        pages: impl IntoIterator<Item = VPN>,
+    ) -> Result<(), FlushAndFlushError> {
         // unset readiness status.
         // Call calibration with core pairs with a single core pair
        // Use results \o/ (or error out)
 
-        unimplemented!();
+        self.addresses_ready.clear();
+
+        // Fixme refactor in depth core pairs to make explicit main vs helper.
+        let core_pairs = vec![(self.attacker_core, self.victim_core)];
+
+        let pages: HashSet<&[u8]> = self
+            .thresholds
+            .keys()
+            .map(|sp: &SP| unsafe { &*slice_from_raw_parts(sp.page as *const u8, PAGE_LEN) })
+            .collect();
+
+        let mut res = Self::calibration_for_core_pairs(core_pairs.into_iter(), pages.into_iter())?;
+        assert_eq!(res.keys().count(), 1);
+        self.thresholds = res
+            .remove(&AV {
+                attacker: self.attacker_core,
+                victim: self.victim_core,
+            })
+            .unwrap()
+            .1;
+        Ok(())
     }
-}
 
-impl Debug for FlushAndFlush {
-    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
-        f.debug_struct("FlushAndFlush")
-            .field("thresholds", &self.thresholds)
-            .field("addresses_ready", &self.addresses_ready)
-            .field("slicing", &self.slicing)
-            .finish()
-    }
-}
-
-use cache_utils::calibration::cum_sum;
-use cache_utils::mmap::MMappedMemory;
-
-impl MultipleAddrCacheSideChannel for FlushAndFlush {
-    const MAX_ADDR: u32 = 3;
-
-    unsafe fn test<'a, 'b, 'c>(
-        &'a mut self,
+    unsafe fn test_impl<'a, 'b, 'c>(
+        &'a self,
         addresses: &'b mut (impl Iterator<Item = &'c *const u8> + Clone),
+        limit: u32,
     ) -> Result<Vec<(*const u8, CacheStatus)>, SideChannelError> {
         let mut result = Vec::new();
         let mut tmp = Vec::new();
@@ -283,7 +355,7 @@ impl MultipleAddrCacheSideChannel for FlushAndFlush {
             i += 1;
             let t = unsafe { only_flush(*addr) };
             tmp.push((addr, t));
-            if i == Self::MAX_ADDR {
+            if i == limit {
                 break;
             }
         }
@@ -304,9 +376,10 @@ impl MultipleAddrCacheSideChannel for FlushAndFlush {
         Ok(result)
     }
 
-    unsafe fn prepare<'a, 'b, 'c>(
+    unsafe fn prepare_impl<'a, 'b, 'c>(
        &'a mut self,
        addresses: &'b mut (impl Iterator<Item = &'c *const u8> + Clone),
+        limit: u32,
    ) -> Result<(), SideChannelError> {
        use core::arch::x86_64 as arch_x86;
        let mut i = 0;
@@ -321,7 +394,7 @@ impl MultipleAddrCacheSideChannel for FlushAndFlush {
             if !self.thresholds.contains_key(&SP { slice, page: vpn }) {
                 return Err(AddressNotCalibrated(*addr));
             }
-            if i == Self::MAX_ADDR {
+            if i == limit {
                 break;
             }
         }
@@ -329,14 +402,63 @@ impl MultipleAddrCacheSideChannel for FlushAndFlush {
         for addr in addresses {
             i += 1;
             unsafe { flush(*addr) };
+            //println!("{:p}", *addr);
             self.addresses_ready.insert(*addr);
-            if i == Self::MAX_ADDR {
+            if i == limit {
                 break;
             }
         }
         unsafe { arch_x86::_mm_mfence() };
         Ok(())
     }
+}
+
+impl Debug for FlushAndFlush {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        f.debug_struct("FlushAndFlush")
+            .field("thresholds", &self.thresholds)
+            .field("addresses_ready", &self.addresses_ready)
+            .field("slicing", &self.slicing)
+            .finish()
+    }
+}
+
+impl CoreSpec for FlushAndFlush {
+    fn main_core(&self) -> CpuSet {
+        let mut main = CpuSet::new();
+        main.set(self.attacker_core);
+        main
+    }
+
+    fn helper_core(&self) -> CpuSet {
+        let mut helper = CpuSet::new();
+        helper.set(self.victim_core);
+        helper
+    }
+}
+
+use cache_side_channel::CacheStatus::Hit;
+use cache_utils::calibration::cum_sum;
+use cache_utils::mmap::MMappedMemory;
+use covert_channels_evaluation::{BitIterator, CovertChannel};
+use std::ptr::slice_from_raw_parts;
+
+impl MultipleAddrCacheSideChannel for FlushAndFlush {
+    const MAX_ADDR: u32 = 3;
+
+    unsafe fn test<'a, 'b, 'c>(
+        &'a mut self,
+        addresses: &'b mut (impl Iterator<Item = &'c *const u8> + Clone),
+    ) -> Result<Vec<(*const u8, CacheStatus)>, SideChannelError> {
+        unsafe { self.test_impl(addresses, Self::MAX_ADDR) }
+    }
+
+    unsafe fn prepare<'a, 'b, 'c>(
+        &'a mut self,
+        addresses: &'b mut (impl Iterator<Item = &'c *const u8> + Clone),
+    ) -> Result<(), SideChannelError> {
+        unsafe { self.prepare_impl(addresses, Self::MAX_ADDR) }
+    }
+
     fn victim(&mut self, operation: &dyn Fn()) {
         operation(); // TODO use a different helper core ?
@@ -357,251 +479,141 @@ impl MultipleAddrCacheSideChannel for FlushAndFlush {
         &mut self,
         addresses: impl IntoIterator<Item = *const u8> + Clone,
     ) -> Result<(), ChannelFatalError> {
-        unimplemented!()
-        /*
-        let mut pages = HashMap::<VPN, HashSet<*const u8>>::new();
-        for addr in addresses {
-            let page = get_vpn(addr);
-            pages.entry(page).or_insert_with(HashSet::new).insert(addr);
-        }
-
-        let core_per_socket = find_core_per_socket();
-
-        let operations = [
-            CalibrateOperation2T {
-                prepare: maccess::<u8>,
-                op: only_flush,
-                name: "clflush_remote_hit",
-                display_name: "clflush remote hit",
-            },
-            CalibrateOperation2T {
-                prepare: noop::<u8>,
-                op: only_flush,
-                name: "clflush_miss",
-                display_name: "clflush miss",
-            },
-        ];
-        const HIT_INDEX: usize = 0;
-        const MISS_INDEX: usize = 1;
-
-        // Generate core iterator
-        let mut core_pairs: Vec<(usize, usize)> = Vec::new();
-
-        let old = sched_getaffinity(Pid::from_raw(0)).unwrap();
-
-        for i in 0..CpuSet::count() {
-            if old.is_set(i).unwrap() {
-                core_pairs.push((i, i));
-            }
-        }
-
-        // Probably needs more metadata
-        let mut per_core: HashMap<usize, HashMap<VPN, HashMap<Slice, (Threshold, f32)>>> =
-            HashMap::new();
-
-        let mut core_averages: HashMap<usize, (f32, u32)> = HashMap::new();
-
-        for (page, _) in pages {
-            let p = page as *const u8;
-            let r = unsafe {
-                calibrate_fixed_freq_2_thread(
-                    p,
-                    64, // FIXME : MAGIC
-                    PAGE_LEN as isize, // MAGIC
-                    &mut core_pairs.clone().into_iter(),
-                    &operations,
-                    CalibrationOptions {
-                        hist_params: HistParams {
-                            bucket_number: CFLUSH_BUCKET_NUMBER,
-                            bucket_size: CFLUSH_BUCKET_SIZE,
-                            iterations: CFLUSH_NUM_ITER << 1,
-                        },
-                        verbosity: Verbosity::NoOutput,
-                        optimised_addresses: true,
-                    },
-                    core_per_socket,
-                )
-            };
-
-        /* TODO refactor a good chunk of calibration result analysis to make thresholds in a separate function
-        Generating Cumulative Sums and then using that to compute error count for each possible threshold is a recurring joke.
-        It might be worth in a second time to refactor this to handle more generic strategies (such as double thresholds)
-        What about handling non attributes values (time values that are not attributed as hit or miss)
-        */
-
-        /*
-        Non Naive F+F flow
-        Vec<CalibrationResult2T> -> ASVP,Thresholds,Error Does not care as much. Can probably re-use functions to build a single one.
-        Add API to query predicted error rate, compare with covert channel result.
-        */
-
-            for result2t in r {
-                if result2t.main_core != result2t.helper_core {
-                    panic!("Unexpected core numbers");
-                }
-                let core = result2t.main_core;
-                match result2t.res {
-                    Err(e) => panic!("Oops: {:#?}", e),
-                    Ok(results_1t) => {
-                        for r1t in results_1t {
-                            // This will be turned into map_values style functions + Calibration1T -> Reasonable Type
-
-                            // Already handled
-                            let offset = r1t.offset;
-                            let addr = unsafe { p.offset(offset) };
-                            let slice = self.get_slice(addr);
-
-                            // To Raw histogram
-                            let miss_hist = &r1t.histogram[MISS_INDEX];
-                            let hit_hist = &r1t.histogram[HIT_INDEX];
-                            if miss_hist.len() != hit_hist.len() {
-                                panic!("Maformed results");
-                            }
-                            let len = miss_hist.len();
-
-                            // Cum Sums
-                            let miss_cum_sum = cum_sum(miss_hist);
-                            let hit_cum_sum = cum_sum(hit_hist);
-                            let miss_total = miss_cum_sum[len - 1];
-                            let hit_total = hit_cum_sum[len - 1];
-
-                            // Error rate per threshold computations
-
-                            // Threshold is less than equal => miss, strictly greater than => hit
-                            let mut error_miss_less_than_hit = vec![0; len - 1];
-                            // Threshold is less than equal => hit, strictly greater than => miss
-                            let mut error_hit_less_than_miss = vec![0; len - 1];
-
-                            let mut min_error_hlm = u32::max_value();
-                            let mut min_error_mlh = u32::max_value();
-
-                            for i in 0..(len - 1) {
-                                error_hit_less_than_miss[i] =
-                                    miss_cum_sum[i] + (hit_total - hit_cum_sum[i]);
-                                error_miss_less_than_hit[i] =
-                                    hit_cum_sum[i] + (miss_total - miss_cum_sum[i]);
-
-                                if error_hit_less_than_miss[i] < min_error_hlm {
-                                    min_error_hlm = error_hit_less_than_miss[i];
-                                }
-                                if error_miss_less_than_hit[i] < min_error_mlh {
-                                    min_error_mlh = error_miss_less_than_hit[i];
-                                }
-                            }
-
-                            let hlm = min_error_hlm < min_error_mlh;
-
-                            let (errors, min_error) = if hlm {
-                                (&error_hit_less_than_miss, min_error_hlm)
-                            } else {
-                                (&error_miss_less_than_hit, min_error_mlh)
-                            };
-
-                            // Find the min -> gives potetial thresholds with info
-                            let mut potential_thresholds = Vec::new();
-
-                            for i in 0..errors.len() {
-                                if errors[i] == min_error {
-                                    let num_true_hit;
-                                    let num_false_hit;
-                                    let num_true_miss;
-                                    let num_false_miss;
-                                    if hlm {
-                                        num_true_hit = hit_cum_sum[i];
-                                        num_false_hit = miss_cum_sum[i];
-                                        num_true_miss = miss_total - num_false_hit;
-                                        num_false_miss = hit_total - num_true_hit;
-                                    } else {
-                                        num_true_miss = miss_cum_sum[i];
-                                        num_false_miss = hit_cum_sum[i];
-                                        num_true_hit = hit_total - num_false_miss;
-                                        num_false_hit = miss_total - num_true_miss;
-                                    }
-                                    potential_thresholds.push((
-                                        i,
-                                        num_true_hit,
-                                        num_false_hit,
-                                        num_true_miss,
-                                        num_false_miss,
-                                        min_error as f32 / (hit_total + miss_total) as f32,
-                                    ));
-                                }
-                            }
-
-                            let index = (potential_thresholds.len() - 1) / 2;
-                            let (threshold, _, _, _, _, error_rate) = potential_thresholds[index];
-                            // insert in per_core
-                            if per_core
-                                .entry(core)
-                                .or_insert_with(HashMap::new)
-                                .entry(page)
-                                .or_insert_with(HashMap::new)
-                                .insert(
-                                    slice,
-                                    (
-                                        Threshold {
-                                            bucket_index: threshold, // FIXME the bucket to time conversion
-                                            miss_faster_than_hit: !hlm,
-                                        },
-                                        error_rate,
-                                    ),
-                                )
-                                .is_some()
-                            {
-                                panic!("Duplicate slice result");
-                            }
-                            let core_average = core_averages.get(&core).unwrap_or(&(0.0, 0));
-                            let new_core_average =
-                                (core_average.0 + error_rate, core_average.1 + 1);
-                            core_averages.insert(core, new_core_average);
-                        }
-                    }
-                }
-            }
-        }
-
-        // We now get ASVP stuff with the correct core(in theory)
-
-        // We now have a HashMap associating stuffs to cores, iterate on it and select the best.
-        let mut best_core = 0;
-
-        let mut best_error_rate = {
-            let ca = core_averages[&0];
-            ca.0 / ca.1 as f32
-        };
-        for (core, average) in core_averages {
-            let error_rate = average.0 / average.1 as f32;
-            if error_rate < best_error_rate {
-                best_core = core;
-                best_error_rate = error_rate;
-            }
-        }
-        let mut thresholds = HashMap::new();
-        println!("Best core: {}, rate: {}", best_core, best_error_rate);
-        let tmp = per_core.remove(&best_core).unwrap();
-        for (page, per_page) in tmp {
-            let page_entry = thresholds.entry(page).or_insert_with(HashMap::new);
-            for (slice, per_slice) in per_page {
-                println!(
-                    "page: {:x}, slice: {}, threshold: {:?}, error_rate: {}",
-                    page, slice, per_slice.0, per_slice.1
-                );
-                page_entry.insert(slice, per_slice.0);
-            }
-        }
-        self.thresholds = thresholds;
-        println!("{:#?}", self.thresholds);
-
-        // TODO handle error better for affinity setting and other issues.
-
-        self.addresses_ready.clear();
-
-        let mut cpuset = CpuSet::new();
-        cpuset.set(best_core).unwrap();
-        sched_setaffinity(Pid::from_raw(0), &cpuset).unwrap();
-        Ok(())
-        */
+        let core_pair = vec![(self.attacker_core, self.victim_core)];
+        let pages = addresses
+            .into_iter()
+            .map(|addr: *const u8| unsafe {
+                &*slice_from_raw_parts(get_vpn(addr) as *const u8, PAGE_LEN)
+            })
+            .collect::<HashSet<&[u8]>>();
+
+        let mut res =
+            match Self::calibration_for_core_pairs(core_pair.into_iter(), pages.into_iter()) {
+                Err(e) => {
+                    return Err(ChannelFatalError::Oops);
+                }
+                Ok(r) => r,
+            };
+        assert_eq!(res.keys().count(), 1);
+        let t = res
+            .remove(&AV {
+                attacker: self.attacker_core,
+                victim: self.victim_core,
+            })
+            .unwrap()
+            .1;
+
+        for (sp, threshold) in t {
+            //println!("Inserting sp: {:?} => Threshold: {:?}", sp, threshold);
+            self.thresholds.insert(sp, threshold);
+        }
+        Ok(())
+    }
+}
+
+unsafe impl Send for FlushAndFlush {}
+unsafe impl Sync for FlushAndFlush {}
+
+impl CovertChannel for FlushAndFlush {
+    const BIT_PER_PAGE: usize = 1; //PAGE_SHIFT - 6; // FIXME MAGIC cache line size
+
+    unsafe fn transmit<'a>(&self, page: *const u8, bits: &mut BitIterator<'a>) {
+        let mut offset = 0;
+
+        if Self::BIT_PER_PAGE == 1 {
+            let page = self.preferred_address[&page];
+
+            if let Some(b) = bits.next() {
+                //println!("Transmitting {} on page {:p}", b, page);
+                if b {
+                    unsafe { only_reload(page) };
+                } else {
+                    unsafe { only_flush(page) };
+                }
+            }
+        } else {
+            for i in 0..Self::BIT_PER_PAGE {
+                if let Some(b) = bits.next() {
+                    if b {
+                        offset += 1 << i + 6; // Magic FIXME cache line size
+                    }
+                }
+            }
+            unsafe { maccess(page.offset(offset as isize)) };
+        }
+    }
+
+    unsafe fn receive(&self, page: *const u8) -> Vec<bool> {
+        if Self::BIT_PER_PAGE == 1 {
+            let addresses: Vec<*const u8> = vec![self.preferred_address[&page]];
+            let r = unsafe { self.test_impl(&mut addresses.iter(), u32::max_value()) };
+            match r {
+                Err(e) => panic!("{:?}", e),
+                Ok(status_vec) => {
+                    assert_eq!(status_vec.len(), 1);
+                    let received = status_vec[0].1 == Hit;
+                    //println!("Received {} on page {:p}", received, page);
+                    return vec![received];
+                }
+            }
+        } else {
+            let addresses = (0..PAGE_LEN)
+                .step_by(64)
+                .map(|o| unsafe { page.offset(o as isize) })
+                .collect::<HashSet<*const u8>>();
+            let r = unsafe { self.test_impl(&mut addresses.iter(), u32::max_value()) };
+            match r {
+                Err(e) => panic!("{:?}", e),
+                Ok(status_vec) => {
+                    for (addr, status) in status_vec {
+                        if status == Hit {
+                            let offset = unsafe { addr.offset_from(page) } >> 6; // Fixme cache line size magic
+                            let mut res = Vec::new();
+                            for i in 0..Self::BIT_PER_PAGE {
+                                res.push((offset & (1 << i)) != 0);
+                            }
+                            return res;
+                        }
+                    }
+                }
+            }
+        }
+        vec![false; Self::BIT_PER_PAGE]
+    }
+
+    unsafe fn ready_page(&mut self, page: *const u8) {
+        let r = unsafe { self.calibrate(vec![page].into_iter()) }.unwrap();
+        if Self::BIT_PER_PAGE == 1 {
+            let mut best_error_rate = 1.0;
+            let mut best_slice = 0;
+            for (sp, threshold_error) in
+                self.thresholds.iter().filter(|kv| kv.0.page == page as VPN)
+            {
+                if threshold_error.error.error_rate() < best_error_rate {
+                    best_error_rate = threshold_error.error.error_rate();
+                    best_slice = sp.slice;
+                }
+            }
+            for i in 0..PAGE_LEN {
+                let addr = unsafe { page.offset(i as isize) };
+                if self.get_slice(addr) == best_slice {
+                    self.preferred_address.insert(page, addr);
+                    let r = unsafe { self.prepare_impl(&mut vec![addr].iter(), u32::max_value()) }
+                        .unwrap();
+
+                    break;
+                }
+            }
+        } else {
+            let addresses = (0..PAGE_LEN)
+                .step_by(64)
+                .map(|o| unsafe { page.offset(o as isize) })
+                .collect::<Vec<*const u8>>();
+            //println!("{:#?}", addresses);
+            let r = unsafe { self.prepare_impl(&mut addresses.iter(), u32::max_value()) }.unwrap();
+        }
     }
 }

flush_reload/.cargo/config (new file, 2 lines)
@@ -0,0 +1,2 @@
+[build]
+target = "x86_64-unknown-linux-gnu"
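The CovertChannel implementation added for FlushAndFlush encodes one bit per page: the sender either reloads or flushes the page's preferred cache line, and the receiver times a clflush of that same line against a calibrated threshold. A minimal x86_64 sketch of that primitive using raw intrinsics instead of the cache_utils helpers; the threshold is a placeholder that would normally come from calibration, and the helper names are illustrative:

    use core::arch::x86_64::{__rdtscp, _mm_clflush, _mm_mfence};

    /// Time a clflush of `p`; in Flush+Flush the flush latency differs depending
    /// on whether the line was cached, which is what carries the bit.
    unsafe fn timed_clflush(p: *const u8) -> u64 {
        let mut aux = 0u32;
        _mm_mfence();
        let start = __rdtscp(&mut aux);
        _mm_clflush(p);
        _mm_mfence();
        let end = __rdtscp(&mut aux);
        end - start
    }

    /// Sender side: touch the line for a 1, flush it for a 0.
    unsafe fn send_bit(p: *const u8, bit: bool) {
        if bit {
            p.read_volatile();
        } else {
            _mm_clflush(p);
        }
        _mm_mfence();
    }

    /// Receiver side: compare the clflush time with a calibrated threshold.
    unsafe fn receive_bit(p: *const u8, threshold_cycles: u64) -> bool {
        timed_clflush(p) > threshold_cycles
    }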
@@ -9,3 +9,5 @@ edition = "2018"
 [dependencies]
 cache_utils = { path = "../cache_utils" }
 cache_side_channel = { path = "../cache_side_channel" }
+covert_channels_evaluation = {path = "../covert_channels_evaluation"}
+nix = "0.18.0"
@@ -1,8 +1,11 @@
 use cache_side_channel::{
-    CacheStatus, ChannelFatalError, SideChannelError, SingleAddrCacheSideChannel,
+    CacheStatus, ChannelFatalError, CoreSpec, SideChannelError, SingleAddrCacheSideChannel,
 };
 use cache_utils::calibration::only_reload;
 use cache_utils::flush;
+use covert_channels_evaluation::{BitIterator, CovertChannel};
+use nix::sched::{sched_getaffinity, CpuSet};
+use nix::unistd::Pid;
 
 #[derive(Debug)]
 pub struct NaiveFlushAndReload {
@@ -58,3 +61,42 @@ impl SingleAddrCacheSideChannel for NaiveFlushAndReload {
         Ok(())
     }
 }
+
+unsafe impl Send for NaiveFlushAndReload {}
+unsafe impl Sync for NaiveFlushAndReload {}
+
+impl CoreSpec for NaiveFlushAndReload {
+    fn main_core(&self) -> CpuSet {
+        sched_getaffinity(Pid::from_raw(0)).unwrap()
+    }
+
+    fn helper_core(&self) -> CpuSet {
+        sched_getaffinity(Pid::from_raw(0)).unwrap()
+    }
+}
+
+impl CovertChannel for NaiveFlushAndReload {
+    const BIT_PER_PAGE: usize = 1;
+
+    unsafe fn transmit<'a>(&self, page: *const u8, bits: &mut BitIterator<'a>) {
+        unimplemented!()
+    }
+
+    unsafe fn receive(&self, page: *const u8) -> Vec<bool> {
+        unimplemented!()
+        /*
+        let r = self.test_single(page);
+        match r {
+            Err(e) => unimplemented!(),
+            Ok(status) => match status {
+                CacheStatus::Hit => vec![true],
+                CacheStatus::Miss => vec![false],
+            },
+        }
+        */
+    }
+
+    unsafe fn ready_page(&mut self, page: *const u8) {
+        unimplemented!()
+    }
+}