Update the cache channel interface

Guillaume DIDIER 2021-06-28 16:26:02 +02:00
parent 1b38b4913c
commit b7b5cbbfc3
14 changed files with 428 additions and 300 deletions
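In short, this change moves the timing primitive behind a Default bound plus a NEED_RESET constant, and threads a reset flag through the test methods. A condensed sketch of the resulting trait and its two implementors, assembled from the hunks below (not a complete file; only_flush and only_reload come from cache_utils::calibration):

pub trait TimingChannelPrimitives: Debug + Send + Sync + Default {
    unsafe fn attack(&self, addr: *const u8) -> u64;
    const NEED_RESET: bool;
}

// Flush+Flush times a flush, which also evicts the line, so the channel never
// needs to rearm the address after a measurement.
#[derive(Debug, Default)]
pub struct FFPrimitives {}
impl TimingChannelPrimitives for FFPrimitives {
    unsafe fn attack(&self, addr: *const u8) -> u64 {
        unsafe { only_flush(addr) }
    }
    const NEED_RESET: bool = false;
}

// Flush+Reload times a load, which leaves the line cached, so the channel must
// flush the address again before the next measurement.
#[derive(Debug, Default)]
pub struct FRPrimitives {}
impl TimingChannelPrimitives for FRPrimitives {
    unsafe fn attack(&self, addr: *const u8) -> u64 {
        unsafe { only_reload(addr) }
    }
    const NEED_RESET: bool = true;
}

Because the primitive is now Default, constructors such as TopologyAwareTimingChannel::new and NaiveTimingChannel::new no longer take a primitives argument; callers instead pass reset: bool to test, test_single and the *_impl helpers, and the channel flushes the probed address only when T::NEED_RESET && reset holds.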

Cargo.lock generated
View File

@ -356,6 +356,7 @@ dependencies = [
"cache_side_channel", "cache_side_channel",
"cache_utils", "cache_utils",
"flush_flush", "flush_flush",
"flush_reload",
"nix", "nix",
"rand", "rand",
] ]

View File

@ -30,26 +30,20 @@ fn main() {
let te = TE_CITRON_VERT; let te = TE_CITRON_VERT;
let mut side_channel_fr = NaiveFlushAndReload::new( let mut side_channel_naivefr = NaiveFlushAndReload::new(Threshold {
Threshold {
bucket_index: 220, bucket_index: 220,
miss_faster_than_hit: false, miss_faster_than_hit: false,
}, });
NaiveFRPrimitives {}, let mut side_channel_naiveff = NaiveFlushAndFlush::new(Threshold {
);
let mut side_channel_naiveff = NaiveFlushAndFlush::new(
Threshold {
bucket_index: 202, bucket_index: 202,
miss_faster_than_hit: true, miss_faster_than_hit: true,
}, });
FFPrimitives {},
);
for (index, key) in [KEY1, KEY2].iter().enumerate() { for (index, key) in [KEY1, KEY2].iter().enumerate() {
println!("AES attack with Naive F+R, key {}", index); println!("AES attack with Naive F+R, key {}", index);
unsafe { unsafe {
attack_t_tables_poc( attack_t_tables_poc(
&mut side_channel_fr, &mut side_channel_naivefr,
AESTTableParams { AESTTableParams {
num_encryptions: 1 << 12, num_encryptions: 1 << 12,
key: *key, key: *key,
@ -74,11 +68,8 @@ fn main() {
}; };
println!("AES attack with Single F+F, key {}", index); println!("AES attack with Single F+F, key {}", index);
{ {
let mut side_channel_ff = SingleFlushAndFlush::new( let mut side_channel_ff =
FlushAndFlush::new_any_single_core(FFPrimitives {}) SingleFlushAndFlush::new(FlushAndFlush::new_any_single_core().unwrap().0);
.unwrap()
.0,
);
unsafe { unsafe {
attack_t_tables_poc( attack_t_tables_poc(
&mut side_channel_ff, &mut side_channel_ff,

View File

@ -36,8 +36,9 @@ use std::ptr::slice_from_raw_parts;
pub mod naive; pub mod naive;
pub trait TimingChannelPrimitives: Debug + Send + Sync { pub trait TimingChannelPrimitives: Debug + Send + Sync + Default {
unsafe fn attack(&self, addr: *const u8) -> u64; unsafe fn attack(&self, addr: *const u8) -> u64;
const NEED_RESET: bool;
} }
pub struct TopologyAwareTimingChannelHandle { pub struct TopologyAwareTimingChannelHandle {
@ -79,7 +80,7 @@ unsafe impl<T: TimingChannelPrimitives + Send> Send for TopologyAwareTimingChann
unsafe impl<T: TimingChannelPrimitives + Sync> Sync for TopologyAwareTimingChannel<T> {} unsafe impl<T: TimingChannelPrimitives + Sync> Sync for TopologyAwareTimingChannel<T> {}
impl<T: TimingChannelPrimitives> TopologyAwareTimingChannel<T> { impl<T: TimingChannelPrimitives> TopologyAwareTimingChannel<T> {
pub fn new(main_core: usize, helper_core: usize, t: T) -> Result<Self, TopologyAwareError> { pub fn new(main_core: usize, helper_core: usize) -> Result<Self, TopologyAwareError> {
if let Some(slicing) = get_cache_slicing(find_core_per_socket()) { if let Some(slicing) = get_cache_slicing(find_core_per_socket()) {
if !slicing.can_hash() { if !slicing.can_hash() {
return Err(TopologyAwareError::NoSlicing); return Err(TopologyAwareError::NoSlicing);
@ -92,7 +93,7 @@ impl<T: TimingChannelPrimitives> TopologyAwareTimingChannel<T> {
main_core, main_core,
helper_core, helper_core,
preferred_address: Default::default(), preferred_address: Default::default(),
t, t: Default::default(),
calibration_epoch: 0, calibration_epoch: 0,
}; };
Ok(ret) Ok(ret)
@ -205,11 +206,12 @@ impl<T: TimingChannelPrimitives> TopologyAwareTimingChannel<T> {
fn new_with_core_pairs( fn new_with_core_pairs(
core_pairs: impl Iterator<Item = (usize, usize)> + Clone, core_pairs: impl Iterator<Item = (usize, usize)> + Clone,
t: T,
) -> Result<(Self, usize, usize), TopologyAwareError> { ) -> Result<(Self, usize, usize), TopologyAwareError> {
let m = MMappedMemory::new(PAGE_LEN, false); let m = MMappedMemory::new(PAGE_LEN, false);
let array: &[u8] = m.slice(); let array: &[u8] = m.slice();
let t = Default::default();
let mut res = Self::calibration_for_core_pairs(&t, core_pairs, vec![array].into_iter())?; let mut res = Self::calibration_for_core_pairs(&t, core_pairs, vec![array].into_iter())?;
let mut best_error_rate = 1.0; let mut best_error_rate = 1.0;
@ -223,13 +225,13 @@ impl<T: TimingChannelPrimitives> TopologyAwareTimingChannel<T> {
best_error_rate = global_error_pred.error_rate(); best_error_rate = global_error_pred.error_rate();
} }
} }
Self::new(best_av.attacker, best_av.victim, t) Self::new(best_av.attacker, best_av.victim)
.map(|this| (this, best_av.attacker, best_av.victim)) .map(|this| (this, best_av.attacker, best_av.victim))
// Set no threshold as calibrated on local array that will get dropped. // Set no threshold as calibrated on local array that will get dropped.
} }
pub fn new_any_single_core(t: T) -> Result<(Self, CpuSet, usize), TopologyAwareError> { pub fn new_any_single_core() -> Result<(Self, CpuSet, usize), TopologyAwareError> {
// Generate core iterator // Generate core iterator
let mut core_pairs: Vec<(usize, usize)> = Vec::new(); let mut core_pairs: Vec<(usize, usize)> = Vec::new();
@ -246,7 +248,7 @@ impl<T: TimingChannelPrimitives> TopologyAwareTimingChannel<T> {
// Call out to private constructor that takes a core pair list, determines best and makes the choice. // Call out to private constructor that takes a core pair list, determines best and makes the choice.
// The private constructor will set the correct affinity for main (attacker thread) // The private constructor will set the correct affinity for main (attacker thread)
Self::new_with_core_pairs(core_pairs.into_iter(), t).map(|(channel, attacker, victim)| { Self::new_with_core_pairs(core_pairs.into_iter()).map(|(channel, attacker, victim)| {
assert_eq!(attacker, victim); assert_eq!(attacker, victim);
(channel, old, attacker) (channel, old, attacker)
}) })
@ -254,7 +256,6 @@ impl<T: TimingChannelPrimitives> TopologyAwareTimingChannel<T> {
pub fn new_any_two_core( pub fn new_any_two_core(
distinct: bool, distinct: bool,
t: T,
) -> Result<(Self, CpuSet, usize, usize), TopologyAwareError> { ) -> Result<(Self, CpuSet, usize, usize), TopologyAwareError> {
let old = sched_getaffinity(Pid::from_raw(0)).unwrap(); let old = sched_getaffinity(Pid::from_raw(0)).unwrap();
@ -272,7 +273,7 @@ impl<T: TimingChannelPrimitives> TopologyAwareTimingChannel<T> {
} }
} }
Self::new_with_core_pairs(core_pairs.into_iter(), t).map(|(channel, attacker, victim)| { Self::new_with_core_pairs(core_pairs.into_iter()).map(|(channel, attacker, victim)| {
if distinct { if distinct {
assert_ne!(attacker, victim); assert_ne!(attacker, victim);
} }
@ -344,11 +345,15 @@ impl<T: TimingChannelPrimitives> TopologyAwareTimingChannel<T> {
unsafe fn test_one_impl( unsafe fn test_one_impl(
&self, &self,
handle: &mut TopologyAwareTimingChannelHandle, handle: &mut TopologyAwareTimingChannelHandle,
reset: bool,
) -> Result<CacheStatus, SideChannelError> { ) -> Result<CacheStatus, SideChannelError> {
if handle.calibration_epoch != self.calibration_epoch { if handle.calibration_epoch != self.calibration_epoch {
return Err(SideChannelError::NeedRecalibration); return Err(SideChannelError::NeedRecalibration);
} }
let time = unsafe { self.t.attack(handle.addr) }; let time = unsafe { self.t.attack(handle.addr) };
if T::NEED_RESET && reset {
unsafe { flush(handle.addr) };
}
if handle.threshold.is_hit(time) { if handle.threshold.is_hit(time) {
Ok(CacheStatus::Hit) Ok(CacheStatus::Hit)
} else { } else {
@ -360,12 +365,13 @@ impl<T: TimingChannelPrimitives> TopologyAwareTimingChannel<T> {
&self, &self,
addresses: &mut Vec<&mut TopologyAwareTimingChannelHandle>, addresses: &mut Vec<&mut TopologyAwareTimingChannelHandle>,
limit: u32, limit: u32,
reset: bool,
) -> Result<Vec<(*const u8, CacheStatus)>, SideChannelError> { ) -> Result<Vec<(*const u8, CacheStatus)>, SideChannelError> {
let mut result = Vec::new(); let mut result = Vec::new();
let mut tmp = Vec::new(); let mut tmp = Vec::new();
let mut i = 0; let mut i = 0;
for addr in addresses { for addr in addresses {
let r = unsafe { self.test_one_impl(addr) }; let r = unsafe { self.test_one_impl(addr, false) };
tmp.push((addr.to_const_u8_pointer(), r)); tmp.push((addr.to_const_u8_pointer(), r));
i += 1; i += 1;
if i == limit { if i == limit {
@ -381,6 +387,9 @@ impl<T: TimingChannelPrimitives> TopologyAwareTimingChannel<T> {
return Err(e); return Err(e);
} }
} }
if T::NEED_RESET && reset {
unsafe { flush(addr) };
}
} }
Ok(result) Ok(result)
} }
@ -456,11 +465,12 @@ impl<T: TimingChannelPrimitives> MultipleAddrCacheSideChannel for TopologyAwareT
unsafe fn test<'a>( unsafe fn test<'a>(
&mut self, &mut self,
addresses: &mut Vec<&'a mut Self::Handle>, addresses: &mut Vec<&'a mut Self::Handle>,
reset: bool,
) -> Result<Vec<(*const u8, CacheStatus)>, SideChannelError> ) -> Result<Vec<(*const u8, CacheStatus)>, SideChannelError>
where where
Self::Handle: 'a, Self::Handle: 'a,
{ {
unsafe { self.test_impl(addresses, Self::MAX_ADDR) } unsafe { self.test_impl(addresses, Self::MAX_ADDR, reset) }
} }
unsafe fn prepare<'a>( unsafe fn prepare<'a>(
@ -558,12 +568,11 @@ impl<T: TimingChannelPrimitives> CovertChannel for TopologyAwareTimingChannel<T>
} }
unsafe fn receive(&self, handle: &mut Self::CovertChannelHandle) -> Vec<bool> { unsafe fn receive(&self, handle: &mut Self::CovertChannelHandle) -> Vec<bool> {
let r = unsafe { self.test_one_impl(&mut handle.0) }; let r = unsafe { self.test_one_impl(&mut handle.0, false) }; // transmit does the reload / flush as needed.
match r { match r {
Err(e) => panic!("{:?}", e), Err(e) => panic!("{:?}", e),
Ok(status) => { Ok(status) => {
let received = status == CacheStatus::Hit; let received = status == CacheStatus::Hit;
//println!("Received {} on page {:p}", received, page);
return vec![received]; return vec![received];
} }
} }
@ -672,8 +681,9 @@ impl<T: MultipleAddrCacheSideChannel> SingleAddrCacheSideChannel for SingleChann
unsafe fn test_single( unsafe fn test_single(
&mut self, &mut self,
handle: &mut Self::Handle, handle: &mut Self::Handle,
reset: bool,
) -> Result<CacheStatus, SideChannelError> { ) -> Result<CacheStatus, SideChannelError> {
unsafe { self.inner.test_single(handle) } unsafe { self.inner.test_single(handle, reset) }
} }
unsafe fn prepare_single(&mut self, handle: &mut Self::Handle) -> Result<(), SideChannelError> { unsafe fn prepare_single(&mut self, handle: &mut Self::Handle) -> Result<(), SideChannelError> {

View File

@ -36,13 +36,13 @@ unsafe impl<T: TimingChannelPrimitives + Send> Send for NaiveTimingChannel<T> {}
unsafe impl<T: TimingChannelPrimitives + Sync> Sync for NaiveTimingChannel<T> {} unsafe impl<T: TimingChannelPrimitives + Sync> Sync for NaiveTimingChannel<T> {}
impl<T: TimingChannelPrimitives> NaiveTimingChannel<T> { impl<T: TimingChannelPrimitives> NaiveTimingChannel<T> {
pub fn new(threshold: Threshold, t: T) -> Self { pub fn new(threshold: Threshold) -> Self {
Self { Self {
threshold, threshold,
current: Default::default(), current: Default::default(),
main_core: sched_getaffinity(Pid::from_raw(0)).unwrap(), main_core: sched_getaffinity(Pid::from_raw(0)).unwrap(),
helper_core: sched_getaffinity(Pid::from_raw(0)).unwrap(), helper_core: sched_getaffinity(Pid::from_raw(0)).unwrap(),
channel_primitive: t, channel_primitive: Default::default(),
} }
} }
@ -68,9 +68,13 @@ impl<T: TimingChannelPrimitives> NaiveTimingChannel<T> {
unsafe fn test_impl( unsafe fn test_impl(
&self, &self,
handle: &mut NaiveTimingChannelHandle, handle: &mut NaiveTimingChannelHandle,
reset: bool,
) -> Result<CacheStatus, SideChannelError> { ) -> Result<CacheStatus, SideChannelError> {
// This should be handled in prepare / unprepare // This should be handled in prepare / unprepare
let t = unsafe { self.channel_primitive.attack(handle.addr) }; let t = unsafe { self.channel_primitive.attack(handle.addr) };
if T::NEED_RESET && reset {
unsafe { flush(handle.addr) };
}
if self.threshold.is_hit(t) { if self.threshold.is_hit(t) {
Ok(CacheStatus::Hit) Ok(CacheStatus::Hit)
} else { } else {
@ -121,7 +125,7 @@ impl<T: TimingChannelPrimitives + Send + Sync> CovertChannel for NaiveTimingChan
} }
unsafe fn receive(&self, handle: &mut Self::CovertChannelHandle) -> Vec<bool> { unsafe fn receive(&self, handle: &mut Self::CovertChannelHandle) -> Vec<bool> {
let r = unsafe { self.test_impl(handle) }; let r = unsafe { self.test_impl(handle, false) };
match r { match r {
Err(e) => panic!(), Err(e) => panic!(),
Ok(status) => match status { Ok(status) => match status {
@ -141,8 +145,9 @@ impl<T: TimingChannelPrimitives> SingleAddrCacheSideChannel for NaiveTimingChann
unsafe fn test_single( unsafe fn test_single(
&mut self, &mut self,
handle: &mut Self::Handle, handle: &mut Self::Handle,
reset: bool,
) -> Result<CacheStatus, SideChannelError> { ) -> Result<CacheStatus, SideChannelError> {
unsafe { self.test_impl(handle) } unsafe { self.test_impl(handle, reset) }
} }
unsafe fn prepare_single(&mut self, handle: &mut Self::Handle) -> Result<(), SideChannelError> { unsafe fn prepare_single(&mut self, handle: &mut Self::Handle) -> Result<(), SideChannelError> {

View File

@ -56,6 +56,7 @@ pub trait SingleAddrCacheSideChannel: CoreSpec + Debug {
unsafe fn test_single( unsafe fn test_single(
&mut self, &mut self,
handle: &mut Self::Handle, handle: &mut Self::Handle,
reset: bool,
) -> Result<CacheStatus, SideChannelError>; ) -> Result<CacheStatus, SideChannelError>;
/// # Safety /// # Safety
/// ///
@ -80,6 +81,7 @@ pub trait MultipleAddrCacheSideChannel: CoreSpec + Debug {
unsafe fn test<'a, 'b, 'c>( unsafe fn test<'a, 'b, 'c>(
&'a mut self, &'a mut self,
addresses: &'b mut Vec<&'c mut Self::Handle>, addresses: &'b mut Vec<&'c mut Self::Handle>,
reset: bool,
) -> Result<Vec<(*const u8, CacheStatus)>, SideChannelError> ) -> Result<Vec<(*const u8, CacheStatus)>, SideChannelError>
where where
Self::Handle: 'c; Self::Handle: 'c;
@ -110,9 +112,10 @@ impl<T: MultipleAddrCacheSideChannel> SingleAddrCacheSideChannel for T {
unsafe fn test_single( unsafe fn test_single(
&mut self, &mut self,
handle: &mut Self::Handle, handle: &mut Self::Handle,
reset: bool,
) -> Result<CacheStatus, SideChannelError> { ) -> Result<CacheStatus, SideChannelError> {
let mut handles = vec![handle]; let mut handles = vec![handle];
unsafe { self.test(&mut handles) }.map(|v| v[0].1) unsafe { self.test(&mut handles, reset) }.map(|v| v[0].1)
} }
unsafe fn prepare_single(&mut self, handle: &mut Self::Handle) -> Result<(), SideChannelError> { unsafe fn prepare_single(&mut self, handle: &mut Self::Handle) -> Result<(), SideChannelError> {

View File

@ -66,7 +66,6 @@ impl<T: SingleAddrCacheSideChannel> TableCacheSideChannel<T::Handle> for T {
for addr in addresses { for addr in addresses {
let mut hit = 0; let mut hit = 0;
let mut miss = 0; let mut miss = 0;
for iteration in 0..100 {
match unsafe { self.prepare_single(addr) } { match unsafe { self.prepare_single(addr) } {
Ok(_) => {} Ok(_) => {}
Err(e) => match e { Err(e) => match e {
@ -76,8 +75,9 @@ impl<T: SingleAddrCacheSideChannel> TableCacheSideChannel<T::Handle> for T {
SideChannelError::AddressNotCalibrated(_addr) => unimplemented!(), SideChannelError::AddressNotCalibrated(_addr) => unimplemented!(),
}, },
} }
for iteration in 0..100 {
self.victim_single(victim); self.victim_single(victim);
let r = unsafe { self.test_single(addr) }; let r = unsafe { self.test_single(addr, true) };
match r { match r {
Ok(status) => {} Ok(status) => {}
Err(e) => match e { Err(e) => match e {
@ -90,17 +90,8 @@ impl<T: SingleAddrCacheSideChannel> TableCacheSideChannel<T::Handle> for T {
} }
} }
for _iteration in 0..num_iteration { for _iteration in 0..num_iteration {
match unsafe { self.prepare_single(addr) } {
Ok(_) => {}
Err(e) => match e {
SideChannelError::NeedRecalibration => unimplemented!(),
SideChannelError::FatalError(e) => return Err(e),
SideChannelError::AddressNotReady(_addr) => panic!(),
SideChannelError::AddressNotCalibrated(_addr) => unimplemented!(),
},
}
self.victim_single(victim); self.victim_single(victim);
let r = unsafe { self.test_single(addr) }; let r = unsafe { self.test_single(addr, true) };
match r { match r {
Ok(status) => match status { Ok(status) => match status {
CacheStatus::Hit => { CacheStatus::Hit => {
@ -169,7 +160,6 @@ impl<T: MultipleAddrCacheSideChannel> TableCacheSideChannel<T::Handle> for T {
for i in 0..100 { for i in 0..100 {
// TODO Warmup // TODO Warmup
} }
for i in 0..num_iteration {
match unsafe { MultipleAddrCacheSideChannel::prepare(self, &mut batch) } { match unsafe { MultipleAddrCacheSideChannel::prepare(self, &mut batch) } {
Ok(_) => {} Ok(_) => {}
Err(e) => match e { Err(e) => match e {
@ -186,9 +176,10 @@ impl<T: MultipleAddrCacheSideChannel> TableCacheSideChannel<T::Handle> for T {
} }
}, },
} }
for i in 0..num_iteration {
MultipleAddrCacheSideChannel::victim(self, victim); MultipleAddrCacheSideChannel::victim(self, victim);
let r = unsafe { MultipleAddrCacheSideChannel::test(self, &mut batch) }; // Fixme error handling let r = unsafe { MultipleAddrCacheSideChannel::test(self, &mut batch, true) }; // Fixme error handling
match r { match r {
Err(e) => match e { Err(e) => match e {
SideChannelError::NeedRecalibration => { SideChannelError::NeedRecalibration => {
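Condensed for readability, the restructured measurement loop in the SingleAddrCacheSideChannel implementation above now has roughly this shape (a simplified sketch of the hunk, with the SideChannelError match arms abbreviated); the multiple-address implementation follows the same pattern with prepare and test over a batch of handles:

// prepare once per address (it previously ran inside the iteration loops),
// then measure with reset = true so the handle is rearmed between iterations.
if unsafe { self.prepare_single(addr) }.is_err() {
    // NeedRecalibration / FatalError / AddressNotReady handled as in the hunk
}
for _ in 0..num_iteration {
    self.victim_single(victim);
    match unsafe { self.test_single(addr, true) } {
        Ok(CacheStatus::Hit) => hit += 1,
        Ok(_) => miss += 1, // sketch: anything other than a hit counted as a miss
        Err(_) => { /* error arms as in the hunk */ }
    }
}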

View File

@ -9,7 +9,8 @@ use cache_utils::calibration::Threshold;
use covert_channels_evaluation::{benchmark_channel, CovertChannel, CovertChannelBenchmarkResult}; use covert_channels_evaluation::{benchmark_channel, CovertChannel, CovertChannelBenchmarkResult};
use flush_flush::naive::NaiveFlushAndFlush; use flush_flush::naive::NaiveFlushAndFlush;
use flush_flush::{FFPrimitives, FlushAndFlush, SingleFlushAndFlush}; use flush_flush::{FFPrimitives, FlushAndFlush, SingleFlushAndFlush};
use flush_reload::naive::{NaiveFRPrimitives, NaiveFlushAndReload}; use flush_reload::naive::NaiveFlushAndReload;
use flush_reload::FRPrimitives;
use nix::sched::{sched_getaffinity, CpuSet}; use nix::sched::{sched_getaffinity, CpuSet};
use nix::unistd::Pid; use nix::unistd::Pid;
@ -138,13 +139,10 @@ fn main() {
let naive_ff = run_benchmark( let naive_ff = run_benchmark(
"Naive F+F", "Naive F+F",
|i, j| { |i, j| {
let mut r = NaiveFlushAndFlush::new( let mut r = NaiveFlushAndFlush::new(Threshold {
Threshold {
bucket_index: 202, bucket_index: 202,
miss_faster_than_hit: true, miss_faster_than_hit: true,
}, });
FFPrimitives {},
);
r.set_cores(i, j); r.set_cores(i, j);
(r, i, j) (r, i, j)
}, },
@ -153,16 +151,13 @@ fn main() {
old, old,
); );
let fr = run_benchmark( let naive_fr = run_benchmark(
"F+R", "Naive F+R",
|i, j| { |i, j| {
let mut r = NaiveFlushAndReload::new( let mut r = NaiveFlushAndReload::new(Threshold {
Threshold {
bucket_index: 250, bucket_index: 250,
miss_faster_than_hit: false, miss_faster_than_hit: false,
}, });
NaiveFRPrimitives {},
);
r.set_cores(i, j); r.set_cores(i, j);
(r, i, j) (r, i, j)
}, },
@ -174,7 +169,25 @@ fn main() {
let ff = run_benchmark( let ff = run_benchmark(
"Better F+F", "Better F+F",
|i, j| { |i, j| {
let (mut r, i, j) = match FlushAndFlush::new_any_two_core(true, FFPrimitives {}) { let (mut r, i, j) = match FlushAndFlush::new_any_two_core(true) {
Ok((channel, _old, main_core, helper_core)) => {
(channel, main_core, helper_core)
}
Err(e) => {
panic!("{:?}", e);
}
};
(r, i, j)
},
1,
num_pages,
old,
);
let fr = run_benchmark(
"Better F+R",
|i, j| {
let (mut r, i, j) = match FlushAndFlush::new_any_two_core(true) {
Ok((channel, _old, main_core, helper_core)) => { Ok((channel, _old, main_core, helper_core)) => {
(channel, main_core, helper_core) (channel, main_core, helper_core)
} }
@ -190,53 +203,3 @@ fn main() {
); );
} }
} }
/*
fn main() {
for num_pages in 1..=32 {
/*println!("Benchmarking F+F");
for _ in 0..16 {
// TODO Use the best possible ASV, not best possible AV
let (channel, old, receiver, sender) = match SingleFlushAndFlush::new_any_two_core(true) {
Err(e) => {
panic!("{:?}", e);
}
Ok(r) => r,
};
let r = benchmark_channel(channel, NUM_PAGES, NUM_BYTES);
println!("{:?}", r);
println!("C: {}, T: {}", r.capacity(), r.true_capacity());
}*/
let naive_ff = run_benchmark(
"Naive F+F",
|| NaiveFlushAndFlush::from_threshold(202),
NUM_ITER << 4,
num_pages,
);
let better_ff = run_benchmark(
"Better F+F",
|| {
match FlushAndFlush::new_any_two_core(true) {
Err(e) => {
panic!("{:?}", e);
}
Ok(r) => r,
}
.0
},
NUM_ITER,
num_pages,
);
let fr = run_benchmark(
"F+R",
|| NaiveFlushAndReload::from_threshold(230),
NUM_ITER,
num_pages,
);
}
}
*/

View File

@ -10,13 +10,14 @@ use basic_timing_cache_channel::{
use cache_side_channel::MultipleAddrCacheSideChannel; use cache_side_channel::MultipleAddrCacheSideChannel;
use cache_utils::calibration::only_flush; use cache_utils::calibration::only_flush;
#[derive(Debug)] #[derive(Debug, Default)]
pub struct FFPrimitives {} pub struct FFPrimitives {}
impl TimingChannelPrimitives for FFPrimitives { impl TimingChannelPrimitives for FFPrimitives {
unsafe fn attack(&self, addr: *const u8) -> u64 { unsafe fn attack(&self, addr: *const u8) -> u64 {
unsafe { only_flush(addr) } unsafe { only_flush(addr) }
} }
const NEED_RESET: bool = false;
} }
pub type FlushAndFlush = TopologyAwareTimingChannel<FFPrimitives>; pub type FlushAndFlush = TopologyAwareTimingChannel<FFPrimitives>;

View File

@ -3,6 +3,29 @@
pub mod naive; pub mod naive;
use basic_timing_cache_channel::{
SingleChannel, TimingChannelPrimitives, TopologyAwareTimingChannel,
};
use cache_side_channel::MultipleAddrCacheSideChannel;
use cache_utils::calibration::only_reload;
#[derive(Debug, Default)]
pub struct FRPrimitives {}
impl TimingChannelPrimitives for FRPrimitives {
unsafe fn attack(&self, addr: *const u8) -> u64 {
unsafe { only_reload(addr) }
}
const NEED_RESET: bool = true;
}
pub type FlushAndReload = TopologyAwareTimingChannel<FRPrimitives>;
pub type FRHandle = <FlushAndReload as MultipleAddrCacheSideChannel>::Handle;
pub type SingleFlushAndReload = SingleChannel<FlushAndReload>;
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
#[test] #[test]
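With FRPrimitives in place, Flush+Reload gets the same topology-aware constructors as Flush+Flush. A minimal usage sketch (hypothetical caller; the cache_side_channel trait imports are elided, PAGE_LEN and MMappedMemory come from cache_utils, and errors are simply expect-ed):

// Pick the best pair of distinct cores by calibration, map one page, build a
// handle per cache line, then probe line 0 with reset = true so the line is
// flushed again after the timed reload.
let (mut fr_channel, _old_affinity, _attacker, _victim) =
    FlushAndReload::new_any_two_core(true).expect("no usable core pair");
let page = MMappedMemory::<u8>::new(PAGE_LEN, false);
let addrs = (0..PAGE_LEN).step_by(64).map(|o| &page.slice()[o] as *const u8);
let mut handles = unsafe { fr_channel.calibrate(addrs) }.expect("calibration failed");
let status = unsafe { fr_channel.test_single(&mut handles[0], true) };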

View File

@ -1,15 +1,4 @@
use crate::FRPrimitives;
use basic_timing_cache_channel::naive::NaiveTimingChannel; use basic_timing_cache_channel::naive::NaiveTimingChannel;
use basic_timing_cache_channel::TimingChannelPrimitives;
use cache_utils::calibration::only_reload; pub type NaiveFlushAndReload = NaiveTimingChannel<FRPrimitives>;
#[derive(Debug)]
pub struct NaiveFRPrimitives {}
impl TimingChannelPrimitives for NaiveFRPrimitives {
unsafe fn attack(&self, addr: *const u8) -> u64 {
unsafe { only_reload(addr) }
}
}
pub type NaiveFlushAndReload = NaiveTimingChannel<NaiveFRPrimitives>;

View File

@ -10,6 +10,7 @@ edition = "2018"
cache_utils = { path = "../cache_utils" } cache_utils = { path = "../cache_utils" }
cache_side_channel = { path = "../cache_side_channel" } cache_side_channel = { path = "../cache_side_channel" }
flush_flush = { path = "../flush_flush" } flush_flush = { path = "../flush_flush" }
flush_reload = { path = "../flush_reload" }
basic_timing_cache_channel = { path = "../basic_timing_cache_channel" } basic_timing_cache_channel = { path = "../basic_timing_cache_channel" }
nix = "0.20.0" nix = "0.20.0"
rand = "0.8.3" rand = "0.8.3"

prefetcher_reverse/run-msr.sh Executable file
View File

@ -0,0 +1,7 @@
#!/bin/bash
PREFETCH_MSR=$1
sudo wrmsr -a 0x1a4 $PREFETCH_MSR
sudo rdmsr -a 0x1a4
cargo run --release > with-${PREFETCH_MSR}-prefetcher.log
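For context, 0x1a4 is the per-core prefetcher-control MSR on Intel Core processors (MSR_MISC_FEATURE_CONTROL): as documented by Intel, bits 0 to 3 disable, when set, the L2 hardware prefetcher, the L2 adjacent-cache-line prefetcher, the DCU streaming prefetcher and the DCU IP prefetcher respectively; the exact layout should still be confirmed for the target microarchitecture. The script needs the msr kernel module and msr-tools (wrmsr / rdmsr). For example, ./run-msr.sh 15 disables all four prefetchers and logs the run to with-15-prefetcher.log, while ./run-msr.sh 0 re-enables them for a baseline run.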

View File

@ -0,0 +1,281 @@
use crate::Probe::{Flush, Load};
use basic_timing_cache_channel::{TopologyAwareError, TopologyAwareTimingChannel};
use cache_side_channel::CacheStatus::Hit;
use cache_side_channel::{
set_affinity, CacheStatus, CoreSpec, MultipleAddrCacheSideChannel, SingleAddrCacheSideChannel,
};
use cache_utils::calibration::PAGE_LEN;
use cache_utils::mmap::MMappedMemory;
use flush_flush::{FFHandle, FFPrimitives, FlushAndFlush};
use flush_reload::{FRHandle, FRPrimitives, FlushAndReload};
use nix::sys::stat::stat;
use rand::seq::SliceRandom;
use std::iter::{Cycle, Peekable};
use std::ops::Range;
// NB these may need to be changed / dynamically measured.
pub const CACHE_LINE_LEN: usize = 64;
pub const PAGE_CACHELINE_LEN: usize = PAGE_LEN / CACHE_LINE_LEN;
pub struct Prober {
pages: Vec<MMappedMemory<u8>>,
ff_handles: Vec<Vec<FFHandle>>,
fr_handles: Vec<Vec<FRHandle>>,
page_indexes: Peekable<Cycle<Range<usize>>>,
ff_channel: FlushAndFlush,
fr_channel: FlushAndReload,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Probe {
Load(usize),
Flush(usize),
FullFlush,
}
pub struct ProbePattern {
pub pattern: Vec<usize>,
pub probe: Probe,
}
enum ProberError {
NoMem(nix::Error),
TopologyError(TopologyAwareError),
Nix(nix::Error),
}
/**
Result of running a probe pattern num_iteration times.
*/
pub enum ProbeResult {
Load(u32),
Flush(u32),
FullFlush(Vec<u32>),
}
pub struct ProbePatternResult {
pub num_iteration: u32,
pub pattern_result: Vec<u32>,
pub probe_result: ProbeResult,
}
struct DPRItem {
pattern_result: Vec<u32>,
probe_result: u32,
}
struct DualProbeResult {
probe_offset: usize,
load: DPRItem,
flush: DPRItem,
}
pub struct FullPageDualProbeResults {
num_iteration: u32,
results: Vec<DualProbeResult>,
}
struct SingleProbeResult {
probe_offset: usize,
pattern_result: Vec<u32>,
probe_result: u32,
}
pub struct FullPageSingleProbeResult {
probe_type: Probe,
num_iteration: u32,
results: Vec<SingleProbeResult>,
}
// Helper function
/**
This function is a helper that determines the maximum stride for a pattern of len accesses
starting at a given offset, both forward and backward.
Special case for length 0.
*/
fn max_stride(offset: usize, len: usize) -> (isize, isize) {
if len == 0 {
(0, 0)
} else {
let min = -((offset / (len * CACHE_LINE_LEN)) as isize);
let max = ((PAGE_LEN - offset) / (len * CACHE_LINE_LEN)) as isize;
(min, max)
}
}
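// Worked example (illustrative, assuming PAGE_LEN = 4096): max_stride(2048, 4)
// computes 2048 / (4 * 64) = 8 in both directions and returns (-8, 8), so a
// 4-access pattern starting 2048 bytes into the page can use strides of up to
// 8 cache lines backward or forward without leaving the page.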
impl Prober {
fn new(num_pages: usize) -> Result<Prober, ProberError> {
let mut vec = Vec::new();
let mut handles = Vec::new();
let (mut ff_channel, cpuset, core) = match FlushAndFlush::new_any_single_core() {
Ok(res) => res,
Err(err) => {
return Err(ProberError::TopologyError(err));
}
};
let old_affinity = match set_affinity(&ff_channel.main_core()) {
Ok(old) => old,
Err(nixerr) => return Err(ProberError::Nix(nixerr)),
};
let mut fr_channel = match FlushAndReload::new(core, core) {
Ok(res) => res,
Err(err) => {
return Err(ProberError::TopologyError(err));
}
};
for i in 0..num_pages {
let mut p = match MMappedMemory::<u8>::try_new(PAGE_LEN, false) {
Ok(p) => p,
Err(e) => {
return Err(ProberError::NoMem(e));
}
};
for j in 0..PAGE_LEN {
p[j] = (i * PAGE_CACHELINE_LEN + j) as u8;
}
let page_addresses =
((0..PAGE_LEN).step_by(CACHE_LINE_LEN)).map(|offset| &p[offset] as *const u8);
let ff_page_handles = unsafe { ff_channel.calibrate(page_addresses.clone()) }.unwrap();
let fr_page_handles = unsafe { fr_channel.calibrate(page_addresses) }.unwrap();
vec.push(p);
handles.push((ff_page_handles, fr_page_handles));
}
let mut page_indexes = (0..(handles.len())).cycle().peekable();
handles.shuffle(&mut rand::thread_rng());
let mut ff_handles = Vec::new();
let mut fr_handles = Vec::new();
for (ff_handle, fr_handle) in handles {
ff_handles.push(ff_handle);
fr_handles.push(fr_handle);
}
Ok(Prober {
pages: vec,
ff_handles,
fr_handles,
page_indexes,
ff_channel,
fr_channel,
})
}
/*
fn probe(&mut self, probe_type: Probe, offset: usize) -> CacheStatus {
let page_index = self.page_indexes.peek().unwrap();
match probe_type {
Probe::Load => {
let h = &mut self.handles[*page_index][offset].fr;
unsafe { self.fr_channel.test_single(h, false) }.unwrap()
}
Probe::Flush => {
let h = &mut self.handles[*page_index][offset].ff;
unsafe { self.ff_channel.test_single(h, false) }.unwrap()
}
}
}
*/
fn probe_pattern_once(
&mut self,
pattern: &ProbePattern,
result: Option<&mut ProbePatternResult>,
) {
enum ProbeOutput {
Single(CacheStatus),
Full(Vec<(*const u8, CacheStatus)>),
}
self.page_indexes.next();
let page_index = *self.page_indexes.peek().unwrap();
let mut ff_handles = self.ff_handles[page_index].iter_mut().collect();
unsafe { self.ff_channel.prepare(&mut ff_handles) };
let mut pattern_res = vec![CacheStatus::Miss; pattern.pattern.len()];
for (i, offset) in pattern.pattern.iter().enumerate() {
let h = &mut self.fr_handles[page_index][*offset];
pattern_res[i] = unsafe { self.fr_channel.test_single(h, false) }.unwrap()
}
let mut probe_out = match pattern.probe {
Load(offset) => {
let h = &mut self.fr_handles[page_index][offset];
ProbeOutput::Single(unsafe { self.fr_channel.test_single(h, false) }.unwrap())
}
Flush(offset) => {
let h = &mut self.ff_handles[page_index][offset];
ProbeOutput::Single(unsafe { self.ff_channel.test_single(h, false) }.unwrap())
}
Probe::FullFlush => {
ProbeOutput::Full(unsafe { self.ff_channel.test(&mut ff_handles, true).unwrap() })
}
};
if let Some(result_ref) = result {
result_ref.num_iteration += 1;
match result_ref.probe_result {
ProbeResult::Load(ref mut r) | ProbeResult::Flush(ref mut r) => {
if let ProbeOutput::Single(status) = probe_out {
if status == Hit {
*r += 1;
}
} else {
panic!()
}
}
ProbeResult::FullFlush(ref mut v) => {
if let ProbeOutput::Full(vstatus) = probe_out {
for (i, status) in vstatus.iter().enumerate() {
if status.1 == Hit {
v[i] += 1;
}
}
} else {
panic!()
}
}
}
for (i, res) in pattern_res.into_iter().enumerate() {
if res == Hit {
result_ref.pattern_result[i] += 1
}
}
}
}
pub fn probe_pattern(
&mut self,
pattern: &ProbePattern,
num_iteration: u32,
warmup: u32,
) -> ProbePatternResult {
let mut result = ProbePatternResult {
num_iteration: 0,
pattern_result: vec![0; pattern.pattern.len()],
probe_result: match pattern.probe {
Load(_) => ProbeResult::Load(0),
Flush(_) => ProbeResult::Flush(0),
Probe::FullFlush => ProbeResult::FullFlush(vec![0; PAGE_CACHELINE_LEN]),
},
};
for _ in 0..warmup {
self.probe_pattern_once(pattern, None);
}
for _ in 0..num_iteration {
self.probe_pattern_once(pattern, Some(&mut result));
}
result
}
}
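To make the intended flow concrete, here is a hypothetical in-crate driver (Prober::new is private to this crate; 256 pages mirrors NUM_PAGES in the binary below): run an ascending four-line access pattern, then measure whether line 8 of the page was brought in by the prefetcher.

let mut prober = match Prober::new(256) {
    Ok(p) => p,
    Err(_) => panic!("failed to set up the F+F / F+R channels"),
};
let pattern = ProbePattern {
    pattern: vec![0, 1, 2, 3],   // access cache lines 0..=3 of the page in order
    probe: Probe::Load(8),       // then time a load of line 8
};
let result = prober.probe_pattern(&pattern, 1 << 10, 100);
if let ProbeResult::Load(hits) = result.probe_result {
    println!("line 8 hit {} / {} times", hits, result.num_iteration);
}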

View File

@ -11,27 +11,14 @@ use cache_utils::mmap;
use cache_utils::mmap::MMappedMemory; use cache_utils::mmap::MMappedMemory;
use flush_flush::{FFHandle, FFPrimitives, FlushAndFlush}; use flush_flush::{FFHandle, FFPrimitives, FlushAndFlush};
use nix::Error; use nix::Error;
use prefetcher_reverse::{Prober, CACHE_LINE_LEN, PAGE_CACHELINE_LEN};
use rand::seq::SliceRandom; use rand::seq::SliceRandom;
use std::iter::Cycle; use std::iter::Cycle;
use std::ops::Range; use std::ops::Range;
pub const CACHE_LINE_LEN: usize = 64;
pub const PAGE_CACHELINE_LEN: usize = PAGE_LEN / CACHE_LINE_LEN;
pub const NUM_ITERATION: usize = 1 << 10; pub const NUM_ITERATION: usize = 1 << 10;
pub const NUM_PAGES: usize = 256; pub const NUM_PAGES: usize = 256;
fn max_stride(offset: usize, len: usize) -> (isize, isize) {
if len == 0 {
(1, 1)
} else {
let min = -((offset / (len * CACHE_LINE_LEN)) as isize);
let max = ((PAGE_LEN - offset) / (len * CACHE_LINE_LEN)) as isize;
(min, max)
}
}
// TODO negative stride // TODO negative stride
fn generate_pattern(offset: usize, len: usize, stride: isize) -> Option<Vec<usize>> { fn generate_pattern(offset: usize, len: usize, stride: isize) -> Option<Vec<usize>> {
let end = (offset as isize + stride * len as isize) * CACHE_LINE_LEN as isize; let end = (offset as isize + stride * len as isize) * CACHE_LINE_LEN as isize;
@ -57,7 +44,7 @@ fn execute_pattern(
unsafe { maccess(pointer) }; unsafe { maccess(pointer) };
} }
let mut measures = unsafe { channel.test(page_handles) }; let mut measures = unsafe { channel.test(page_handles, true) };
let mut res = vec![false; PAGE_CACHELINE_LEN]; let mut res = vec![false; PAGE_CACHELINE_LEN];
@ -78,140 +65,15 @@ fn execute_pattern_probe1(
unsafe { maccess(pointer) }; unsafe { maccess(pointer) };
} }
let mut measure = unsafe { channel.test_single(&mut page_handles[probe_offset]) }; let mut measure = unsafe { channel.test_single(&mut page_handles[probe_offset], true) };
measure.unwrap() == Hit measure.unwrap() == Hit
} }
enum ProberError {
NoMem(Error),
TopologyError(TopologyAwareError),
Nix(nix::Error),
}
struct Prober {
pages: Vec<MMappedMemory<u8>>,
handles: Vec<Vec<FFHandle>>,
page_indexes: Cycle<Range<usize>>,
channel: FlushAndFlush,
}
struct ProbeResult {
probe_all_initial: [u32; PAGE_CACHELINE_LEN],
probe_1: [u32; PAGE_CACHELINE_LEN],
probe_all_final: [u32; PAGE_CACHELINE_LEN],
}
impl Prober {
fn new(num_pages: usize) -> Result<Prober, ProberError> {
let mut vec = Vec::new();
let mut handles = Vec::new();
let (mut channel, cpuset, core) = match FlushAndFlush::new_any_single_core(FFPrimitives {})
{
Ok(res) => res,
Err(err) => {
return Err(ProberError::TopologyError(err));
}
};
let old_affinity = match set_affinity(&channel.main_core()) {
Ok(old) => old,
Err(nixerr) => return Err(ProberError::Nix(nixerr)),
}; // FIXME error handling
for i in 0..NUM_PAGES {
let mut p = match MMappedMemory::<u8>::try_new(PAGE_LEN, false) {
Ok(p) => p,
Err(e) => {
return Err(ProberError::NoMem(e));
}
};
for j in 0..PAGE_LEN {
p[j] = (i * PAGE_CACHELINE_LEN + j) as u8;
}
let page_addresses =
((0..PAGE_LEN).step_by(CACHE_LINE_LEN)).map(|offset| &p[offset] as *const u8);
let page_handles = unsafe { channel.calibrate(page_addresses) }.unwrap();
vec.push(p);
handles.push(page_handles);
}
let mut page_indexes = (0..(handles.len())).cycle();
handles.shuffle(&mut rand::thread_rng());
let mut handles_mutref = Vec::new();
for page in handles.iter_mut() {
handles_mutref.push(
page.iter_mut()
.collect::<Vec<&mut <FlushAndFlush as MultipleAddrCacheSideChannel>::Handle>>(),
);
}
Ok(Prober {
pages: vec,
handles,
page_indexes,
channel,
})
}
fn probe_pattern(&mut self, pattern: Vec<usize>) -> ProbeResult {
let mut handles_mutref = Vec::new();
for page in self.handles.iter_mut() {
handles_mutref.push(
page.iter_mut()
.collect::<Vec<&mut <FlushAndFlush as MultipleAddrCacheSideChannel>::Handle>>(),
);
}
let mut probe_all_result_first = [0; PAGE_CACHELINE_LEN];
for _ in 0..NUM_ITERATION {
let page_index = self.page_indexes.next().unwrap();
unsafe { self.channel.prepare(&mut handles_mutref[page_index]) };
let res = execute_pattern(&mut self.channel, &mut handles_mutref[page_index], &pattern);
for j in 0..PAGE_CACHELINE_LEN {
if res[j] {
probe_all_result_first[j] += 1;
}
}
}
let mut probe1_result = [0; PAGE_CACHELINE_LEN];
for i in 0..PAGE_CACHELINE_LEN {
for _ in 0..NUM_ITERATION {
let page_index = self.page_indexes.next().unwrap();
unsafe { self.channel.prepare(&mut handles_mutref[page_index]) };
let res = execute_pattern_probe1(
&mut self.channel,
&mut handles_mutref[page_index],
&pattern,
i,
);
if res {
probe1_result[i] += 1;
}
}
}
let mut probe_all_result = [0; PAGE_CACHELINE_LEN];
for _ in 0..NUM_ITERATION {
let page_index = self.page_indexes.next().unwrap();
unsafe { self.channel.prepare(&mut handles_mutref[page_index]) };
let res = execute_pattern(&mut self.channel, &mut handles_mutref[page_index], &pattern);
for j in 0..PAGE_CACHELINE_LEN {
if res[j] {
probe_all_result[j] += 1;
}
}
}
ProbeResult {
probe_all_initial: probe_all_result_first,
probe_1: probe1_result,
probe_all_final: probe_all_result,
}
}
}
fn main() { fn main() {
let mut vec = Vec::new(); let mut vec = Vec::new();
let mut handles = Vec::new(); let mut handles = Vec::new();
let (mut channel, cpuset, core) = FlushAndFlush::new_any_single_core(FFPrimitives {}).unwrap(); let (mut channel, cpuset, core) = FlushAndFlush::new_any_single_core().unwrap();
let old_affinity = set_affinity(&channel.main_core()); let old_affinity = set_affinity(&channel.main_core());
for i in 0..NUM_PAGES { for i in 0..NUM_PAGES {
let mut p = MMappedMemory::<u8>::new(PAGE_LEN, false); let mut p = MMappedMemory::<u8>::new(PAGE_LEN, false);