New version of the side channels, with a common implementation for F+R and F+F

This commit is contained in:
Guillaume DIDIER 2021-01-26 10:03:50 +01:00
parent cd5aa57390
commit 7a5cae722c
17 changed files with 1076 additions and 1038 deletions

Cargo.lock generated
View File

@ -45,6 +45,12 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
[[package]]
name = "basic_timing_cache_channel"
version = "0.1.0"
dependencies = [
"cache_side_channel",
"cache_utils",
"covert_channels_evaluation",
"nix",
]
[[package]]
name = "bit_field"
@ -163,6 +169,7 @@ checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"
name = "flush_flush"
version = "0.1.0"
dependencies = [
"basic_timing_cache_channel",
"cache_side_channel",
"cache_utils",
"covert_channels_evaluation",
@ -173,6 +180,7 @@ dependencies = [
name = "flush_reload"
version = "0.1.0"
dependencies = [
"basic_timing_cache_channel",
"cache_side_channel",
"cache_utils",
"covert_channels_evaluation",

View File

@ -0,0 +1,2 @@
[build]
target = "x86_64-unknown-linux-gnu"

View File

@ -7,3 +7,7 @@ edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
cache_utils = { path = "../cache_utils" }
cache_side_channel = { path = "../cache_side_channel" }
nix = "0.18.0"
covert_channels_evaluation = {path = "../covert_channels_evaluation"}

View File

@ -1,3 +1,6 @@
#![feature(unsafe_block_in_unsafe_fn)]
#![deny(unsafe_op_in_unsafe_fn)]
// TODO
// Common logic for the ability to calibrate along slices
@ -6,6 +9,712 @@
// Should be used by F+F and non-naive F+R
use crate::naive::NaiveTimingChannelHandle;
use cache_side_channel::SideChannelError::AddressNotReady;
use cache_side_channel::{
CacheStatus, ChannelFatalError, ChannelHandle, CoreSpec, MultipleAddrCacheSideChannel,
SideChannelError, SingleAddrCacheSideChannel,
};
use cache_utils::calibration::{
accumulate, calibrate_fixed_freq_2_thread, calibration_result_to_ASVP, get_cache_slicing,
get_vpn, only_flush, only_reload, CalibrateOperation2T, CalibrationOptions, ErrorPrediction,
ErrorPredictions, HashMap, HistParams, HistogramCumSum, PotentialThresholds, Slice, Threshold,
ThresholdError, Verbosity, ASVP, AV, CFLUSH_BUCKET_NUMBER, CFLUSH_BUCKET_SIZE, CFLUSH_NUM_ITER,
PAGE_LEN, SP, VPN,
};
use cache_utils::complex_addressing::CacheSlicing;
use cache_utils::mmap::MMappedMemory;
use cache_utils::{find_core_per_socket, flush, maccess, noop};
use covert_channels_evaluation::{BitIterator, CovertChannel};
use nix::sched::{sched_getaffinity, CpuSet};
use nix::unistd::Pid;
use std::collections::HashSet;
use std::fmt;
use std::fmt::{Debug, Formatter};
use std::ptr::slice_from_raw_parts;
pub mod naive;
pub trait TimingChannelPrimitives: Debug + Send + Sync {
unsafe fn attack(&self, addr: *const u8) -> u64;
}
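// Illustration: a Flush+Reload flavoured primitive only needs to time a
// reload. This is a hypothetical sketch; the real flush_reload crate defines
// its own primitive type, and `FRPrimitivesSketch` is not part of this commit.
#[derive(Debug)]
pub struct FRPrimitivesSketch;

impl TimingChannelPrimitives for FRPrimitivesSketch {
    unsafe fn attack(&self, addr: *const u8) -> u64 {
        // Time a single access: a fast access means the line was cached.
        unsafe { only_reload(addr) }
    }
}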
pub struct TopologyAwareTimingChannelHandle {
threshold: Threshold,
vpn: VPN,
addr: *const u8,
ready: bool,
calibration_epoch: usize,
}
pub struct CovertChannelHandle<T: MultipleAddrCacheSideChannel>(T::Handle);
impl ChannelHandle for TopologyAwareTimingChannelHandle {
fn to_const_u8_pointer(&self) -> *const u8 {
self.addr
}
}
#[derive(Debug)]
pub enum TopologyAwareError {
NoSlicing,
Nix(nix::Error),
NeedRecalibration,
}
pub struct TopologyAwareTimingChannel<T: TimingChannelPrimitives> {
// TODO
slicing: CacheSlicing, // TODO : include fallback option (with per address thresholds ?)
main_core: usize, // aka attacker
helper_core: usize, // aka victim
t: T,
thresholds: HashMap<SP, ThresholdError>,
addresses: HashSet<*const u8>,
preferred_address: HashMap<VPN, *const u8>,
calibration_epoch: usize,
}
unsafe impl<T: TimingChannelPrimitives + Send> Send for TopologyAwareTimingChannel<T> {}
unsafe impl<T: TimingChannelPrimitives + Sync> Sync for TopologyAwareTimingChannel<T> {}
impl<T: TimingChannelPrimitives> TopologyAwareTimingChannel<T> {
pub fn new(main_core: usize, helper_core: usize, t: T) -> Result<Self, TopologyAwareError> {
if let Some(slicing) = get_cache_slicing(find_core_per_socket()) {
if !slicing.can_hash() {
return Err(TopologyAwareError::NoSlicing);
}
let ret = Self {
thresholds: Default::default(),
addresses: Default::default(),
slicing,
main_core,
helper_core,
preferred_address: Default::default(),
t,
calibration_epoch: 0,
};
Ok(ret)
} else {
Err(TopologyAwareError::NoSlicing)
}
}
// Takes a buffer / list of addresses or pages,
// takes a list of core pairs,
// runs the optimized calibration and processes the results.
fn calibration_for_core_pairs<'a>(
t: &T,
core_pairs: impl Iterator<Item = (usize, usize)> + Clone,
pages: impl Iterator<Item = &'a [u8]>,
) -> Result<HashMap<AV, (ErrorPrediction, HashMap<SP, ThresholdError>)>, TopologyAwareError>
{
let core_per_socket = find_core_per_socket();
let operations = [
CalibrateOperation2T {
prepare: maccess::<u8>,
op: T::attack,
name: "hit",
display_name: "hit",
t: &t,
},
CalibrateOperation2T {
prepare: noop::<u8>,
op: T::attack,
name: "miss",
display_name: "miss",
t: &t,
},
];
const HIT_INDEX: usize = 0;
const MISS_INDEX: usize = 1;
let mut calibrate_results2t_vec = Vec::new();
let slicing = match get_cache_slicing(core_per_socket) {
Some(s) => s,
None => {
return Err(TopologyAwareError::NoSlicing);
}
};
let h = |addr: usize| slicing.hash(addr).unwrap();
for page in pages {
// FIXME Cache line size is magic
let mut r = unsafe {
calibrate_fixed_freq_2_thread(
&page[0] as *const u8,
64,
page.len() as isize,
&mut core_pairs.clone(),
&operations,
CalibrationOptions {
hist_params: HistParams {
bucket_number: CFLUSH_BUCKET_NUMBER,
bucket_size: CFLUSH_BUCKET_SIZE,
iterations: CFLUSH_NUM_ITER,
},
verbosity: Verbosity::NoOutput,
optimised_addresses: true,
},
core_per_socket,
)
};
calibrate_results2t_vec.append(&mut r);
}
let analysis: HashMap<ASVP, ThresholdError> = calibration_result_to_ASVP(
calibrate_results2t_vec,
|cal_1t_res| {
let e = ErrorPredictions::predict_errors(HistogramCumSum::from_calibrate(
cal_1t_res, HIT_INDEX, MISS_INDEX,
));
PotentialThresholds::minimizing_total_error(e)
.median()
.unwrap()
},
&h,
)
.map_err(|e| TopologyAwareError::Nix(e))?;
let asvp_best_av_errors: HashMap<AV, (ErrorPrediction, HashMap<SP, ThresholdError>)> =
accumulate(
analysis,
|asvp: ASVP| AV {
attacker: asvp.attacker,
victim: asvp.victim,
},
|| (ErrorPrediction::default(), HashMap::new()),
|acc: &mut (ErrorPrediction, HashMap<SP, ThresholdError>),
threshold_error,
asvp: ASVP,
av| {
assert_eq!(av.attacker, asvp.attacker);
assert_eq!(av.victim, asvp.victim);
let sp = SP {
slice: asvp.slice,
page: asvp.page,
};
acc.0 += threshold_error.error;
acc.1.insert(sp, threshold_error);
},
);
Ok(asvp_best_av_errors)
}
fn new_with_core_pairs(
core_pairs: impl Iterator<Item = (usize, usize)> + Clone,
t: T,
) -> Result<(Self, usize, usize), TopologyAwareError> {
let m = MMappedMemory::new(PAGE_LEN, false);
let array: &[u8] = m.slice();
let mut res = Self::calibration_for_core_pairs(&t, core_pairs, vec![array].into_iter())?;
let mut best_error_rate = 1.0;
let mut best_av = Default::default();
// Select the proper core
for (av, (global_error_pred, thresholds)) in res.iter() {
if global_error_pred.error_rate() < best_error_rate {
best_av = *av;
best_error_rate = global_error_pred.error_rate();
}
}
Self::new(best_av.attacker, best_av.victim, t)
.map(|this| (this, best_av.attacker, best_av.victim))
// Do not keep any thresholds: calibration ran on a local array that is about to be dropped.
}
pub fn new_any_single_core(t: T) -> Result<(Self, CpuSet, usize), TopologyAwareError> {
// Generate core iterator
let mut core_pairs: Vec<(usize, usize)> = Vec::new();
let old = sched_getaffinity(Pid::from_raw(0)).unwrap();
for i in 0..CpuSet::count() {
if old.is_set(i).unwrap() {
core_pairs.push((i, i));
}
}
// Generate all single-core pairs.
// Call out to the private constructor, which takes a core pair list, determines the best pair and makes the choice.
// The private constructor will set the correct affinity for the main (attacker) thread.
Self::new_with_core_pairs(core_pairs.into_iter(), t).map(|(channel, attacker, victim)| {
assert_eq!(attacker, victim);
(channel, old, attacker)
})
}
pub fn new_any_two_core(
distinct: bool,
t: T,
) -> Result<(Self, CpuSet, usize, usize), TopologyAwareError> {
let old = sched_getaffinity(Pid::from_raw(0)).unwrap();
let mut core_pairs: Vec<(usize, usize)> = Vec::new();
for i in 0..CpuSet::count() {
if old.is_set(i).unwrap() {
for j in 0..CpuSet::count() {
if old.is_set(j).unwrap() {
if i != j || !distinct {
core_pairs.push((i, j));
}
}
}
}
}
Self::new_with_core_pairs(core_pairs.into_iter(), t).map(|(channel, attacker, victim)| {
if distinct {
assert_ne!(attacker, victim);
}
(channel, old, attacker, victim)
})
}
fn get_slice(&self, addr: *const u8) -> Slice {
// FIXME: this will not work well if the slicing is not known
self.slicing.hash(addr as usize).unwrap()
}
pub fn set_cores(&mut self, main: usize, helper: usize) -> Result<(), TopologyAwareError> {
let old_main = self.main_core;
let old_helper = self.helper_core;
self.main_core = main;
self.helper_core = helper;
let pages: Vec<VPN> = self
.thresholds
.keys()
.map(|sp: &SP| sp.page)
//.copied()
.collect();
match self.recalibrate(pages) {
Ok(()) => Ok(()),
Err(e) => {
self.main_core = old_main;
self.helper_core = old_helper;
Err(e)
}
}
}
fn recalibrate(
&mut self,
pages: impl IntoIterator<Item = VPN>,
) -> Result<(), TopologyAwareError> {
// Unset readiness status.
// Run calibration with a single core pair.
// Use the results \o/ (or error out)
self.addresses.clear();
// FIXME: refactor core pairs in depth to make main vs. helper explicit.
let core_pairs = vec![(self.main_core, self.helper_core)];
let pages: HashSet<&[u8]> = self
.thresholds
.keys()
.map(|sp: &SP| unsafe { &*slice_from_raw_parts(sp.page as *const u8, PAGE_LEN) })
.collect();
let mut res =
Self::calibration_for_core_pairs(&self.t, core_pairs.into_iter(), pages.into_iter())?;
assert_eq!(res.keys().count(), 1);
self.thresholds = res
.remove(&AV {
attacker: self.main_core,
victim: self.helper_core,
})
.unwrap()
.1;
self.calibration_epoch += 1;
Ok(())
}
unsafe fn test_one_impl(
&self,
handle: &mut TopologyAwareTimingChannelHandle,
) -> Result<CacheStatus, SideChannelError> {
if handle.calibration_epoch != self.calibration_epoch {
return Err(SideChannelError::NeedRecalibration);
}
let time = unsafe { self.t.attack(handle.addr) };
if handle.threshold.is_hit(time) {
Ok(CacheStatus::Hit)
} else {
Ok(CacheStatus::Miss)
}
}
unsafe fn test_impl(
&self,
addresses: &mut Vec<&mut TopologyAwareTimingChannelHandle>,
limit: u32,
) -> Result<Vec<(*const u8, CacheStatus)>, SideChannelError> {
let mut result = Vec::new();
let mut tmp = Vec::new();
let mut i = 0;
for addr in addresses {
let r = unsafe { self.test_one_impl(addr) };
tmp.push((addr.to_const_u8_pointer(), r));
i += 1;
if i == limit {
break;
}
}
for (addr, r) in tmp {
match r {
Ok(status) => {
result.push((addr, status));
}
Err(e) => {
return Err(e);
}
}
}
Ok(result)
}
unsafe fn prepare_one_impl(
&self,
handle: &mut TopologyAwareTimingChannelHandle,
) -> Result<(), SideChannelError> {
if handle.calibration_epoch != self.calibration_epoch {
return Err(SideChannelError::NeedRecalibration);
}
unsafe { flush(handle.addr) };
handle.ready = true;
Ok(())
}
unsafe fn prepare_impl(
&mut self,
addresses: &mut Vec<&mut TopologyAwareTimingChannelHandle>,
limit: u32,
) -> Result<(), SideChannelError> {
// Iterate over the addresses, preparing them; exit early on error
let mut i = 0;
for handle in addresses {
match unsafe { self.prepare_one_impl(handle) } {
Ok(_) => {}
Err(e) => {
return Err(e);
}
}
i += 1;
if i == limit {
break;
}
}
Ok(())
}
}
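// Usage sketch for the constructors above; the primitive values `p` and `q`
// (e.g. flush_flush's FFPrimitives) and this helper are illustrative only.
pub fn example_setup<P: TimingChannelPrimitives>(
    p: P,
    q: P,
) -> Result<(), TopologyAwareError> {
    // Pick the best single core within the current affinity mask.
    let (_chan, _old_affinity, _core) = TopologyAwareTimingChannel::new_any_single_core(p)?;
    // Pick the best pair of distinct attacker / victim cores.
    let (_chan2, _old2, _attacker, _victim) =
        TopologyAwareTimingChannel::new_any_two_core(true, q)?;
    Ok(())
}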
impl<T: TimingChannelPrimitives> Debug for TopologyAwareTimingChannel<T> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_struct("Topology Aware Channel")
.field("thresholds", &self.thresholds)
.field("addresses", &self.addresses)
.field("slicing", &self.slicing)
.field("main_core", &self.main_core)
.field("helper_core", &self.helper_core)
.field("preferred_addresses", &self.preferred_address)
.field("calibration_epoch", &self.calibration_epoch)
.field("primitive", &self.t)
.finish()
}
}
impl<T: TimingChannelPrimitives> CoreSpec for TopologyAwareTimingChannel<T> {
fn main_core(&self) -> CpuSet {
let mut main = CpuSet::new();
main.set(self.main_core);
main
}
fn helper_core(&self) -> CpuSet {
let mut helper = CpuSet::new();
helper.set(self.helper_core);
helper
}
}
impl<T: TimingChannelPrimitives> MultipleAddrCacheSideChannel for TopologyAwareTimingChannel<T> {
type Handle = TopologyAwareTimingChannelHandle;
const MAX_ADDR: u32 = 0;
unsafe fn test<'a>(
&mut self,
addresses: &mut Vec<&'a mut Self::Handle>,
) -> Result<Vec<(*const u8, CacheStatus)>, SideChannelError>
where
Self::Handle: 'a,
{
unsafe { self.test_impl(addresses, Self::MAX_ADDR) }
}
unsafe fn prepare<'a>(
&mut self,
addresses: &mut Vec<&'a mut Self::Handle>,
) -> Result<(), SideChannelError>
where
Self::Handle: 'a,
{
unsafe { self.prepare_impl(addresses, Self::MAX_ADDR) }
}
fn victim(&mut self, operation: &dyn Fn()) {
operation(); // TODO use a different helper core ?
}
unsafe fn calibrate(
&mut self,
addresses: impl IntoIterator<Item = *const u8> + Clone,
) -> Result<Vec<Self::Handle>, ChannelFatalError> {
let core_pair = vec![(self.main_core, self.helper_core)];
let pages = addresses
.clone()
.into_iter()
.map(|addr: *const u8| unsafe {
&*slice_from_raw_parts(get_vpn(addr) as *const u8, PAGE_LEN)
})
.collect::<HashSet<&[u8]>>();
let mut res = match Self::calibration_for_core_pairs(
&self.t,
core_pair.into_iter(),
pages.into_iter(),
) {
Err(e) => {
return Err(ChannelFatalError::Oops);
}
Ok(r) => r,
};
assert_eq!(res.keys().count(), 1);
let t = res
.remove(&AV {
attacker: self.main_core,
victim: self.helper_core,
})
.unwrap()
.1;
for (sp, threshold) in t {
self.thresholds.insert(sp, threshold);
}
let mut result = vec![];
for addr in addresses {
let vpn = get_vpn(addr);
let slice = self.slicing.hash(addr as usize).unwrap();
let handle = TopologyAwareTimingChannelHandle {
threshold: self
.thresholds
.get(&SP { slice, page: vpn })
.unwrap()
.threshold,
vpn,
addr,
ready: false,
calibration_epoch: self.calibration_epoch,
};
result.push(handle);
}
Ok(result)
}
}
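// Workflow sketch for the handle-based side channel above: calibrate once to
// obtain handles, then prepare / run the victim / test. Error handling is
// collapsed and `victim_fn` is an assumed victim operation.
pub fn example_round<T: TimingChannelPrimitives>(
    chan: &mut TopologyAwareTimingChannel<T>,
    addr: *const u8,
    victim_fn: &dyn Fn(),
) -> Result<Vec<(*const u8, CacheStatus)>, ()> {
    let mut handles = unsafe { chan.calibrate(vec![addr]) }.map_err(|_| ())?;
    let mut refs: Vec<&mut _> = handles.iter_mut().collect();
    unsafe { chan.prepare(&mut refs) }.map_err(|_| ())?; // flush the target lines
    chan.victim(victim_fn); // the victim may or may not touch addr
    unsafe { chan.test(&mut refs) }.map_err(|_| ()) // measure hits and misses
}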
impl<T: TimingChannelPrimitives> CovertChannel for TopologyAwareTimingChannel<T> {
type Handle = CovertChannelHandle<TopologyAwareTimingChannel<T>>;
const BIT_PER_PAGE: usize = 1;
unsafe fn transmit<'a>(&self, handle: &mut Self::Handle, bits: &mut BitIterator<'a>) {
let page = handle.0.addr;
if let Some(b) = bits.next() {
if b {
unsafe { only_reload(page) };
} else {
unsafe { only_flush(page) };
}
}
}
unsafe fn receive(&self, handle: &mut Self::Handle) -> Vec<bool> {
let r = unsafe { self.test_one_impl(&mut handle.0) };
match r {
Err(e) => panic!("{:?}", e),
Ok(status) => {
let received = status == CacheStatus::Hit;
//println!("Received {} on page {:p}", received, page);
return vec![received];
}
}
}
unsafe fn ready_page(&mut self, page: *const u8) -> Result<Self::Handle, ()> {
let vpn: VPN = get_vpn(page);
// Check if the page has already been readied. If so, should this error out?
if let Some(preferred) = self.preferred_address.get(&vpn) {
return Err(());
}
if self.thresholds.iter().filter(|kv| kv.0.page == vpn).count() == 0 {
// ensure calibration
let core_pair = vec![(self.main_core, self.helper_core)];
let as_slice = unsafe { &*slice_from_raw_parts(vpn as *const u8, PAGE_LEN) };
let pages = vec![as_slice];
let mut res = match Self::calibration_for_core_pairs(
&self.t,
core_pair.into_iter(),
pages.into_iter(),
) {
Err(e) => {
return Err(());
}
Ok(r) => r,
};
assert_eq!(res.keys().count(), 1);
let t = res
.remove(&AV {
attacker: self.main_core,
victim: self.helper_core,
})
.unwrap()
.1;
for (sp, threshold) in t {
self.thresholds.insert(sp, threshold);
}
}
let mut best_error_rate = 1.0;
let mut best_slice = 0;
for (sp, threshold_error) in self.thresholds.iter().filter(|kv| kv.0.page == vpn) {
if threshold_error.error.error_rate() < best_error_rate {
best_error_rate = threshold_error.error.error_rate();
best_slice = sp.slice;
}
}
for i in 0..PAGE_LEN {
let addr = unsafe { page.offset(i as isize) };
if self.get_slice(addr) == best_slice {
self.preferred_address.insert(vpn, addr);
// Create the right handle
let mut handle = Self::Handle {
0: TopologyAwareTimingChannelHandle {
threshold: self
.thresholds
.get(&SP {
slice: best_slice,
page: vpn,
})
.unwrap()
.threshold,
vpn,
addr,
ready: false,
calibration_epoch: self.calibration_epoch,
},
};
let r = unsafe { self.prepare_one_impl(&mut handle.0) }.unwrap();
return Ok(handle);
}
}
Err(())
}
}
// Extra helper for single address per page variants.
#[derive(Debug)]
pub struct SingleChannel<T: MultipleAddrCacheSideChannel> {
inner: T,
}
impl<T: MultipleAddrCacheSideChannel> SingleChannel<T> {
pub fn new(inner: T) -> Self {
Self { inner }
}
}
impl<T: MultipleAddrCacheSideChannel> CoreSpec for SingleChannel<T> {
fn main_core(&self) -> CpuSet {
self.inner.main_core()
}
fn helper_core(&self) -> CpuSet {
self.inner.helper_core()
}
}
impl<T: MultipleAddrCacheSideChannel> SingleAddrCacheSideChannel for SingleChannel<T> {
type Handle = T::Handle;
unsafe fn test_single(
&mut self,
handle: &mut Self::Handle,
) -> Result<CacheStatus, SideChannelError> {
unsafe { self.inner.test_single(handle) }
}
unsafe fn prepare_single(&mut self, handle: &mut Self::Handle) -> Result<(), SideChannelError> {
unsafe { self.inner.prepare_single(handle) }
}
fn victim_single(&mut self, operation: &dyn Fn()) {
self.inner.victim_single(operation)
}
unsafe fn calibrate_single(
&mut self,
addresses: impl IntoIterator<Item = *const u8> + Clone,
) -> Result<Vec<Self::Handle>, ChannelFatalError> {
unsafe { self.inner.calibrate_single(addresses) }
}
}
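// Illustrative: any multiple-address channel can be demoted to the
// single-address interface by wrapping it.
pub fn demote<T: MultipleAddrCacheSideChannel>(inner: T) -> SingleChannel<T> {
    SingleChannel::new(inner)
}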
/*
impl<T: MultipleAddrCacheSideChannel + Sync + Send> CovertChannel for SingleChannel<T> {
type Handle = CovertChannelHandle<T>;
const BIT_PER_PAGE: usize = 1;
unsafe fn transmit<'a>(&self, handle: &mut Self::Handle, bits: &mut BitIterator<'a>) {
unimplemented!()
}
unsafe fn receive(&self, handle: &mut Self::Handle) -> Vec<bool> {
let r = unsafe { self.test_single(handle) };
match r {
Err(e) => panic!("{:?}", e),
Ok(status_vec) => {
assert_eq!(status_vec.len(), 1);
let received = status_vec[0].1 == Hit;
//println!("Received {} on page {:p}", received, page);
return vec![received];
}
}
}
unsafe fn ready_page(&mut self, page: *const u8) -> Self::Handle {
unimplemented!()
}
}
*/
#[cfg(test)]
mod tests {
#[test]

View File

@ -0,0 +1,174 @@
use crate::TimingChannelPrimitives;
use cache_side_channel::{
CacheStatus, ChannelFatalError, ChannelHandle, CoreSpec, SideChannelError,
SingleAddrCacheSideChannel,
};
use cache_utils::calibration::{get_vpn, only_flush, only_reload, HashMap, Threshold, VPN};
use cache_utils::flush;
use covert_channels_evaluation::{BitIterator, CovertChannel};
use nix::sched::{sched_getaffinity, CpuSet};
use nix::unistd::Pid;
use std::fmt::Debug;
// Required parameter: the threshold.
#[derive(Debug)]
pub struct NaiveTimingChannel<T: TimingChannelPrimitives> {
threshold: Threshold,
current: HashMap<VPN, *const u8>,
main_core: CpuSet,
helper_core: CpuSet,
channel_primitive: T,
}
pub struct NaiveTimingChannelHandle {
vpn: VPN,
addr: *const u8,
}
impl ChannelHandle for NaiveTimingChannelHandle {
fn to_const_u8_pointer(&self) -> *const u8 {
self.addr
}
}
unsafe impl<T: TimingChannelPrimitives + Send> Send for NaiveTimingChannel<T> {}
unsafe impl<T: TimingChannelPrimitives + Sync> Sync for NaiveTimingChannel<T> {}
impl<T: TimingChannelPrimitives> NaiveTimingChannel<T> {
pub fn new(threshold: Threshold, t: T) -> Self {
Self {
threshold,
current: Default::default(),
main_core: sched_getaffinity(Pid::from_raw(0)).unwrap(),
helper_core: sched_getaffinity(Pid::from_raw(0)).unwrap(),
channel_primitive: t,
}
}
pub fn set_cores(&mut self, main_core: usize, helper_core: usize) {
self.main_core = CpuSet::new();
self.main_core.set(main_core).unwrap();
self.helper_core = CpuSet::new();
self.helper_core.set(helper_core).unwrap();
}
pub fn unready_page(
&mut self,
handle: NaiveTimingChannelHandle,
) -> Result<*const u8, ChannelFatalError> {
if let Some(addr) = self.current.remove(&handle.vpn) {
Ok(addr)
} else {
Err(ChannelFatalError::Oops)
}
}
unsafe fn test_impl(
&self,
handle: &mut NaiveTimingChannelHandle,
) -> Result<CacheStatus, SideChannelError> {
// This should be handled in prepare / unprepare
let t = unsafe { self.channel_primitive.attack(handle.addr) };
if self.threshold.is_hit(t) {
Ok(CacheStatus::Hit)
} else {
Ok(CacheStatus::Miss)
}
}
unsafe fn calibrate_impl(
&mut self,
addr: *const u8,
) -> Result<NaiveTimingChannelHandle, ChannelFatalError> {
let vpn = get_vpn(addr);
if self.current.get(&vpn).is_some() {
return Err(ChannelFatalError::Oops);
} else {
self.current.insert(vpn, addr);
Ok(NaiveTimingChannelHandle { vpn, addr })
}
}
}
impl<T: TimingChannelPrimitives> CoreSpec for NaiveTimingChannel<T> {
fn main_core(&self) -> CpuSet {
self.main_core
}
fn helper_core(&self) -> CpuSet {
self.helper_core
}
}
impl<T: TimingChannelPrimitives + Send + Sync> CovertChannel for NaiveTimingChannel<T> {
type Handle = NaiveTimingChannelHandle;
const BIT_PER_PAGE: usize = 1;
unsafe fn transmit<'a>(&self, handle: &mut Self::Handle, bits: &mut BitIterator<'a>) {
if let Some(b) = bits.next() {
if b {
unsafe { only_reload(handle.addr) };
} else {
unsafe { only_flush(handle.addr) };
}
}
}
unsafe fn receive(&self, handle: &mut Self::Handle) -> Vec<bool> {
let r = unsafe { self.test_impl(handle) };
match r {
Err(e) => panic!(),
Ok(status) => match status {
CacheStatus::Hit => vec![true],
CacheStatus::Miss => vec![false],
},
}
}
unsafe fn ready_page(&mut self, page: *const u8) -> Result<Self::Handle, ()> {
unsafe { self.calibrate_impl(page) }.map_err(|_| ())
}
}
impl<T: TimingChannelPrimitives> SingleAddrCacheSideChannel for NaiveTimingChannel<T> {
type Handle = NaiveTimingChannelHandle;
unsafe fn test_single(
&mut self,
handle: &mut Self::Handle,
) -> Result<CacheStatus, SideChannelError> {
unsafe { self.test_impl(handle) }
}
unsafe fn prepare_single(&mut self, handle: &mut Self::Handle) -> Result<(), SideChannelError> {
unsafe { flush(handle.addr) };
Ok(())
}
fn victim_single(&mut self, operation: &dyn Fn()) {
operation()
}
/// # Safety
///
/// addr needs to be a valid pointer
unsafe fn calibrate_single(
&mut self,
addresses: impl IntoIterator<Item = *const u8> + Clone,
) -> Result<Vec<Self::Handle>, ChannelFatalError> {
let mut result = vec![];
for addr in addresses {
match unsafe { self.calibrate_impl(addr) } {
Ok(handle) => result.push(handle),
Err(e) => {
return Err(e);
}
}
}
Ok(result)
}
}
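// Construction sketch: the threshold would come from a prior calibration run
// and the primitive selects the attack flavour. This helper is hypothetical.
pub fn make_naive<P: TimingChannelPrimitives>(
    threshold: Threshold,
    primitive: P,
) -> NaiveTimingChannel<P> {
    NaiveTimingChannel::new(threshold, primitive)
}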
// Include helper code to get a global threshold model?
// TODO

View File

@ -27,6 +27,10 @@ pub enum SideChannelError {
AddressNotCalibrated(*const u8),
}
pub trait ChannelHandle {
fn to_const_u8_pointer(&self) -> *const u8;
}
pub trait CoreSpec {
fn main_core(&self) -> CpuSet;
fn helper_core(&self) -> CpuSet;
@ -44,15 +48,19 @@ pub fn set_affinity(cpu_set: &CpuSet) -> CpuSet {
}
pub trait SingleAddrCacheSideChannel: CoreSpec + Debug {
type Handle: ChannelHandle;
//type SingleChannelFatalError: Debug;
/// # Safety
///
/// addr must be a valid pointer to read.
unsafe fn test_single(&mut self, addr: *const u8) -> Result<CacheStatus, SideChannelError>;
unsafe fn test_single(
&mut self,
handle: &mut Self::Handle,
) -> Result<CacheStatus, SideChannelError>;
/// # Safety
///
/// addr must be a valid pointer to read.
unsafe fn prepare_single(&mut self, addr: *const u8) -> Result<(), SideChannelError>;
unsafe fn prepare_single(&mut self, handle: &mut Self::Handle) -> Result<(), SideChannelError>;
fn victim_single(&mut self, operation: &dyn Fn());
/// # Safety
///
@ -60,26 +68,31 @@ pub trait SingleAddrCacheSideChannel: CoreSpec + Debug {
unsafe fn calibrate_single(
&mut self,
addresses: impl IntoIterator<Item = *const u8> + Clone,
) -> Result<(), ChannelFatalError>;
) -> Result<Vec<Self::Handle>, ChannelFatalError>;
}
pub trait MultipleAddrCacheSideChannel: CoreSpec + Debug {
type Handle: ChannelHandle;
const MAX_ADDR: u32;
/// # Safety
///
/// addresses must contain only valid pointers to read.
unsafe fn test<'a, 'b, 'c>(
&'a mut self,
addresses: &'b mut (impl Iterator<Item = &'c *const u8> + Clone),
) -> Result<Vec<(*const u8, CacheStatus)>, SideChannelError>;
addresses: &'b mut Vec<&'c mut Self::Handle>,
) -> Result<Vec<(*const u8, CacheStatus)>, SideChannelError>
where
Self::Handle: 'c;
/// # Safety
///
/// addresses must contain only valid pointers to read.
unsafe fn prepare<'a, 'b, 'c>(
&'a mut self,
addresses: &'b mut (impl Iterator<Item = &'c *const u8> + Clone),
) -> Result<(), SideChannelError>;
addresses: &'b mut Vec<&'c mut Self::Handle>,
) -> Result<(), SideChannelError>
where
Self::Handle: 'c;
fn victim(&mut self, operation: &dyn Fn());
/// # Safety
@ -88,18 +101,23 @@ pub trait MultipleAddrCacheSideChannel: CoreSpec + Debug {
unsafe fn calibrate(
&mut self,
addresses: impl IntoIterator<Item = *const u8> + Clone,
) -> Result<(), ChannelFatalError>;
) -> Result<Vec<Self::Handle>, ChannelFatalError>;
}
impl<T: MultipleAddrCacheSideChannel> SingleAddrCacheSideChannel for T {
unsafe fn test_single(&mut self, addr: *const u8) -> Result<CacheStatus, SideChannelError> {
let addresses = vec![addr];
unsafe { self.test(&mut addresses.iter()) }.map(|v| v[0].1)
type Handle = <Self as MultipleAddrCacheSideChannel>::Handle;
unsafe fn test_single(
&mut self,
handle: &mut Self::Handle,
) -> Result<CacheStatus, SideChannelError> {
let mut handles = vec![handle];
unsafe { self.test(&mut handles) }.map(|v| v[0].1)
}
unsafe fn prepare_single(&mut self, addr: *const u8) -> Result<(), SideChannelError> {
let addresses = vec![addr];
unsafe { self.prepare(&mut addresses.iter()) }
unsafe fn prepare_single(&mut self, handle: &mut Self::Handle) -> Result<(), SideChannelError> {
let mut handles = vec![handle];
unsafe { self.prepare(&mut handles) }
}
fn victim_single(&mut self, operation: &dyn Fn()) {
@ -109,7 +127,7 @@ impl<T: MultipleAddrCacheSideChannel> SingleAddrCacheSideChannel for T {
unsafe fn calibrate_single(
&mut self,
addresses: impl IntoIterator<Item = *const u8> + Clone,
) -> Result<(), ChannelFatalError> {
) -> Result<Vec<Self::Handle>, ChannelFatalError> {
unsafe { self.calibrate(addresses) }
}
}
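// Driving sketch for the new handle-based API (illustrative; errors are
// collapsed to Option and `victim` is an assumed victim operation):
pub fn probe_once<C: SingleAddrCacheSideChannel>(
    chan: &mut C,
    addr: *const u8,
    victim: &dyn Fn(),
) -> Option<CacheStatus> {
    let mut handles = unsafe { chan.calibrate_single(vec![addr]) }.ok()?;
    let handle = handles.get_mut(0)?;
    unsafe { chan.prepare_single(handle) }.ok()?; // reset the monitored line
    chan.victim_single(victim); // the victim may or may not access addr
    unsafe { chan.test_single(handle) }.ok() // Hit iff the line came back
}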

View File

@ -1,6 +1,6 @@
use crate::{
CacheStatus, ChannelFatalError, CoreSpec, MultipleAddrCacheSideChannel, SideChannelError,
SingleAddrCacheSideChannel,
CacheStatus, ChannelFatalError, ChannelHandle, CoreSpec, MultipleAddrCacheSideChannel,
SideChannelError, SingleAddrCacheSideChannel,
};
use std::collections::HashMap;
@ -21,7 +21,7 @@ impl TableAttackResult {
}
}
pub trait TableCacheSideChannel: CoreSpec + Debug {
pub trait TableCacheSideChannel<Handle: ChannelHandle>: CoreSpec + Debug {
//type ChannelFatalError: Debug;
/// # Safety
///
@ -29,40 +29,45 @@ pub trait TableCacheSideChannel: CoreSpec + Debug {
unsafe fn calibrate(
&mut self,
addresses: impl IntoIterator<Item = *const u8> + Clone,
) -> Result<(), ChannelFatalError>;
) -> Result<Vec<Handle>, ChannelFatalError>;
/// # Safety
///
/// addresses must contain only valid pointers to read.
unsafe fn attack<'a, 'b, 'c>(
&'a mut self,
addresses: impl Iterator<Item = &'c *const u8> + Clone,
addresses: impl Iterator<Item = &'c mut Handle> + Clone,
victim: &'b dyn Fn(),
num_iteration: u32,
) -> Result<Vec<TableAttackResult>, ChannelFatalError>;
) -> Result<Vec<TableAttackResult>, ChannelFatalError>
where
Handle: 'c;
}
impl<T: SingleAddrCacheSideChannel> TableCacheSideChannel for T {
impl<T: SingleAddrCacheSideChannel> TableCacheSideChannel<T::Handle> for T {
default unsafe fn calibrate(
&mut self,
addresses: impl IntoIterator<Item = *const u8> + Clone,
) -> Result<(), ChannelFatalError> {
) -> Result<Vec<T::Handle>, ChannelFatalError> {
unsafe { self.calibrate_single(addresses) }
}
//type ChannelFatalError = T::SingleChannelFatalError;
default unsafe fn attack<'a, 'b, 'c>(
&'a mut self,
addresses: impl Iterator<Item = &'c *const u8> + Clone,
addresses: impl Iterator<Item = &'c mut T::Handle> + Clone,
victim: &'b dyn Fn(),
num_iteration: u32,
) -> Result<Vec<TableAttackResult>, ChannelFatalError> {
) -> Result<Vec<TableAttackResult>, ChannelFatalError>
where
T::Handle: 'c,
{
let mut result = Vec::new();
for addr in addresses {
let mut hit = 0;
let mut miss = 0;
for iteration in 0..100 {
match unsafe { self.prepare_single(*addr) } {
match unsafe { self.prepare_single(addr) } {
Ok(_) => {}
Err(e) => match e {
SideChannelError::NeedRecalibration => unimplemented!(),
@ -72,7 +77,7 @@ impl<T: SingleAddrCacheSideChannel> TableCacheSideChannel for T {
},
}
self.victim_single(victim);
let r = unsafe { self.test_single(*addr) };
let r = unsafe { self.test_single(addr) };
match r {
Ok(status) => {}
Err(e) => match e {
@ -85,7 +90,7 @@ impl<T: SingleAddrCacheSideChannel> TableCacheSideChannel for T {
}
}
for _iteration in 0..num_iteration {
match unsafe { self.prepare_single(*addr) } {
match unsafe { self.prepare_single(addr) } {
Ok(_) => {}
Err(e) => match e {
SideChannelError::NeedRecalibration => unimplemented!(),
@ -95,7 +100,7 @@ impl<T: SingleAddrCacheSideChannel> TableCacheSideChannel for T {
},
}
self.victim_single(victim);
let r = unsafe { self.test_single(*addr) };
let r = unsafe { self.test_single(addr) };
match r {
Ok(status) => match status {
CacheStatus::Hit => {
@ -115,7 +120,7 @@ impl<T: SingleAddrCacheSideChannel> TableCacheSideChannel for T {
}
}
result.push(TableAttackResult {
addr: *addr,
addr: addr.to_const_u8_pointer(),
hit,
miss,
});
@ -126,11 +131,11 @@ impl<T: SingleAddrCacheSideChannel> TableCacheSideChannel for T {
// TODO limit the number of simultaneously tested addresses + randomise order?
impl<T: MultipleAddrCacheSideChannel> TableCacheSideChannel for T {
impl<T: MultipleAddrCacheSideChannel> TableCacheSideChannel<T::Handle> for T {
unsafe fn calibrate(
&mut self,
addresses: impl IntoIterator<Item = *const u8> + Clone,
) -> Result<(), ChannelFatalError> {
) -> Result<Vec<T::Handle>, ChannelFatalError> {
unsafe { self.calibrate(addresses) }
}
//type ChannelFatalError = T::MultipleChannelFatalError;
@ -140,19 +145,22 @@ impl<T: MultipleAddrCacheSideChannel> TableCacheSideChannel for T {
/// addresses must contain only valid pointers to read.
unsafe fn attack<'a, 'b, 'c>(
&'a mut self,
mut addresses: impl Iterator<Item = &'c *const u8> + Clone,
mut addresses: impl Iterator<Item = &'c mut T::Handle> + Clone,
victim: &'b dyn Fn(),
num_iteration: u32,
) -> Result<Vec<TableAttackResult>, ChannelFatalError> {
) -> Result<Vec<TableAttackResult>, ChannelFatalError>
where
T::Handle: 'c,
{
let mut v = Vec::new();
while let Some(addr) = addresses.next() {
let mut batch = Vec::new();
batch.push(*addr);
batch.push(addr);
let mut hits: HashMap<*const u8, u32> = HashMap::new();
let mut misses: HashMap<*const u8, u32> = HashMap::new();
for i in 1..T::MAX_ADDR {
if let Some(addr) = addresses.next() {
batch.push(*addr);
batch.push(addr);
} else {
break;
}
@ -161,7 +169,7 @@ impl<T: MultipleAddrCacheSideChannel> TableCacheSideChannel for T {
// TODO Warmup
}
for i in 0..num_iteration {
match unsafe { MultipleAddrCacheSideChannel::prepare(self, &mut batch.iter()) } {
match unsafe { MultipleAddrCacheSideChannel::prepare(self, &mut batch) } {
Ok(_) => {}
Err(e) => match e {
SideChannelError::NeedRecalibration => unimplemented!(),
@ -179,7 +187,7 @@ impl<T: MultipleAddrCacheSideChannel> TableCacheSideChannel for T {
}
MultipleAddrCacheSideChannel::victim(self, victim);
let r = unsafe { MultipleAddrCacheSideChannel::test(self, &mut batch.iter()) }; // Fixme error handling
let r = unsafe { MultipleAddrCacheSideChannel::test(self, &mut batch) }; // Fixme error handling
match r {
Err(e) => match e {
SideChannelError::NeedRecalibration => {
@ -209,9 +217,9 @@ impl<T: MultipleAddrCacheSideChannel> TableCacheSideChannel for T {
for addr in batch {
v.push(TableAttackResult {
addr,
hit: *hits.get(&addr).unwrap_or(&0u32),
miss: *misses.get(&addr).unwrap_or(&0u32),
addr: addr.to_const_u8_pointer(),
hit: *hits.get(&addr.to_const_u8_pointer()).unwrap_or(&0u32),
miss: *misses.get(&addr.to_const_u8_pointer()).unwrap_or(&0u32),
})
}
}

View File

@ -66,6 +66,25 @@ struct Threshold {
pub num_false_miss: u32,
}
unsafe fn only_flush_wrap(_: &(), addr: *const u8) -> u64 {
unsafe { only_flush(addr) }
}
unsafe fn only_reload_wrap(_: &(), addr: *const u8) -> u64 {
unsafe { only_reload(addr) }
}
unsafe fn load_and_flush_wrap(_: &(), addr: *const u8) -> u64 {
unsafe { load_and_flush(addr) }
}
unsafe fn flush_and_reload_wrap(_: &(), addr: *const u8) -> u64 {
unsafe { flush_and_reload(addr) }
}
unsafe fn reload_and_flush_wrap(_: &(), addr: *const u8) -> u64 {
unsafe { reload_and_flush(addr) }
}
fn main() {
// Grab a slice of memory
@ -117,63 +136,73 @@ fn main() {
let operations = [
CalibrateOperation2T {
prepare: maccess::<u8>,
op: only_flush,
op: only_flush_wrap,
name: "clflush_remote_hit",
display_name: "clflush remote hit",
t: &(),
},
CalibrateOperation2T {
prepare: maccess::<u8>,
op: load_and_flush,
op: load_and_flush_wrap,
name: "clflush_shared_hit",
display_name: "clflush shared hit",
t: &(),
},
CalibrateOperation2T {
prepare: flush,
op: only_flush,
op: only_flush_wrap,
name: "clflush_miss_f",
display_name: "clflush miss - f",
t: &(),
},
CalibrateOperation2T {
prepare: flush,
op: load_and_flush,
op: load_and_flush_wrap,
name: "clflush_local_hit_f",
display_name: "clflush local hit - f",
t: &(),
},
CalibrateOperation2T {
prepare: noop::<u8>,
op: only_flush,
op: only_flush_wrap,
name: "clflush_miss_n",
display_name: "clflush miss - n",
t: &(),
},
CalibrateOperation2T {
prepare: noop::<u8>,
op: load_and_flush,
op: load_and_flush_wrap,
name: "clflush_local_hit_n",
display_name: "clflush local hit - n",
t: &(),
},
CalibrateOperation2T {
prepare: noop::<u8>,
op: flush_and_reload,
op: flush_and_reload_wrap,
name: "reload_miss",
display_name: "reload miss",
t: &(),
},
CalibrateOperation2T {
prepare: maccess::<u8>,
op: reload_and_flush,
op: reload_and_flush_wrap,
name: "reload_remote_hit",
display_name: "reload remote hit",
t: &(),
},
CalibrateOperation2T {
prepare: maccess::<u8>,
op: only_reload,
op: only_reload_wrap,
name: "reload_shared_hit",
display_name: "reload shared hit",
t: &(),
},
CalibrateOperation2T {
prepare: noop::<u8>,
op: only_reload,
op: only_reload_wrap,
name: "reload_local_hit",
display_name: "reload local hit",
t: &(),
},
];

View File

@ -16,11 +16,12 @@ use std::sync::{Arc, Mutex};
use std::thread;
use turn_lock::TurnHandle;
pub struct CalibrateOperation2T<'a> {
pub struct CalibrateOperation2T<'a, T> {
pub prepare: unsafe fn(*const u8) -> (),
pub op: unsafe fn(*const u8) -> u64,
pub op: unsafe fn(&T, *const u8) -> u64,
pub name: &'a str,
pub display_name: &'a str,
pub t: &'a T,
}
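// Sketch: a stateless primitive adapts to the new signature by ignoring the
// state argument, mirroring the *_wrap helpers in the calibration binary.
// `example_flush_op` and `example_operation` are hypothetical.
unsafe fn example_flush_op(_: &(), addr: *const u8) -> u64 {
    unsafe { only_flush(addr) }
}

fn example_operation() -> CalibrateOperation2T<'static, ()> {
    CalibrateOperation2T {
        prepare: crate::maccess::<u8>,
        op: example_flush_op,
        name: "example_flush",
        display_name: "example flush",
        t: &(), // stateless: the unit payload carries no data
    }
}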
pub struct CalibrateResult2T {
@ -31,12 +32,12 @@ pub struct CalibrateResult2T {
// TODO
}
pub unsafe fn calibrate_fixed_freq_2_thread<I: Iterator<Item = (usize, usize)>>(
pub unsafe fn calibrate_fixed_freq_2_thread<I: Iterator<Item = (usize, usize)>, T>(
p: *const u8,
increment: usize,
len: isize,
cores: &mut I,
operations: &[CalibrateOperation2T],
operations: &[CalibrateOperation2T<T>],
options: CalibrationOptions,
core_per_socket: u8,
) -> Vec<CalibrateResult2T> {
@ -63,12 +64,12 @@ struct HelperThreadParams {
// TODO : Modularisation / factorisation of some of the common code with the single threaded no_std version ?
#[cfg(feature = "use_std")]
fn calibrate_fixed_freq_2_thread_impl<I: Iterator<Item = (usize, usize)>>(
fn calibrate_fixed_freq_2_thread_impl<I: Iterator<Item = (usize, usize)>, T>(
p: *const u8,
increment: usize,
len: isize,
cores: &mut I,
operations: &[CalibrateOperation2T],
operations: &[CalibrateOperation2T<T>],
mut options: CalibrationOptions,
core_per_socket: u8,
) -> Vec<CalibrateResult2T> {
@ -248,16 +249,18 @@ fn calibrate_fixed_freq_2_thread_impl<I: Iterator<Item = (usize, usize)>>(
if helper_core != main_core {
for op in operations {
params = main_turn_handle.wait();
params.op = op.prepare;
let mut hist = vec![0; options.hist_params.bucket_number];
for _ in 0..options.hist_params.iterations {
main_turn_handle.next();
params = main_turn_handle.wait();
let _time = unsafe { (op.op)(pointer) };
let _time = unsafe { (op.op)(op.t, pointer) };
}
for _ in 0..options.hist_params.iterations {
//params.next();
main_turn_handle.next();
params = main_turn_handle.wait();
let time = unsafe { (op.op)(pointer) };
let time = unsafe { (op.op)(op.t, pointer) };
let bucket = min(options.hist_params.bucket_number - 1, to_bucket(time));
hist[bucket] += 1;
}
@ -269,12 +272,12 @@ fn calibrate_fixed_freq_2_thread_impl<I: Iterator<Item = (usize, usize)>>(
for _ in 0..options.hist_params.iterations {
unsafe { (op.prepare)(pointer) };
unsafe { arch_x86::_mm_mfence() }; // Test with this ?
let _time = unsafe { (op.op)(pointer) };
let _time = unsafe { (op.op)(op.t, pointer) };
}
for _ in 0..options.hist_params.iterations {
unsafe { (op.prepare)(pointer) };
unsafe { arch_x86::_mm_mfence() }; // Test with this ?
let time = unsafe { (op.op)(pointer) };
let time = unsafe { (op.op)(op.t, pointer) };
let bucket = min(options.hist_params.bucket_number - 1, to_bucket(time));
hist[bucket] += 1;
}
@ -363,7 +366,7 @@ fn calibrate_fixed_freq_2_thread_impl<I: Iterator<Item = (usize, usize)>>(
if helper_core != main_core {
// terminate the thread
params.stop = true;
params.next();
main_turn_handle.next();
params = main_turn_handle.wait();
// join thread.
helper_thread.unwrap().join();
@ -390,15 +393,18 @@ fn calibrate_fixed_freq_2_thread_helper(
Err(e) => {
let mut params = turn_handle.wait();
params.stop = true;
params.next();
turn_handle.next();
return Err(e);
}
}
match sched_setaffinity(Pid::from_raw(0), &core) {
Ok(_) => {}
Err(_e) => {
unimplemented!();
Err(e) => {
let mut params = turn_handle.wait();
params.stop = true;
turn_handle.next();
return Err(e);
}
}
@ -406,7 +412,7 @@ fn calibrate_fixed_freq_2_thread_helper(
// grab lock
let params = turn_handle.wait();
if params.stop {
params.next();
turn_handle.next();
return Ok(());
}
// get the relevant parameters
@ -414,7 +420,7 @@ fn calibrate_fixed_freq_2_thread_helper(
let op = params.op;
unsafe { op(addr) };
// release lock
params.next()
turn_handle.next();
}
}

View File

@ -1,6 +1,6 @@
#![feature(unsafe_block_in_unsafe_fn)]
#![deny(unsafe_op_in_unsafe_fn)]
use turn_lock::TurnLock;
use turn_lock::TurnHandle;
const PAGE_SIZE: usize = 1 << 12; // FIXME Magic
@ -34,12 +34,12 @@ use std::thread;
/**
* Safety considerations: this does not ensure thread safety; callers must provide proper locking as needed.
*/
pub trait CovertChnel: Send + Sync + CoreSpec + Debug {
pub trait CovertChannel: Send + Sync + CoreSpec + Debug {
type Handle;
const BIT_PER_PAGE: usize;
unsafe fn transmit(&self, handle: &mut Handle, bits: &mut BitIterator);
unsafe fn receive(&self, handle: &mut Handle) -> Vec<bool>;
unsafe fn ready_page(&mut self, page: *const u8) -> Handle;
unsafe fn transmit(&self, handle: &mut Self::Handle, bits: &mut BitIterator);
unsafe fn receive(&self, handle: &mut Self::Handle) -> Vec<bool>;
unsafe fn ready_page(&mut self, page: *const u8) -> Result<Self::Handle, ()>; // TODO Error Type
}
#[derive(Debug)]
@ -117,13 +117,8 @@ impl Iterator for BitIterator<'_> {
}
}
struct CovertChannelPage {
pub turn: TurnLock,
pub addr: *const u8,
}
struct CovertChannelParams<T: CovertChannel + Send> {
pages: Vec<CovertChannelPage>,
handles: Vec<TurnHandle<T::Handle>>,
covert_channel: Arc<T>,
}
@ -147,11 +142,11 @@ fn transmit_thread<T: CovertChannel>(
let start_time = std::time::Instant::now();
let start = unsafe { rdtsc_fence() };
while !bit_iter.atEnd() {
for page in params.pages.iter_mut() {
page.turn.wait();
unsafe { params.covert_channel.transmit(page.addr, &mut bit_iter) };
for page in params.handles.iter_mut() {
let mut handle = page.wait();
unsafe { params.covert_channel.transmit(&mut *handle, &mut bit_iter) };
bit_sent += T::BIT_PER_PAGE;
page.turn.next();
page.next();
if bit_iter.atEnd() {
break;
}
@ -171,34 +166,30 @@ pub fn benchmark_channel<T: 'static + Send + CovertChannel>(
let size = num_pages * PAGE_SIZE;
let mut m = MMappedMemory::new(size, false);
let mut pages_transmit = Vec::new();
let mut pages_receive = Vec::new();
let mut receiver_turn_handles = Vec::new();
let mut transmit_turn_handles = Vec::new();
for i in 0..num_pages {
m.slice_mut()[i * PAGE_SIZE] = i as u8;
}
let array: &[u8] = m.slice();
for i in 0..num_pages {
let addr = &array[i * PAGE_SIZE] as *const u8;
let mut turns = TurnLock::new(2);
let handle = unsafe { channel.ready_page(addr) }.unwrap();
let mut turns = TurnHandle::new(2, handle);
let mut t_iter = turns.drain(0..);
let transmit_lock = t_iter.next().unwrap();
let receive_lock = t_iter.next().unwrap();
assert!(t_iter.next().is_none());
unsafe { channel.ready_page(addr) };
pages_transmit.push(CovertChannelPage {
turn: transmit_lock,
addr,
});
pages_receive.push(CovertChannelPage {
turn: receive_lock,
addr,
});
transmit_turn_handles.push(transmit_lock);
receiver_turn_handles.push(receive_lock);
}
let covert_channel_arc = Arc::new(channel);
let params = CovertChannelParams {
pages: pages_transmit,
handles: transmit_turn_handles,
covert_channel: covert_channel_arc.clone(),
};
@ -207,10 +198,10 @@ pub fn benchmark_channel<T: 'static + Send + CovertChannel>(
let mut received_bytes: Vec<u8> = Vec::new();
let mut received_bits = VecDeque::<bool>::new();
while received_bytes.len() < num_bytes {
for page in pages_receive.iter_mut() {
page.turn.wait();
let mut bits = unsafe { covert_channel_arc.receive(page.addr) };
page.turn.next();
for handle in receiver_turn_handles.iter_mut() {
let mut page = handle.wait();
let mut bits = unsafe { covert_channel_arc.receive(&mut *page) };
handle.next();
received_bits.extend(&mut bits.iter());
while received_bits.len() >= u8::BIT_LENGTH {
let mut byte = 0;
@ -268,7 +259,8 @@ mod tests {
#[test]
fn test_bit_vec() {
let bit_iter = BitIterator::new(vec![0x55, 0xf]);
let bits = vec![0x55, 0xf];
let bit_iter = BitIterator::new(&bits);
let results = vec![
false, true, false, true, false, true, false, true, false, false, false, false, true,
true, true, true,

View File

@ -11,3 +11,4 @@ cache_utils = { path = "../cache_utils" }
cache_side_channel = { path = "../cache_side_channel" }
nix = "0.18.0"
covert_channels_evaluation = {path = "../covert_channels_evaluation"}
basic_timing_cache_channel = { path = "../basic_timing_cache_channel" }

View File

@ -3,680 +3,24 @@
pub mod naive;
use cache_side_channel::SideChannelError::{AddressNotCalibrated, AddressNotReady};
use cache_side_channel::{
CacheStatus, ChannelFatalError, CoreSpec, MultipleAddrCacheSideChannel, SideChannelError,
SingleAddrCacheSideChannel,
use basic_timing_cache_channel::{
SingleChannel, TimingChannelPrimitives, TopologyAwareTimingChannel,
};
use cache_utils::calibration::{
accumulate, calibrate_fixed_freq_2_thread, calibration_result_to_ASVP, get_cache_slicing,
get_vpn, only_flush, only_reload, CalibrateOperation2T, CalibrationOptions, ErrorPredictions,
HistParams, HistogramCumSum, PotentialThresholds, Verbosity, ASVP, CFLUSH_BUCKET_NUMBER,
CFLUSH_BUCKET_SIZE, CFLUSH_NUM_ITER, PAGE_LEN, PAGE_SHIFT,
};
use cache_utils::calibration::{ErrorPrediction, Slice, Threshold, ThresholdError, AV, SP, VPN};
use cache_utils::complex_addressing::CacheSlicing;
use cache_utils::{find_core_per_socket, flush, maccess, noop};
use nix::sched::{sched_getaffinity, sched_setaffinity, CpuSet};
use nix::unistd::Pid;
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::fmt::{Debug, Formatter};
pub struct FlushAndFlush {
thresholds: HashMap<SP, ThresholdError>,
addresses_ready: HashSet<*const u8>,
slicing: CacheSlicing,
attacker_core: usize,
victim_core: usize,
preferred_address: HashMap<*const u8, *const u8>,
}
use cache_utils::calibration::only_flush;
#[derive(Debug)]
pub enum FlushAndFlushError {
NoSlicing,
Nix(nix::Error),
}
#[derive(Debug)]
pub struct SingleFlushAndFlush(FlushAndFlush);
impl SingleFlushAndFlush {
pub fn new(attacker_core: usize, victim_core: usize) -> Result<Self, FlushAndFlushError> {
FlushAndFlush::new(attacker_core, victim_core).map(|ff| SingleFlushAndFlush(ff))
}
pub fn new_any_single_core() -> Result<(Self, CpuSet, usize), FlushAndFlushError> {
FlushAndFlush::new_any_single_core()
.map(|(ff, old, core)| (SingleFlushAndFlush(ff), old, core))
}
pub fn new_any_two_core(
distinct: bool,
) -> Result<(Self, CpuSet, usize, usize), FlushAndFlushError> {
FlushAndFlush::new_any_two_core(distinct)
.map(|(ff, old, attacker, victim)| (SingleFlushAndFlush(ff), old, attacker, victim))
}
}
impl CoreSpec for SingleFlushAndFlush {
fn main_core(&self) -> CpuSet {
self.0.main_core()
}
fn helper_core(&self) -> CpuSet {
self.0.helper_core()
}
}
impl SingleAddrCacheSideChannel for SingleFlushAndFlush {
unsafe fn test_single(&mut self, addr: *const u8) -> Result<CacheStatus, SideChannelError> {
unsafe { self.0.test_single(addr) }
}
unsafe fn prepare_single(&mut self, addr: *const u8) -> Result<(), SideChannelError> {
unsafe { self.0.prepare_single(addr) }
}
fn victim_single(&mut self, operation: &dyn Fn()) {
self.0.victim_single(operation)
}
unsafe fn calibrate_single(
&mut self,
addresses: impl IntoIterator<Item = *const u8> + Clone,
) -> Result<(), ChannelFatalError> {
unsafe { self.0.calibrate_single(addresses) }
}
}
impl FlushAndFlush {
pub fn new(attacker_core: usize, victim_core: usize) -> Result<Self, FlushAndFlushError> {
if let Some(slicing) = get_cache_slicing(find_core_per_socket()) {
if !slicing.can_hash() {
return Err(FlushAndFlushError::NoSlicing);
}
let ret = Self {
thresholds: Default::default(),
addresses_ready: Default::default(),
slicing,
attacker_core,
victim_core,
preferred_address: Default::default(),
};
Ok(ret)
} else {
Err(FlushAndFlushError::NoSlicing)
}
}
// Takes a buffer / list of addresses or pages
// Takes a list of core pairs
// Run optimized calibration and processes results
fn calibration_for_core_pairs<'a>(
core_pairs: impl Iterator<Item = (usize, usize)> + Clone,
pages: impl Iterator<Item = &'a [u8]>,
) -> Result<HashMap<AV, (ErrorPrediction, HashMap<SP, ThresholdError>)>, FlushAndFlushError>
{
let core_per_socket = find_core_per_socket();
let operations = [
CalibrateOperation2T {
prepare: maccess::<u8>,
op: only_flush,
name: "clflush_remote_hit",
display_name: "clflush remote hit",
},
CalibrateOperation2T {
prepare: noop::<u8>,
op: only_flush,
name: "clflush_miss",
display_name: "clflush miss",
},
];
const HIT_INDEX: usize = 0;
const MISS_INDEX: usize = 1;
let mut calibrate_results2t_vec = Vec::new();
let slicing = match get_cache_slicing(core_per_socket) {
Some(s) => s,
None => {
return Err(FlushAndFlushError::NoSlicing);
}
};
let h = |addr: usize| slicing.hash(addr).unwrap();
for page in pages {
// FIXME Cache line size is magic
let mut r = unsafe {
calibrate_fixed_freq_2_thread(
&page[0] as *const u8,
64,
page.len() as isize,
&mut core_pairs.clone(),
&operations,
CalibrationOptions {
hist_params: HistParams {
bucket_number: CFLUSH_BUCKET_NUMBER,
bucket_size: CFLUSH_BUCKET_SIZE,
iterations: CFLUSH_NUM_ITER,
},
verbosity: Verbosity::NoOutput,
optimised_addresses: true,
},
core_per_socket,
)
};
calibrate_results2t_vec.append(&mut r);
}
let analysis: HashMap<ASVP, ThresholdError> = calibration_result_to_ASVP(
calibrate_results2t_vec,
|cal_1t_res| {
let e = ErrorPredictions::predict_errors(HistogramCumSum::from_calibrate(
cal_1t_res, HIT_INDEX, MISS_INDEX,
));
PotentialThresholds::minimizing_total_error(e)
.median()
.unwrap()
},
&h,
)
.map_err(|e| FlushAndFlushError::Nix(e))?;
let asvp_best_av_errors: HashMap<AV, (ErrorPrediction, HashMap<SP, ThresholdError>)> =
accumulate(
analysis,
|asvp: ASVP| AV {
attacker: asvp.attacker,
victim: asvp.victim,
},
|| (ErrorPrediction::default(), HashMap::new()),
|acc: &mut (ErrorPrediction, HashMap<SP, ThresholdError>),
threshold_error,
asvp: ASVP,
av| {
assert_eq!(av.attacker, asvp.attacker);
assert_eq!(av.victim, asvp.victim);
let sp = SP {
slice: asvp.slice,
page: asvp.page,
};
acc.0 += threshold_error.error;
acc.1.insert(sp, threshold_error);
},
);
Ok(asvp_best_av_errors)
}
fn new_with_core_pairs(
core_pairs: impl Iterator<Item = (usize, usize)> + Clone,
) -> Result<(Self, usize, usize), FlushAndFlushError> {
let m = MMappedMemory::new(PAGE_LEN, false);
let array: &[u8] = m.slice();
let mut res = Self::calibration_for_core_pairs(core_pairs, vec![array].into_iter())?;
let mut best_error_rate = 1.0;
let mut best_av = Default::default();
// Select the proper core
for (av, (global_error_pred, thresholds)) in res.iter() {
if global_error_pred.error_rate() < best_error_rate {
best_av = *av;
best_error_rate = global_error_pred.error_rate();
}
}
Self::new(best_av.attacker, best_av.victim)
.map(|this| (this, best_av.attacker, best_av.victim))
// Set no threshold as calibrated on local array that will get dropped.
}
pub fn new_any_single_core() -> Result<(Self, CpuSet, usize), FlushAndFlushError> {
// Generate core iterator
let mut core_pairs: Vec<(usize, usize)> = Vec::new();
let old = sched_getaffinity(Pid::from_raw(0)).unwrap();
for i in 0..CpuSet::count() {
if old.is_set(i).unwrap() {
core_pairs.push((i, i));
}
}
// Generate all single core pairs
// Call out to private constructor that takes a core pair list, determines best and makes the choice.
// The private constructor will set the correct affinity for main (attacker thread)
Self::new_with_core_pairs(core_pairs.into_iter()).map(|(channel, attacker, victim)| {
assert_eq!(attacker, victim);
(channel, old, attacker)
})
}
pub fn new_any_two_core(
distinct: bool,
) -> Result<(Self, CpuSet, usize, usize), FlushAndFlushError> {
let old = sched_getaffinity(Pid::from_raw(0)).unwrap();
let mut core_pairs: Vec<(usize, usize)> = Vec::new();
for i in 0..CpuSet::count() {
if old.is_set(i).unwrap() {
for j in 0..CpuSet::count() {
if old.is_set(j).unwrap() {
if i != j || !distinct {
core_pairs.push((i, j));
}
}
}
}
}
Self::new_with_core_pairs(core_pairs.into_iter()).map(|(channel, attacker, victim)| {
if distinct {
assert_ne!(attacker, victim);
}
(channel, old, attacker, victim)
})
}
fn get_slice(&self, addr: *const u8) -> Slice {
self.slicing.hash(addr as usize).unwrap()
}
pub fn set_cores(&mut self, attacker: usize, victim: usize) -> Result<(), FlushAndFlushError> {
let old_attacker = self.attacker_core;
let old_victim = self.victim_core;
self.attacker_core = attacker;
self.victim_core = victim;
let pages: Vec<VPN> = self
.thresholds
.keys()
.map(|sp: &SP| sp.page)
//.copied()
.collect();
match self.recalibrate(pages) {
Ok(()) => Ok(()),
Err(e) => {
self.attacker_core = old_attacker;
self.victim_core = old_victim;
Err(e)
}
}
}
fn recalibrate(
&mut self,
pages: impl IntoIterator<Item = VPN>,
) -> Result<(), FlushAndFlushError> {
// unset readiness status.
// Call calibration with core pairs with a single core pair
// Use results \o/ (or error out)
self.addresses_ready.clear();
// Fixme refactor in depth core pairs to make explicit main vs helper.
let core_pairs = vec![(self.attacker_core, self.victim_core)];
let pages: HashSet<&[u8]> = self
.thresholds
.keys()
.map(|sp: &SP| unsafe { &*slice_from_raw_parts(sp.page as *const u8, PAGE_LEN) })
.collect();
let mut res = Self::calibration_for_core_pairs(core_pairs.into_iter(), pages.into_iter())?;
assert_eq!(res.keys().count(), 1);
self.thresholds = res
.remove(&AV {
attacker: self.attacker_core,
victim: self.victim_core,
})
.unwrap()
.1;
Ok(())
}
unsafe fn test_impl<'a, 'b, 'c>(
&'a self,
addresses: &'b mut (impl Iterator<Item = &'c *const u8> + Clone),
limit: u32,
) -> Result<Vec<(*const u8, CacheStatus)>, SideChannelError> {
let mut result = Vec::new();
let mut tmp = Vec::new();
let mut i = 0;
for addr in addresses {
i += 1;
let t = unsafe { only_flush(*addr) };
tmp.push((addr, t));
if i == limit {
break;
}
}
for (addr, time) in tmp {
if !self.addresses_ready.contains(&addr) {
return Err(AddressNotReady(*addr));
}
let vpn: VPN = (*addr as usize) & (!0xfff); // FIXME
let slice = self.get_slice(*addr);
let threshold_error = &self.thresholds[&SP { slice, page: vpn }];
// refactor this into a struct threshold method ?
if threshold_error.threshold.is_hit(time) {
result.push((*addr, CacheStatus::Hit))
} else {
result.push((*addr, CacheStatus::Miss))
}
}
Ok(result)
}
unsafe fn prepare_impl<'a, 'b, 'c>(
&'a mut self,
addresses: &'b mut (impl Iterator<Item = &'c *const u8> + Clone),
limit: u32,
) -> Result<(), SideChannelError> {
use core::arch::x86_64 as arch_x86;
let mut i = 0;
let addresses_cloned = addresses.clone();
for addr in addresses_cloned {
i += 1;
let vpn: VPN = get_vpn(*addr);
let slice = self.get_slice(*addr);
if self.addresses_ready.contains(&addr) {
continue;
}
if !self.thresholds.contains_key(&SP { slice, page: vpn }) {
return Err(AddressNotCalibrated(*addr));
}
if i == limit {
break;
}
}
i = 0;
for addr in addresses {
i += 1;
unsafe { flush(*addr) };
//println!("{:p}", *addr);
self.addresses_ready.insert(*addr);
if i == limit {
break;
}
}
unsafe { arch_x86::_mm_mfence() };
Ok(())
}
}
impl Debug for FlushAndFlush {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_struct("FlushAndFlush")
.field("thresholds", &self.thresholds)
.field("addresses_ready", &self.addresses_ready)
.field("slicing", &self.slicing)
.finish()
}
}
impl CoreSpec for FlushAndFlush {
fn main_core(&self) -> CpuSet {
let mut main = CpuSet::new();
main.set(self.attacker_core);
main
}
fn helper_core(&self) -> CpuSet {
let mut helper = CpuSet::new();
helper.set(self.victim_core);
helper
}
}
use cache_side_channel::CacheStatus::Hit;
use cache_utils::calibration::cum_sum;
use cache_utils::mmap::MMappedMemory;
use covert_channels_evaluation::{BitIterator, CovertChannel};
use std::ptr::slice_from_raw_parts;
impl MultipleAddrCacheSideChannel for FlushAndFlush {
const MAX_ADDR: u32 = 3;
unsafe fn test<'a, 'b, 'c>(
&'a mut self,
addresses: &'b mut (impl Iterator<Item = &'c *const u8> + Clone),
) -> Result<Vec<(*const u8, CacheStatus)>, SideChannelError> {
unsafe { self.test_impl(addresses, Self::MAX_ADDR) }
}
unsafe fn prepare<'a, 'b, 'c>(
&'a mut self,
addresses: &'b mut (impl Iterator<Item = &'c *const u8> + Clone),
) -> Result<(), SideChannelError> {
unsafe { self.prepare_impl(addresses, Self::MAX_ADDR) }
}
fn victim(&mut self, operation: &dyn Fn()) {
operation(); // TODO use a different helper core ?
}
// TODO
// To split into several functions
// Calibration
// Make predictions out of results -> probably in cache_utils
// Compute Threshold & Error
// Compute stats from (A,V,S,P) into (A,V), or other models -> in cache_utils
// Use a generic function ? fn <T> reduce (HashMap<(A,S,V,P), Result>, Fn (A,S,V,P) -> T, a reduction method)
// Determine best core (A,V) amongst options -> in here
// Extract results out of calibration -> in self.calibrate
unsafe fn calibrate(
&mut self,
addresses: impl IntoIterator<Item = *const u8> + Clone,
) -> Result<(), ChannelFatalError> {
let core_pair = vec![(self.attacker_core, self.victim_core)];
let pages = addresses
.into_iter()
.map(|addr: *const u8| unsafe {
&*slice_from_raw_parts(get_vpn(addr) as *const u8, PAGE_LEN)
})
.collect::<HashSet<&[u8]>>();
let mut res =
match Self::calibration_for_core_pairs(core_pair.into_iter(), pages.into_iter()) {
Err(e) => {
return Err(ChannelFatalError::Oops);
}
Ok(r) => r,
};
assert_eq!(res.keys().count(), 1);
let t = res
.remove(&AV {
attacker: self.attacker_core,
victim: self.victim_core,
})
.unwrap()
.1;
for (sp, threshold) in t {
//println!("Inserting sp: {:?} => Threshold: {:?}", sp, threshold);
self.thresholds.insert(sp, threshold);
}
Ok(())
}
}
unsafe impl Send for FlushAndFlush {}
unsafe impl Sync for FlushAndFlush {}
impl CovertChannel for SingleFlushAndFlush {
const BIT_PER_PAGE: usize = 1; //PAGE_SHIFT - 6; // FIXME MAGIC cache line size
unsafe fn transmit<'a>(&self, page: *const u8, bits: &mut BitIterator<'a>) {
let mut offset = 0;
pub struct FFPrimitives {}
let page = self.0.preferred_address[&page];
if let Some(b) = bits.next() {
//println!("Transmitting {} on page {:p}", b, page);
if b {
unsafe { only_reload(page) };
} else {
unsafe { only_flush(page) };
}
}
}
unsafe fn receive(&self, page: *const u8) -> Vec<bool> {
let addresses: Vec<*const u8> = vec![self.0.preferred_address[&page]];
let r = unsafe { self.0.test_impl(&mut addresses.iter(), u32::max_value()) };
match r {
Err(e) => panic!("{:?}", e),
Ok(status_vec) => {
assert_eq!(status_vec.len(), 1);
let received = status_vec[0].1 == Hit;
return vec![received];
}
}
}
unsafe fn ready_page(&mut self, page: *const u8) {
let r = unsafe { self.0.calibrate(vec![page].into_iter()) }.unwrap();
let mut best_error_rate = 1.0;
let mut best_slice = 0;
for (sp, threshold_error) in self
.0
.thresholds
.iter()
.filter(|kv| kv.0.page == page as VPN)
{
if threshold_error.error.error_rate() < best_error_rate {
best_error_rate = threshold_error.error.error_rate();
best_slice = sp.slice;
}
}
for i in 0..PAGE_LEN {
let addr = unsafe { page.offset(i as isize) };
if self.0.get_slice(addr) == best_slice {
self.0.preferred_address.insert(page, addr);
let r = unsafe {
self.0
.prepare_impl(&mut vec![addr].iter(), u32::max_value())
}
.unwrap();
break;
}
}
}
}
impl CovertChannel for FlushAndFlush {
    const BIT_PER_PAGE: usize = 1; // could be PAGE_SHIFT - 6; FIXME: magic constant (6 = log2 of the 64-byte cache line)
unsafe fn transmit<'a>(&self, page: *const u8, bits: &mut BitIterator<'a>) {
let mut offset = 0;
if Self::BIT_PER_PAGE == 1 {
let page = self.preferred_address[&page];
if let Some(b) = bits.next() {
//println!("Transmitting {} on page {:p}", b, page);
if b {
unsafe { only_reload(page) };
} else {
unsafe { only_flush(page) };
}
}
} else {
for i in 0..Self::BIT_PER_PAGE {
if let Some(b) = bits.next() {
if b {
                        offset += 1 << (i + 6); // payload bit i sets page-offset bit i + 6; FIXME: magic cache-line size (64 = 1 << 6)
}
}
}
unsafe { maccess(page.offset(offset as isize)) };
        }
    }
unsafe fn receive(&self, page: *const u8) -> Vec<bool> {
if Self::BIT_PER_PAGE == 1 {
let addresses: Vec<*const u8> = vec![self.preferred_address[&page]];
let r = unsafe { self.test_impl(&mut addresses.iter(), u32::max_value()) };
match r {
Err(e) => panic!("{:?}", e),
Ok(status_vec) => {
assert_eq!(status_vec.len(), 1);
let received = status_vec[0].1 == Hit;
//println!("Received {} on page {:p}", received, page);
return vec![received];
}
}
} else {
let addresses = (0..PAGE_LEN)
.step_by(64)
.map(|o| unsafe { page.offset(o as isize) })
.collect::<HashSet<*const u8>>();
let r = unsafe { self.test_impl(&mut addresses.iter(), u32::max_value()) };
match r {
Err(e) => panic!("{:?}", e),
Ok(status_vec) => {
for (addr, status) in status_vec {
if status == Hit {
                        let offset = unsafe { addr.offset_from(page) } >> 6; // recover the line index; FIXME: magic cache-line size
let mut res = Vec::new();
for i in 0..Self::BIT_PER_PAGE {
res.push((offset & (1 << i)) != 0);
}
return res;
}
}
}
}
vec![false; Self::BIT_PER_PAGE]
}
}
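
    // Round-trip sketch of the multi-bit offset encoding used above
    // (illustrative helpers, not part of the channel API):
    //
    // fn encode(bits: &[bool]) -> isize {
    //     let mut offset = 0;
    //     for (i, &b) in bits.iter().enumerate() {
    //         if b {
    //             offset += 1 << (i + 6); // payload bit i contributes 2^i to the line index
    //         }
    //     }
    //     offset
    // }
    //
    // fn decode(offset: isize, n_bits: usize) -> Vec<bool> {
    //     let line = offset >> 6; // 64-byte cache lines
    //     (0..n_bits).map(|i| (line & (1 << i)) != 0).collect()
    // }
    //
    // assert_eq!(decode(encode(&[true, false]), 2), vec![true, false]);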
unsafe fn ready_page(&mut self, page: *const u8) {
let r = unsafe { self.calibrate(vec![page].into_iter()) }.unwrap();
if Self::BIT_PER_PAGE == 1 {
let mut best_error_rate = 1.0;
let mut best_slice = 0;
for (sp, threshold_error) in
self.thresholds.iter().filter(|kv| kv.0.page == page as VPN)
{
if threshold_error.error.error_rate() < best_error_rate {
best_error_rate = threshold_error.error.error_rate();
best_slice = sp.slice;
}
}
for i in 0..PAGE_LEN {
let addr = unsafe { page.offset(i as isize) };
if self.get_slice(addr) == best_slice {
self.preferred_address.insert(page, addr);
let r = unsafe { self.prepare_impl(&mut vec![addr].iter(), u32::max_value()) }
.unwrap();
break;
}
}
} else {
let addresses = (0..PAGE_LEN)
.step_by(64)
.map(|o| unsafe { page.offset(o as isize) })
.collect::<Vec<*const u8>>();
//println!("{:#?}", addresses);
let r = unsafe { self.prepare_impl(&mut addresses.iter(), u32::max_value()) }.unwrap();
}
}
}

pub struct FFPrimitives {}

impl TimingChannelPrimitives for FFPrimitives {
    unsafe fn attack(&self, addr: *const u8) -> u64 {
        unsafe { only_flush(addr) }
    }
}

pub type FlushAndFlush = TopologyAwareTimingChannel<FFPrimitives>;
pub type SingleFlushAndFlush = SingleChannel<FlushAndFlush>;
#[cfg(test)]
mod tests {

View File

@@ -1,126 +1,4 @@
use cache_side_channel::{
CacheStatus, ChannelFatalError, CoreSpec, SideChannelError, SingleAddrCacheSideChannel,
};
use cache_utils::calibration::{get_vpn, only_flush, only_reload, VPN};
use cache_utils::flush;
use covert_channels_evaluation::{BitIterator, CovertChannel};
use nix::sched::{sched_getaffinity, CpuSet};
use nix::unistd::Pid;
use std::collections::HashMap;
use std::thread::current;
use crate::FFPrimitives;
use basic_timing_cache_channel::naive::NaiveTimingChannel;
#[derive(Debug)]
pub struct NaiveFlushAndFlush {
pub threshold: u64,
current: HashMap<VPN, *const u8>,
main_core: CpuSet,
helper_core: CpuSet,
}
impl NaiveFlushAndFlush {
pub fn from_threshold(threshold: u64) -> Self {
NaiveFlushAndFlush {
threshold,
current: Default::default(),
main_core: sched_getaffinity(Pid::from_raw(0)).unwrap(),
helper_core: sched_getaffinity(Pid::from_raw(0)).unwrap(),
}
}
unsafe fn test_impl(&self, addr: *const u8) -> Result<CacheStatus, SideChannelError> {
let vpn = get_vpn(addr);
if self.current.get(&vpn) != Some(&addr) {
return Err(SideChannelError::AddressNotReady(addr));
}
let t = unsafe { only_flush(addr) };
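        // Flush+Flush: clflush completes faster when the line is absent from
        // the cache, so a time below the threshold means the victim did not
        // touch the line.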
if t < self.threshold {
Ok(CacheStatus::Miss)
} else {
Ok(CacheStatus::Hit)
}
}
pub fn set_cores(&mut self, main_core: usize, helper_core: usize) {
self.main_core = CpuSet::new();
self.main_core.set(main_core).unwrap();
self.helper_core = CpuSet::new();
self.helper_core.set(helper_core).unwrap();
}
}
impl SingleAddrCacheSideChannel for NaiveFlushAndFlush {
/// # Safety
///
/// addr needs to be a valid pointer
unsafe fn test_single(&mut self, addr: *const u8) -> Result<CacheStatus, SideChannelError> {
unsafe { self.test_impl(addr) }
}
/// # Safety:
///
/// addr needs to be a valid pointer
unsafe fn prepare_single(&mut self, addr: *const u8) -> Result<(), SideChannelError> {
unsafe { flush(addr) };
let vpn = get_vpn(addr);
self.current.insert(vpn, addr);
Ok(())
}
fn victim_single(&mut self, operation: &dyn Fn()) {
operation()
}
/// # Safety
///
/// addr needs to be a valid pointer
unsafe fn calibrate_single(
&mut self,
_addresses: impl IntoIterator<Item = *const u8>,
) -> Result<(), ChannelFatalError> {
Ok(())
}
}
unsafe impl Send for NaiveFlushAndFlush {}
unsafe impl Sync for NaiveFlushAndFlush {}
impl CoreSpec for NaiveFlushAndFlush {
fn main_core(&self) -> CpuSet {
self.main_core
}
fn helper_core(&self) -> CpuSet {
self.helper_core
}
}
impl CovertChannel for NaiveFlushAndFlush {
const BIT_PER_PAGE: usize = 1;
unsafe fn transmit<'a>(&self, page: *const u8, bits: &mut BitIterator<'a>) {
let vpn = get_vpn(page);
let addr = self.current.get(&vpn).unwrap();
if let Some(b) = bits.next() {
if b {
unsafe { only_reload(*addr) };
} else {
unsafe { only_flush(*addr) };
}
}
}
unsafe fn receive(&self, page: *const u8) -> Vec<bool> {
let r = unsafe { self.test_impl(page) };
match r {
            Err(e) => panic!("{:?}", e),
Ok(status) => match status {
CacheStatus::Hit => vec![true],
CacheStatus::Miss => vec![false],
},
}
}
unsafe fn ready_page(&mut self, page: *const u8) {
unsafe { self.prepare_single(page) };
}
}
pub type NaiveFlushAndFlush = NaiveTimingChannel<FFPrimitives>;
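
// The naive Flush+Flush channel is now an instantiation of the shared generic:
// NaiveTimingChannel times FFPrimitives::attack (a clflush via only_flush) and
// compares it against a fixed threshold, replacing the hand-rolled
// implementation deleted above.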

View File

@@ -11,3 +11,4 @@ cache_utils = { path = "../cache_utils" }
cache_side_channel = { path = "../cache_side_channel" }
covert_channels_evaluation = {path = "../covert_channels_evaluation"}
nix = "0.18.0"
basic_timing_cache_channel = { path = "../basic_timing_cache_channel" }

View File

@@ -1,12 +1,6 @@
#![feature(unsafe_block_in_unsafe_fn)]
#![deny(unsafe_op_in_unsafe_fn)]
use cache_side_channel::{
CacheStatus, ChannelFatalError, SideChannelError, SingleAddrCacheSideChannel,
};
use cache_utils::calibration::only_reload;
use cache_utils::flush;
pub mod naive;
#[cfg(test)]

View File

@@ -1,127 +1,15 @@
use cache_side_channel::{
CacheStatus, ChannelFatalError, CoreSpec, SideChannelError, SingleAddrCacheSideChannel,
};
use cache_utils::calibration::{get_vpn, only_flush, only_reload, VPN};
use cache_utils::flush;
use covert_channels_evaluation::{BitIterator, CovertChannel};
use nix::sched::{sched_getaffinity, CpuSet};
use nix::unistd::Pid;
use std::collections::HashMap;
use std::thread::current;
use basic_timing_cache_channel::naive::NaiveTimingChannel;
use basic_timing_cache_channel::TimingChannelPrimitives;
use cache_utils::calibration::only_reload;
#[derive(Debug)]
pub struct NaiveFlushAndReload {
pub threshold: u64,
current: HashMap<VPN, *const u8>,
main_core: CpuSet,
helper_core: CpuSet,
}
pub struct NaiveFRPrimitives {}

impl TimingChannelPrimitives for NaiveFRPrimitives {
    unsafe fn attack(&self, addr: *const u8) -> u64 {
        unsafe { only_reload(addr) }
    }
}
impl NaiveFlushAndReload {
pub fn from_threshold(threshold: u64) -> Self {
NaiveFlushAndReload {
threshold,
current: Default::default(),
main_core: sched_getaffinity(Pid::from_raw(0)).unwrap(),
helper_core: sched_getaffinity(Pid::from_raw(0)).unwrap(),
}
}
unsafe fn test_impl(&self, addr: *const u8) -> Result<CacheStatus, SideChannelError> {
let vpn = get_vpn(addr);
if self.current.get(&vpn) != Some(&addr) {
return Err(SideChannelError::AddressNotReady(addr));
}
let t = unsafe { only_reload(addr) };
unsafe { flush(addr) };
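        // Flush+Reload: a reload at or below the threshold is a cache hit,
        // i.e. the victim touched the line; the flush re-arms the probe.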
if t > self.threshold {
Ok(CacheStatus::Miss)
} else {
Ok(CacheStatus::Hit)
        }
    }
pub fn set_cores(&mut self, main_core: usize, helper_core: usize) {
self.main_core = CpuSet::new();
self.main_core.set(main_core).unwrap();
self.helper_core = CpuSet::new();
self.helper_core.set(helper_core).unwrap();
}
}
impl SingleAddrCacheSideChannel for NaiveFlushAndReload {
/// # Safety
///
/// addr needs to be a valid pointer
unsafe fn test_single(&mut self, addr: *const u8) -> Result<CacheStatus, SideChannelError> {
unsafe { self.test_impl(addr) }
}
/// # Safety:
///
/// addr needs to be a valid pointer
unsafe fn prepare_single(&mut self, addr: *const u8) -> Result<(), SideChannelError> {
unsafe { flush(addr) };
let vpn = get_vpn(addr);
self.current.insert(vpn, addr);
Ok(())
}
fn victim_single(&mut self, operation: &dyn Fn()) {
operation()
}
/// # Safety
///
/// addr needs to be a valid pointer
unsafe fn calibrate_single(
&mut self,
_addresses: impl IntoIterator<Item = *const u8>,
) -> Result<(), ChannelFatalError> {
Ok(())
}
}
unsafe impl Send for NaiveFlushAndReload {}
unsafe impl Sync for NaiveFlushAndReload {}
impl CoreSpec for NaiveFlushAndReload {
fn main_core(&self) -> CpuSet {
self.main_core
}
fn helper_core(&self) -> CpuSet {
self.helper_core
}
}
impl CovertChannel for NaiveFlushAndReload {
const BIT_PER_PAGE: usize = 1;
unsafe fn transmit<'a>(&self, page: *const u8, bits: &mut BitIterator<'a>) {
let vpn = get_vpn(page);
let addr = self.current.get(&vpn).unwrap();
if let Some(b) = bits.next() {
if b {
unsafe { only_reload(*addr) };
} else {
unsafe { only_flush(*addr) };
}
}
}
unsafe fn receive(&self, page: *const u8) -> Vec<bool> {
let r = unsafe { self.test_impl(page) };
match r {
            Err(e) => panic!("{:?}", e),
Ok(status) => match status {
CacheStatus::Hit => vec![true],
CacheStatus::Miss => vec![false],
},
}
}
unsafe fn ready_page(&mut self, page: *const u8) {
unsafe { self.prepare_single(page) };
}
}
pub type NaiveFlushAndReload = NaiveTimingChannel<NaiveFRPrimitives>;
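
// Sketch of the decision rule the shared naive channel applies to the timing
// returned by NaiveFRPrimitives::attack (the helper name is illustrative; it
// mirrors the deleted test_impl above):
//
// fn classify(t: u64, threshold: u64) -> CacheStatus {
//     if t > threshold { CacheStatus::Miss } else { CacheStatus::Hit }
// }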

View File

@@ -106,7 +106,7 @@ impl<T> TurnHandle<T> {
unsafe { self.guard() }
}
fn next(&self) {
pub fn next(&self) {
unsafe { self.raw.lock.next(self.index) };
}
}
@@ -118,20 +118,10 @@ pub struct TurnLockGuard<'a, T> {
}
impl<'a, T> TurnLockGuard<'a, T> {
/*pub fn next(self) {
drop(self)
}*/
pub fn handle(&self) -> &TurnHandle<T> {
self.handle
}
}
/*
impl<'a, T> Drop for TurnLockGuard<'a, T> {
fn drop(&mut self) {
}
}*/
impl<'a, T> Deref for TurnLockGuard<'a, T> {
type Target = T;
@@ -169,14 +159,6 @@ mod tests {
let t2 = v[2].wait();
drop(t2);
v[2].next();
let mut t0 = v[0].wait();
//drop(t0);
//assert_eq!(v[2].current(), 1);
//let t0_prime = v[0].wait();
//*t0 += 1;
//*t0_prime += 1;
//v[0].next();
//assert_eq!(*t0_prime, 2);
let t0 = v[0].wait();
}
}