diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml
new file mode 100644
index 0000000..d67e119
--- /dev/null
+++ b/.idea/inspectionProfiles/Project_Default.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/basic_timing_cache_channel/src/lib.rs b/basic_timing_cache_channel/src/lib.rs
index 594cf0d..9fb0425 100644
--- a/basic_timing_cache_channel/src/lib.rs
+++ b/basic_timing_cache_channel/src/lib.rs
@@ -212,7 +212,7 @@ impl TopologyAwareTimingChannel {
pub fn new_with_core_pairs(
core_pairs: impl Iterator<Item = (usize, usize)> + Clone,
) -> Result<(Self, usize, usize), TopologyAwareError> {
- let m = MMappedMemory::new(PAGE_LEN, false);
+ let m = MMappedMemory::new(PAGE_LEN, false, |i| i as u8);
let array: &[u8] = m.slice();
let t = Default::default();
diff --git a/cache_utils/src/bin/cache_info.rs b/cache_utils/src/bin/cache_info.rs
index cd230ec..0692ace 100644
--- a/cache_utils/src/bin/cache_info.rs
+++ b/cache_utils/src/bin/cache_info.rs
@@ -1,3 +1,5 @@
+#![deny(unsafe_op_in_unsafe_fn)]
+
use cache_utils::cache_info::get_cache_info;
use cache_utils::complex_addressing::cache_slicing;
use cpuid::MicroArchitecture;
diff --git a/cache_utils/src/bin/frequency_test.rs b/cache_utils/src/bin/frequency_test.rs
index 5ad7653..8cf347e 100644
--- a/cache_utils/src/bin/frequency_test.rs
+++ b/cache_utils/src/bin/frequency_test.rs
@@ -1,3 +1,5 @@
+#![deny(unsafe_op_in_unsafe_fn)]
+
use cache_utils::frequency::get_freq_cpufreq_kernel;
use cache_utils::rdtsc_fence;
use libc::sched_getcpu;
diff --git a/cache_utils/src/bin/l3.rs b/cache_utils/src/bin/l3.rs
index d643dfd..b9311d3 100644
--- a/cache_utils/src/bin/l3.rs
+++ b/cache_utils/src/bin/l3.rs
@@ -1,8 +1,10 @@
+#![deny(unsafe_op_in_unsafe_fn)]
+
use cache_utils::flush;
use cache_utils::mmap::MMappedMemory;
pub fn main() {
- let m = MMappedMemory::new(2 << 20, true);
+ let m = MMappedMemory::new(2 << 20, true, |i| i as u8);
let array = m.slice();
loop {
unsafe {
diff --git a/cache_utils/src/bin/sleep_freq_test_flush_only.rs b/cache_utils/src/bin/sleep_freq_test_flush_only.rs
index 68e8f59..37e903c 100644
--- a/cache_utils/src/bin/sleep_freq_test_flush_only.rs
+++ b/cache_utils/src/bin/sleep_freq_test_flush_only.rs
@@ -1,3 +1,5 @@
+#![deny(unsafe_op_in_unsafe_fn)]
+
use cache_utils::calibration::only_flush;
use cache_utils::frequency::get_freq_cpufreq_kernel;
use cache_utils::rdtsc_fence;
diff --git a/cache_utils/src/bin/sleep_freq_test_reload_only.rs b/cache_utils/src/bin/sleep_freq_test_reload_only.rs
index a7dee0a..0517a14 100644
--- a/cache_utils/src/bin/sleep_freq_test_reload_only.rs
+++ b/cache_utils/src/bin/sleep_freq_test_reload_only.rs
@@ -1,3 +1,5 @@
+#![deny(unsafe_op_in_unsafe_fn)]
+
use cache_utils::calibration::only_reload;
use cache_utils::frequency::get_freq_cpufreq_kernel;
use cache_utils::rdtsc_fence;
diff --git a/cache_utils/src/bin/two_thread_cal.rs b/cache_utils/src/bin/two_thread_cal.rs
index 281e78a..52e5aa1 100644
--- a/cache_utils/src/bin/two_thread_cal.rs
+++ b/cache_utils/src/bin/two_thread_cal.rs
@@ -1,3 +1,5 @@
+#![deny(unsafe_op_in_unsafe_fn)]
+
use cache_utils::calibration::{
accumulate, calibrate_fixed_freq_2_thread, calibration_result_to_ASVP, flush_and_reload,
get_cache_slicing, load_and_flush, map_values, only_flush, only_reload, reduce,
@@ -18,15 +20,17 @@ use std::process::Command;
use std::str::from_utf8;
unsafe fn multiple_access(p: *const u8) {
- maccess::<u8>(p);
- maccess::<u8>(p);
- arch_x86::_mm_mfence();
- maccess::<u8>(p);
- arch_x86::_mm_mfence();
- maccess::<u8>(p);
- arch_x86::_mm_mfence();
- maccess::<u8>(p);
- maccess::<u8>(p);
+ unsafe {
+ maccess::<u8>(p);
+ maccess::<u8>(p);
+ arch_x86::_mm_mfence();
+ maccess::<u8>(p);
+ arch_x86::_mm_mfence();
+ maccess::<u8>(p);
+ arch_x86::_mm_mfence();
+ maccess::<u8>(p);
+ maccess::<u8>(p);
+ }
}
const SIZE: usize = 2 << 20;
@@ -105,7 +109,7 @@ fn main() {
println!("Number of cores per socket: {}", core_per_socket);
- let m = MMappedMemory::new(SIZE, true);
+ let m = MMappedMemory::new(SIZE, true, |i: usize| i as u8);
let array = m.slice();
let cache_line_size = 64;
diff --git a/cache_utils/src/calibrate_2t.rs b/cache_utils/src/calibrate_2t.rs
index 9778052..4537f77 100644
--- a/cache_utils/src/calibrate_2t.rs
+++ b/cache_utils/src/calibrate_2t.rs
@@ -10,10 +10,9 @@ use nix::unistd::Pid;
use nix::Error;
use std::cmp::min;
use std::ptr::null_mut;
-//use std::sync::atomic::Ordering;
-use std::mem::forget;
use std::sync::{Arc, Mutex};
use std::thread;
+use std::vec::Vec;
use turn_lock::TurnHandle;
pub struct CalibrateOperation2T<'a, T> {
@@ -111,7 +110,7 @@ fn calibrate_fixed_freq_2_thread_impl<I: Iterator<Item = (usize, usize)>, T>(
},
);
- let mut helper_turn_handle = Arc::new(Mutex::new(turn_handles.pop().unwrap()));
+ let helper_turn_handle = Arc::new(Mutex::new(turn_handles.pop().unwrap()));
let mut main_turn_handle = turn_handles.pop().unwrap();
let mut params = main_turn_handle.wait();
diff --git a/cache_utils/src/calibration.rs b/cache_utils/src/calibration.rs
index f6b41c4..3331a72 100644
--- a/cache_utils/src/calibration.rs
+++ b/cache_utils/src/calibration.rs
@@ -9,17 +9,6 @@ use core::arch::x86_64 as arch_x86;
#[cfg(feature = "no_std")]
use polling_serial::{serial_print as print, serial_println as println};
-#[cfg(feature = "use_std")]
-use nix::unistd::Pid;
-//#[cfg(feature = "use_std")]
-//use nix::Error::Sys;
-#[cfg(feature = "use_std")]
-use nix::Error;
-#[cfg(feature = "use_std")]
-use std::sync::Arc;
-#[cfg(feature = "use_std")]
-use std::thread;
-
#[cfg(feature = "use_std")]
pub use crate::calibrate_2t::*;
@@ -28,11 +17,8 @@ use crate::calibration::Verbosity::*;
use alloc::vec;
use alloc::vec::Vec;
use core::cmp::min;
-use core::ptr::null_mut;
-use core::sync::atomic::{spin_loop_hint, AtomicBool, AtomicPtr, Ordering};
use itertools::Itertools;
-use atomic::Atomic;
use core::hash::Hash;
use core::ops::{Add, AddAssign};
#[cfg(feature = "no_std")]
@@ -71,44 +57,52 @@ impl CalibrationOptions {
}
pub unsafe fn only_reload(p: *const u8) -> u64 {
- let t = rdtsc_fence();
- maccess(p);
- rdtsc_fence() - t
+ let t = unsafe { rdtsc_fence() };
+ unsafe { maccess(p) };
+ (unsafe { rdtsc_fence() } - t)
}
pub unsafe fn flush_and_reload(p: *const u8) -> u64 {
- flush(p);
- only_reload(p)
+ unsafe {
+ flush(p);
+ only_reload(p)
+ }
}
pub unsafe fn reload_and_flush(p: *const u8) -> u64 {
- let r = only_reload(p);
- flush(p);
+ let r = unsafe { only_reload(p) };
+ unsafe { flush(p) };
r
}
pub unsafe fn only_flush(p: *const u8) -> u64 {
- let t = rdtsc_fence();
- flush(p);
- rdtsc_fence() - t
+ let t = unsafe { rdtsc_fence() };
+ unsafe { flush(p) };
+ (unsafe { rdtsc_fence() } - t)
}
pub unsafe fn load_and_flush(p: *const u8) -> u64 {
- maccess(p);
- only_flush(p)
+ unsafe {
+ maccess(p);
+ only_flush(p)
+ }
}
pub unsafe fn flush_and_flush(p: *const u8) -> u64 {
- flush(p);
- only_flush(p)
+ unsafe {
+ flush(p);
+ only_flush(p)
+ }
}
pub unsafe fn l3_and_reload(p: *const u8) -> u64 {
- flush(p);
- arch_x86::_mm_mfence();
- arch_x86::_mm_prefetch(p as *const i8, arch_x86::_MM_HINT_T2);
- arch_x86::__cpuid_count(0, 0);
- only_reload(p)
+ unsafe {
+ flush(p);
+ arch_x86::_mm_mfence();
+ arch_x86::_mm_prefetch::<{ arch_x86::_MM_HINT_T2 }>(p as *const i8);
+ arch_x86::__cpuid_count(0, 0);
+ only_reload(p)
+ }
}
pub const PAGE_SHIFT: usize = 12;
diff --git a/cache_utils/src/lib.rs b/cache_utils/src/lib.rs
index de5e86f..8ec4180 100644
--- a/cache_utils/src/lib.rs
+++ b/cache_utils/src/lib.rs
@@ -1,6 +1,7 @@
#![cfg_attr(feature = "no_std", no_std)]
#![feature(ptr_internals)]
#![allow(clippy::missing_safety_doc)]
+#![deny(unsafe_op_in_unsafe_fn)]
use static_assertions::assert_cfg;
@@ -29,23 +30,23 @@ use core::ptr;
// rdtsc no fence
pub unsafe fn rdtsc_nofence() -> u64 {
- arch_x86::_rdtsc()
+ unsafe { arch_x86::_rdtsc() }
}
// rdtsc (has mfence before and after)
pub unsafe fn rdtsc_fence() -> u64 {
- arch_x86::_mm_mfence();
- let tsc: u64 = arch_x86::_rdtsc();
- arch_x86::_mm_mfence();
+ unsafe { arch_x86::_mm_mfence() };
+ let tsc: u64 = unsafe { arch_x86::_rdtsc() };
+ unsafe { arch_x86::_mm_mfence() };
tsc
}
pub unsafe fn maccess<T>(p: *const T) {
- ptr::read_volatile(p);
+ unsafe { ptr::read_volatile(p) };
}
// flush (cflush)
pub unsafe fn flush(p: *const u8) {
- arch_x86::_mm_clflush(p);
+ unsafe { arch_x86::_mm_clflush(p) };
}
pub fn noop<T>(_: *const T) {}
diff --git a/cache_utils/src/main.rs b/cache_utils/src/main.rs
index c4a7502..e938589 100644
--- a/cache_utils/src/main.rs
+++ b/cache_utils/src/main.rs
@@ -33,7 +33,7 @@ struct Page {
}
*/
pub fn main() {
- let m = MMappedMemory::new(SIZE, true);
+ let m = MMappedMemory::new(SIZE, true, |i| i as u8);
let array = m.slice();
let old = sched_getaffinity(Pid::from_raw(0)).unwrap();
diff --git a/cache_utils/src/mmap.rs b/cache_utils/src/mmap.rs
index dcc3b5c..fa307a5 100644
--- a/cache_utils/src/mmap.rs
+++ b/cache_utils/src/mmap.rs
@@ -2,8 +2,9 @@
use core::borrow::{Borrow, BorrowMut};
use core::ffi::c_void;
-use core::mem::size_of;
+use core::mem::{size_of, MaybeUninit};
use core::ops::{Deref, DerefMut};
+use core::ptr;
use core::ptr::null_mut;
use core::ptr::Unique;
use core::slice::{from_raw_parts, from_raw_parts_mut};
@@ -19,14 +20,18 @@ use nix::sys::mman;
#define HUGETLB_FLAG_ENCODE_1MB (20 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_2MB (21 << HUGETLB_FLAG_ENCODE_SHIFT)
*/
-
+/** Safety issue : if T is not trivially constructible and destructible this is dangerous */
pub struct MMappedMemory<T> {
pointer: Unique<T>,
size: usize,
}
impl<T> MMappedMemory<T> {
- pub fn try_new(size: usize, huge: bool) -> Result<MMappedMemory<T>, nix::Error> {
+ pub fn try_new(
+ size: usize,
+ huge: bool,
+ initializer: impl Fn(usize) -> T,
+ ) -> Result<MMappedMemory<T>, nix::Error> {
assert_ne!(size_of::<T>(), 0);
if let Some(p) = unsafe {
let p = mman::mmap(
@@ -46,14 +51,48 @@ impl MMappedMemory {
let pointer_T = p as *mut T;
Unique::new(pointer_T)
} {
- Ok(MMappedMemory { pointer: p, size })
+ let mut s = MMappedMemory { pointer: p, size };
+ for i in 0..s.size {
+ unsafe { ptr::write(s.pointer.as_ptr().add(i), initializer(i)) };
+ }
+ Ok(s)
} else {
Err(nix::Error::Sys(EINVAL))
}
}
-
- pub fn new(size: usize, huge: bool) -> MMappedMemory<T> {
- Self::try_new(size, huge).unwrap()
+ /*
+ pub fn try_new_uninit(
+ size: usize,
+ huge: bool,
+ ) -> Result<MMappedMemory<MaybeUninit<T>>, nix::Error> {
+ assert_ne!(size_of::<T>(), 0);
+ if let Some(p) = unsafe {
+ let p = mman::mmap(
+ null_mut(),
+ size * size_of::<T>(),
+ mman::ProtFlags::PROT_READ | mman::ProtFlags::PROT_WRITE,
+ mman::MapFlags::MAP_PRIVATE
+ | mman::MapFlags::MAP_ANONYMOUS
+ | if huge {
+ mman::MapFlags::MAP_HUGETLB
+ } else {
+ mman::MapFlags::MAP_ANONYMOUS
+ },
+ -1,
+ 0,
+ )?;
+ let pointer_T = p as *mut T;
+ Unique::new(pointer_T)
+ } {
+ let mut s = MMappedMemory { pointer: p, size };
+ Ok(s)
+ } else {
+ Err(nix::Error::Sys(EINVAL))
+ }
+ }
+ */
+ pub fn new(size: usize, huge: bool, init: impl Fn(usize) -> T) -> MMappedMemory<T> {
+ Self::try_new(size, huge, init).unwrap()
}
pub fn slice(&self) -> &[T] {
@@ -67,6 +106,9 @@ impl<T> MMappedMemory<T> {
impl<T> Drop for MMappedMemory<T> {
fn drop(&mut self) {
+ for i in 0..self.size {
+ unsafe { ptr::drop_in_place(self.pointer.as_ptr().add(i)) };
+ }
unsafe {
mman::munmap(self.pointer.as_ptr() as *mut c_void, self.size).unwrap();
}
diff --git a/covert_channels_evaluation/src/lib.rs b/covert_channels_evaluation/src/lib.rs
index 29df623..6240dac 100644
--- a/covert_channels_evaluation/src/lib.rs
+++ b/covert_channels_evaluation/src/lib.rs
@@ -165,13 +165,10 @@ pub fn benchmark_channel(
let old_affinity = set_affinity(&channel.main_core()).unwrap();
let size = num_pages * PAGE_SIZE;
- let mut m = MMappedMemory::new(size, false);
+ let mut m = MMappedMemory::new(size, false, |i| (i / PAGE_SIZE) as u8);
let mut receiver_turn_handles = Vec::new();
let mut transmit_turn_handles = Vec::new();
- for i in 0..num_pages {
- m.slice_mut()[i * PAGE_SIZE] = i as u8;
- }
let array: &[u8] = m.slice();
for i in 0..num_pages {
let addr = &array[i * PAGE_SIZE] as *const u8;
diff --git a/prefetcher_reverse/src/lib.rs b/prefetcher_reverse/src/lib.rs
index 5599a83..cedcd77 100644
--- a/prefetcher_reverse/src/lib.rs
+++ b/prefetcher_reverse/src/lib.rs
@@ -1,6 +1,4 @@
#![deny(unsafe_op_in_unsafe_fn)]
-#![feature(const_generics)]
-#![feature(const_evaluatable_checked)]
use crate::Probe::{Flush, FullFlush, Load};
use basic_timing_cache_channel::{TopologyAwareError, TopologyAwareTimingChannel};
@@ -166,15 +164,14 @@ impl Prober {
};
for i in 0..num_pages {
- let mut p = match MMappedMemory::<u8>::try_new(PAGE_LEN * GS, false) {
+ let mut p = match MMappedMemory::<u8>::try_new(PAGE_LEN * GS, false, |j| {
+ (j / CACHE_LINE_LEN + i * PAGE_CACHELINE_LEN) as u8
+ }) {
Ok(p) => p,
Err(e) => {
return Err(ProberError::NoMem(e));
}
};
- for j in 0..(PAGE_LEN * GS) {
- p[j] = (i * PAGE_CACHELINE_LEN + j) as u8;
- }
let page_addresses = ((0..(PAGE_LEN * GS)).step_by(CACHE_LINE_LEN))
.map(|offset| &p[offset] as *const u8);
let ff_page_handles = unsafe { ff_channel.calibrate(page_addresses.clone()) }.unwrap();