Finish the function placement module (IP tool)
parent ffd72b84d5
commit 206d45b823
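Two themes run through the hunks below: `MMappedMemory::new`/`try_new` gain an `executable: bool` parameter, so every existing call site picks up an extra `false`, and the function-placement allocator (`WXAllocator`, `WXRange`, `Function`) learns to grow itself on demand and to deallocate relocated functions on drop. As a minimal sketch of the call-site change only (the buffer size, initializer, and function name here are illustrative, not taken from the commit):

    use cache_utils::mmap::MMappedMemory;

    fn before_and_after() {
        // Before this commit the call took two arguments before the initializer:
        //     let m = MMappedMemory::new(4096, false, |i| i as u8);
        // After it, the third argument is the new `executable` flag,
        // kept false for plain data buffers.
        let m: MMappedMemory<u8> = MMappedMemory::new(4096, false, false, |i| i as u8);
        assert_eq!(m.slice().len(), 4096);
    }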
@@ -303,7 +303,7 @@ impl<T: TimingChannelPrimitives> TopologyAwareTimingChannel<T> {
         core_pairs: impl Iterator<Item = (usize, usize)> + Clone,
         strat: CalibrationStrategy,
     ) -> Result<(Self, usize, usize), TopologyAwareError> {
-        let m = MMappedMemory::new(PAGE_LEN, false, |i| i as u8);
+        let m = MMappedMemory::new(PAGE_LEN, false, false, |i| i as u8);
         let array: &[u8] = m.slice();
 
         let t = Default::default();
@@ -429,7 +429,7 @@ impl<T: TimingChannelPrimitives> TopologyAwareTimingChannel<T> {
                 )?
             }
             ThresholdStrat::AV(_) | ThresholdStrat::AVSockets(_) => {
-                let m = MMappedMemory::new(PAGE_LEN, false, |i| i as u8);
+                let m = MMappedMemory::new(PAGE_LEN, false, false, |i| i as u8);
                 let array: &[u8] = m.slice();
                 let mut hashset = HashSet::new();
                 hashset.insert(array);
@@ -4,7 +4,7 @@ use cache_utils::flush;
 use cache_utils::mmap::MMappedMemory;
 
 pub fn main() {
-    let m = MMappedMemory::new(2 << 20, true, |i| i as u8);
+    let m = MMappedMemory::new(2 << 20, true, false, |i| i as u8);
     let array = m.slice();
     loop {
         unsafe {
@@ -111,7 +111,7 @@ fn main() {
 
     println!("Number of cores per socket: {}", core_per_socket);
 
-    let m = MMappedMemory::new(SIZE, true, |i: usize| i as u8);
+    let m = MMappedMemory::new(SIZE, true, false, |i: usize| i as u8);
     let array = m.slice();
 
     let cache_line_size = 64;
@@ -147,7 +147,7 @@ fn main() {
             display_name: "clflush remote hit",
             t: &(),
         },
-        /* CalibrateOperation2T {
+        /* CalibrateOperation2T {
            prepare: maccess::<u8>,
            op: load_and_flush_wrap,
            name: "clflush_shared_hit",
@@ -175,7 +175,7 @@ fn main() {
             display_name: "clflush miss - n",
             t: &(),
         },
-        /* CalibrateOperation2T {
+        /* CalibrateOperation2T {
            prepare: noop::<u8>,
            op: load_and_flush_wrap,
            name: "clflush_local_hit_n",
@@ -298,7 +298,7 @@ fn main() {
         Err(e) => panic!("Error: {}", e),
     };
 
-    /* asvp_analysis[&ASVP {
+    /* asvp_analysis[&ASVP {
        attacker: 0,
        slice: 0,
        victim: 0,
@@ -33,7 +33,7 @@ struct Page {
 }
 */
 pub fn main() {
-    let m = MMappedMemory::new(SIZE, true, |i| i as u8);
+    let m = MMappedMemory::new(SIZE, true, false, |i| i as u8);
     let array = m.slice();
 
     let old = sched_getaffinity(Pid::from_raw(0)).unwrap();
@@ -30,6 +30,7 @@ impl<T> MMappedMemory<T> {
     pub fn try_new(
         size: usize,
         huge: bool,
+        executable: bool,
         initializer: impl Fn(usize) -> T,
     ) -> Result<MMappedMemory<T>, nix::Error> {
         assert_ne!(size_of::<T>(), 0);
@@ -37,7 +38,13 @@ impl<T> MMappedMemory<T> {
         let p = mman::mmap(
             null_mut(),
             size * size_of::<T>(),
-            mman::ProtFlags::PROT_READ | mman::ProtFlags::PROT_WRITE,
+            mman::ProtFlags::PROT_READ
+                | mman::ProtFlags::PROT_WRITE
+                | if executable {
+                    mman::ProtFlags::PROT_EXEC
+                } else {
+                    mman::ProtFlags::PROT_READ
+                },
             mman::MapFlags::MAP_PRIVATE
                 | mman::MapFlags::MAP_ANONYMOUS
                 | if huge {
@@ -91,8 +98,13 @@ impl<T> MMappedMemory<T> {
         }
     }
     */
-    pub fn new(size: usize, huge: bool, init: impl Fn(usize) -> T) -> MMappedMemory<T> {
-        Self::try_new(size, huge, init).unwrap()
+    pub fn new(
+        size: usize,
+        huge: bool,
+        executable: bool,
+        init: impl Fn(usize) -> T,
+    ) -> MMappedMemory<T> {
+        Self::try_new(size, huge, executable, init).unwrap()
     }
 
     pub fn slice(&self) -> &[T] {
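When `executable` is true, the mapping above ORs `PROT_EXEC` into the protection flags; the function-placement allocator further down uses exactly this path to obtain code pages filled with 0xcc. A hedged sketch of that request (the one-page size is illustrative; the 0xcc fill mirrors the commit and means unused bytes decode as int3, so stray jumps trap immediately):

    use cache_utils::mmap::MMappedMemory;

    // Request a readable, writable and executable anonymous mapping.
    fn map_code_page() -> Result<MMappedMemory<u8>, nix::Error> {
        MMappedMemory::try_new(1 << 12, false, true, |_| 0xcc)
    }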
@@ -165,7 +165,7 @@ pub fn benchmark_channel<T: 'static + Send + CovertChannel>(
     let old_affinity = set_affinity(&channel.main_core()).unwrap();
 
     let size = num_pages * PAGE_SIZE;
-    let mut m = MMappedMemory::new(size, false, |i| (i / PAGE_SIZE) as u8);
+    let mut m = MMappedMemory::new(size, false, false, |i| (i / PAGE_SIZE) as u8);
     let mut receiver_turn_handles = Vec::new();
     let mut transmit_turn_handles = Vec::new();
 
@@ -62,14 +62,19 @@ impl WXRange {
         mask: usize,
         round_mask: usize,
     ) -> Result<*mut u8, ()> {
-        // In each range, we want to find base = 2^a * k such that start <= base + align < start + 2^a
+        // In each range, we want to find base = 2^a * k such that start <= base + offset < start + 2^a
         // This can be done with k = ceil(start - align / 2^a).
         // 2^a * k can likely be computed with some clever bit tricks.
         // \o/
         let start = self.start;
-        let mut candidate = (start - offset + mask) & round_mask + offset;
+        println!(
+            "offset: {:x}, align: {:x}, start: {:x}, mask {:x}, round_mask {:x}",
+            offset, align, start, mask, round_mask
+        );
+
+        let mut candidate = ((start - offset + mask) & round_mask) + offset;
         assert_eq!(candidate & mask, offset);
-        assert!(candidate > start);
+        assert!(candidate >= start);
         while candidate + length <= self.end {
             let bit_range = &mut self.bitmap[(candidate - start)..(candidate - start + length)];
             if !bit_range.any() {
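The corrected rounding expression (note the added parentheses: `&` binds tighter than `+` was intended to here) can be sanity-checked with concrete numbers. A small standalone sketch, with values chosen purely for illustration and assuming `offset <= start` so the unsigned subtraction cannot underflow:

    fn rounding_example() {
        let start: usize = 0x1234; // hypothetical range start
        let align: usize = 64; // 2^a
        let offset: usize = 0x10; // wanted value of candidate % align
        let mask = align - 1; // 0x3f
        let round_mask = !mask;

        // Smallest candidate >= start with candidate & mask == offset:
        // ((0x1234 - 0x10 + 0x3f) & !0x3f) + 0x10 = 0x1240 + 0x10 = 0x1250.
        let candidate = ((start - offset + mask) & round_mask) + offset;
        assert_eq!(candidate, 0x1250);
        assert_eq!(candidate & mask, offset);
        assert!(candidate >= start);
    }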
@@ -80,6 +85,14 @@ impl WXRange {
         }
         Err(())
     }
+
+    unsafe fn deallocate(&mut self, p: *const u8, size: usize) {
+        let offset = p as usize - self.start;
+        if !self.bitmap[offset..(offset + size)].all() {
+            panic!("deallocating invalid data");
+        }
+        self.bitmap[offset..(offset + size)].set_all(false);
+    }
 }
 
 impl WXAllocator {
@@ -94,13 +107,73 @@ impl WXAllocator {
         }
         let mask = align - 1;
         let round_mask = !mask;
-        for range in self.ranges.iter_mut() {
-            if let Ok(p) = unsafe { range.allocate(align, offset, length, mask, round_mask) } {
-                return Ok(p);
+        loop {
+            for range in self.ranges.iter_mut() {
+                if let Ok(p) = unsafe { range.allocate(align, offset, length, mask, round_mask) } {
+                    return Ok(p);
                 }
             }
+            const PAGE_SIZE: usize = 1 << 12;
+            let size = (length + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
+            let new_page = MMappedMemory::try_new(size, false, true, |size| 0xcc as u8);
+            match new_page {
+                Err(_) => return Err(()),
+                Ok(new_page) => {
+                    let start = &new_page.slice()[0] as *const u8 as usize;
+                    let end = start + new_page.len() - 1;
+                    let mut cursor = self.ranges.cursor_front_mut();
+                    loop {
+                        if let Some(current) = cursor.current() {
+                            if current.end == start {
+                                current.end = end;
+                                current.bitmap.append(&mut bitvec![0; end - start]);
+                                current.pages.push(new_page);
+                                break;
+                            }
+                            if current.start < start {
+                                cursor.move_next()
+                            } else {
+                                if end == current.start {
+                                    current.start = start;
+                                    let mut bitmap = bitvec![0; end - start];
+                                    bitmap.append(&mut current.bitmap);
+                                    current.bitmap = bitmap;
+                                    let mut pages = vec![new_page];
+                                    pages.append(&mut current.pages);
+                                    current.pages = pages;
+                                    break;
+                                } else {
+                                    cursor.insert_before(WXRange {
+                                        start,
+                                        end,
+                                        bitmap: bitvec![0; end - start],
+                                        pages: vec![new_page],
+                                    });
+                                    break;
+                                }
+                            }
+                        } else {
+                            cursor.insert_before(WXRange {
+                                start,
+                                end,
+                                bitmap: bitvec![0; end - start],
+                                pages: vec![new_page],
+                            });
+                            break;
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    pub unsafe fn deallocate(&mut self, p: *const u8, size: usize) {
+        let start = p as usize;
+        for range in self.ranges.iter_mut() {
+            if range.start <= start && start + size - 1 <= range.end {
+                unsafe { range.deallocate(p, size) };
+            }
+        }
-        // Now we need to allocate a new page ^^'
-        return Err(());
     }
 }
 
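Taken together, the growth loop and the new `deallocate` give the allocator malloc/free-like semantics for aligned, offset-constrained code regions. A hedged usage sketch; the driver function and the sizes are illustrative only, and the commit itself reaches the allocator through a shared `wx_allocator` mutex, as the Drop impl below shows:

    // Hypothetical driver: `allocator` is some WXAllocator instance.
    fn round_trip(allocator: &mut WXAllocator) -> Result<(), ()> {
        // Ask for 0x40 bytes whose start address is congruent to 0x10 modulo 64.
        let p = unsafe { allocator.allocate(64, 0x10, 0x40) }?;
        assert_eq!(p as usize & 63, 0x10);
        // ... copy a code template to p and run it ...
        unsafe { allocator.deallocate(p as *const u8, 0x40) };
        Ok(())
    }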
@@ -116,7 +189,10 @@ impl Function {
             return Err(()); // FIXME Error type.
         }
         let mask = align - 1;
-        let real_offset = (offset - (template.ip as usize) + (template.start as usize)) & mask;
+        let real_offset = (offset
+            .wrapping_add(template.start as usize)
+            .wrapping_sub(template.ip as usize))
+            & mask;
         let length = (template.end as usize) - (template.start as usize);
 
         let p = unsafe { allocator.allocate(align, real_offset, length) }?;
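The rewrite above reorders the arithmetic and uses wrapping operations, so the intermediate `offset - template.ip` can no longer panic on underflow in debug builds. What the value means can be checked with small numbers; all values below are illustrative, with the absolute `template.start`/`template.ip` addresses folded into a single delta:

    fn placement_example() {
        let align: usize = 64;
        let mask = align - 1;
        // Suppose the instruction of interest sits 0x13 bytes after the start of
        // the template, and we want it to land at offset 0x08 within a 64-byte line.
        let ip_delta: usize = 0x13; // template.ip - template.start
        let offset: usize = 0x08; // requested (instruction address % align)

        // Same computation as real_offset above, expressed via ip_delta.
        let real_offset = offset.wrapping_sub(ip_delta) & mask; // 0x35
        assert_eq!(real_offset, 0x35);

        // If the copied template starts at an address X with X % 64 == 0x35,
        // the instruction lands at X + 0x13, and (X + 0x13) % 64 == 0x08.
        assert_eq!((real_offset + ip_delta) & mask, offset);
    }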
@@ -136,7 +212,10 @@ impl Function {
 impl Drop for Function {
     fn drop(&mut self) {
         // Find the correct range, and deallocate all the bits
-        todo!()
+        let p = self.fun as *mut u8;
+        unsafe { std::ptr::write_bytes(p, 0xcc, self.size) };
+        let mut allocator = wx_allocator.lock().unwrap();
+        unsafe { allocator.deallocate(self.fun as *const u8, self.size) };
     }
 }
 
@@ -213,4 +292,10 @@ pub fn tmp_test() {
     let p = &mem as *const u8;
     println!("maccess {:p} : {}", p, unsafe { timed_maccess_template(p) });
     println!("clflush {:p} : {}", p, unsafe { timed_clflush_template(p) });
+
+    let f = Function::try_new(1, 0, TIMED_CLFLUSH).unwrap();
+
+    println!("{:p}", f.fun as *const u8);
+    let r = unsafe { (f.fun)(p) };
+    println!("relocate clflush {:p}, {}", (f.fun) as *const u8, r);
 }
@@ -1,4 +1,5 @@
 #![feature(global_asm)]
+#![feature(linked_list_cursors)]
 #![deny(unsafe_op_in_unsafe_fn)]
 
 use crate::Probe::{Flush, FullFlush, Load};
@@ -171,7 +172,7 @@ impl<const GS: usize> Prober<GS> {
         };
 
         for i in 0..num_pages {
-            let mut p = match MMappedMemory::<u8>::try_new(PAGE_LEN * GS, false, |j| {
+            let mut p = match MMappedMemory::<u8>::try_new(PAGE_LEN * GS, false, false, |j| {
                 (j / CACHE_LINE_LEN + i * PAGE_CACHELINE_LEN) as u8
             }) {
                 Ok(p) => p,