General updates

Guillaume DIDIER 2022-01-25 14:18:03 +01:00
parent c734b5ce53
commit 559a4ecdf8
13 changed files with 429 additions and 49 deletions

Cargo.lock generated

@@ -110,9 +110,11 @@ name = "cache_utils"
 version = "0.1.0"
 dependencies = [
  "atomic",
+ "bitvec",
  "cpuid",
  "hashbrown",
  "itertools",
+ "lazy_static",
  "libc",
  "nix",
  "polling_serial",
@@ -376,6 +378,7 @@ dependencies = [
  "cache_utils",
  "flush_flush",
  "flush_reload",
+ "itertools",
  "lazy_static",
  "nix",
  "rand",


@@ -16,3 +16,4 @@ nix = "0.20.0"
 rand = "0.8.3"
 lazy_static = "1.4.0"
 bitvec = "0.22.3"
+itertools = "0.10.0"


@@ -0,0 +1,9 @@
On Nehalem:
Bit 1: L2 prefetcher
Bit 2: L2 adjacent cache line prefetcher
Bit 3: L1 DCU next cache line prefetcher
Bit 4: L1 IP-based prefetcher
This is confirmed for Sandy Bridge in Table 2-20, which also applies to most later-generation processors (up to Kaby Lake at least).
For strides, consider the values 0xf, 0xe and 0x7.
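A minimal sketch (not part of this commit) of how those three values decompose, assuming the "Bit 1".."Bit 4" naming above corresponds to bits 0-3 of MSR 0x1a4 and that a set bit disables the corresponding prefetcher:

// Sketch: decomposition of the suggested MSR 0x1a4 values.
const L2_STREAM: u64 = 1 << 0; // "Bit 1": L2 prefetcher
const L2_ADJACENT: u64 = 1 << 1; // "Bit 2": L2 adjacent cache line prefetcher
const L1_NEXT_LINE: u64 = 1 << 2; // "Bit 3": L1 DCU next cache line prefetcher
const L1_IP: u64 = 1 << 3; // "Bit 4": L1 IP-based prefetcher

fn main() {
    let all_off = L2_STREAM | L2_ADJACENT | L1_NEXT_LINE | L1_IP; // 0xf: all prefetchers disabled
    let l2_stream_only = all_off & !L2_STREAM; // 0xe: only the L2 prefetcher left enabled
    let l1_ip_only = all_off & !L1_IP; // 0x7: only the L1 IP-based prefetcher left enabled
    println!("{:#x} {:#x} {:#x}", all_off, l2_stream_only, l1_ip_only);
}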


@@ -0,0 +1,8 @@
#!/bin/bash
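# Write the given prefetcher control mask to MSR 0x1a4 on all cores (requires msr-tools),
# log what was written, then run the exhaustive access pattern experiment with that setting.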
PREFETCH_MSR=$1
sudo wrmsr -a 0x1a4 $PREFETCH_MSR
sudo echo wrmsr -a 0x1a4 $PREFETCH_MSR
sudo rdmsr -a 0x1a4
cargo run --release --bin exhaustive_access_pattern > eap-with-${PREFETCH_MSR}-prefetcher.log
sudo rdmsr -a 0x1a4


@@ -0,0 +1,165 @@
/*
Objective: explore access patterns of a length given as an argument and test all the possible ones,
then proceed with some analysis.
Library functions will probably be used for a lot of it
(automatic pattern generation belongs in lib.rs; the analysis part may be a little more subtle).
Output: detailed CSV, plus well-chosen slices and summaries?
Alternatively, limit to 3 accesses?
*/
use cache_utils::ip_tool::{Function, TIMED_MACCESS};
use itertools::Itertools;
use nix::sched::sched_yield;
use prefetcher_reverse::{
pattern_helper, FullPageDualProbeResults, PatternAccess, Prober, PAGE_CACHELINE_LEN,
};
pub const NUM_ITERATION: u32 = 1 << 10;
pub const WARMUP: u32 = 100;
struct Params {
limit: usize,
same_ip: bool,
unique_ip: bool,
}
fn print_tagged_csv(tag: &str, results: Vec<FullPageDualProbeResults>, len: usize) {
// Print Header,
println!("{}Functions:i,Addr", tag);
if !results.is_empty() {
let first = &results[0];
for (i, p) in first.pattern.iter().enumerate() {
println!("{}Functions:{},{:p}", tag, i, p.function.ip)
}
}
println!(
"{}:{}ProbeAddr,Probe_SF_H,Probe_SF_HR,Probe_SR_H,Probe_SR_HR,Probe_FF_H,Probe_FF_HR",
tag,
(0..len)
.map(|i| {
format!(
"Offset_{i},\
Offset_{i}_SF_H,Offset_{i}_SF_HR,\
Offset_{i}_SR_H,Offset_{i}_SR_HR,\
Offset_{i}_FF_H,Offset_{i}_FF_HR,",
i = i
)
})
.format(""),
);
// Print each line,
// TODO: double check with the impl in lib.rs how to extract the various pieces of info.
for res in results {
assert_eq!(res.pattern.len(), len);
for probe_addr in 0..PAGE_CACHELINE_LEN {
let sf_h = res.single_probe_results[probe_addr].flush.probe_result;
let sr_h = res.single_probe_results[probe_addr].load.probe_result;
let ff_h = res.full_flush_results.probe_result[probe_addr];
println!(
"{}:{}{},{},{},{},{},{},{}",
tag,
(0..len)
.map(|i| {
let sf_h = res.single_probe_results[probe_addr].flush.pattern_result[i];
let sf_hr = sf_h as f32 / res.num_iteration as f32;
let sr_h = res.single_probe_results[probe_addr].load.pattern_result[i];
let sr_hr = sr_h as f32 / res.num_iteration as f32;
let ff_h = res.full_flush_results.pattern_result[i];
let ff_hr = ff_h as f32 / res.num_iteration as f32;
format!(
"{},{},{},{},{},{},{},",
res.pattern[i].offset, sf_h, sf_hr, sr_h, sr_hr, ff_h, ff_hr
)
})
.format(""),
probe_addr,
sf_h,
sf_h as f32 / res.num_iteration as f32,
sr_h,
sr_h as f32 / res.num_iteration as f32,
ff_h,
ff_h as f32 / res.num_iteration as f32
);
}
}
}
fn exp(
i: usize,
patterns: &Vec<Vec<usize>>,
same_ip: bool,
unique_ip: bool,
prober: &mut Prober<1>,
) {
if same_ip {
let single_reload = Function::try_new(1, 0, TIMED_MACCESS).unwrap();
let mut results = Vec::new();
for pattern in patterns {
eprintln!("Single IP pattern: {:?}", pattern);
let single_ip_pattern = pattern_helper(pattern, &single_reload);
let result = prober.full_page_probe(single_ip_pattern, NUM_ITERATION, WARMUP);
results.push(result);
sched_yield().unwrap();
}
print_tagged_csv(&format!("SingleIP{}", i), results, i);
// generate the vec with a single IP
}
if unique_ip {
let mut functions = Vec::new();
let rounded_i = i.next_power_of_two();
for j in 0..i {
functions.push(Function::try_new(rounded_i, j, TIMED_MACCESS).unwrap());
}
let mut results = Vec::new();
for pattern in patterns {
eprintln!("Unique IP pattern: {:?}", pattern);
let unique_ip_pattern = pattern
.iter()
.enumerate()
.map(|(i, &offset)| PatternAccess {
function: &functions[i],
offset,
})
.collect();
let result = prober.full_page_probe(unique_ip_pattern, NUM_ITERATION, WARMUP);
results.push(result);
sched_yield().unwrap();
}
print_tagged_csv(&format!("UniqueIPs{}", i), results, i);
}
}
fn main() {
// TODO Argument parsing
let args = Params {
limit: 2,
same_ip: true,
unique_ip: true,
};
let mut prober = Prober::<1>::new(63).unwrap();
let mut patterns: Vec<Vec<usize>> = Vec::new();
patterns.push(Vec::new());
exp(0, &patterns, args.same_ip, args.unique_ip, &mut prober);
for i in 0..args.limit {
let mut new_patterns = Vec::new();
for pattern in patterns {
for i in 0..PAGE_CACHELINE_LEN {
let mut np = pattern.clone();
np.push(i);
new_patterns.push(np);
}
}
patterns = new_patterns;
exp(i + 1, &patterns, args.same_ip, args.unique_ip, &mut prober);
}
}
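A rough count (not from the commit) of the search space the loop in main spans, which suggests why limit is kept small:

// Sketch: how many patterns the enumeration above generates for a given limit.
// main() starts from the empty pattern and, at each step, extends every pattern
// by each of the 64 cache-line offsets of a page.
fn main() {
    let limit = 2u32; // matches the hard-coded Params { limit: 2, .. }
    let total: u64 = (0..=limit).map(|n| 64u64.pow(n)).sum();
    // 1 + 64 + 64^2 = 4161 patterns, each probed across 64 offsets with 1024 iterations,
    // for both the single-IP and unique-IP variants.
    println!("{} patterns up to length {}", total, limit);
}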


@@ -0,0 +1,183 @@
/*
Objective: explore access patterns of a length given as an argument and test all the possible ones,
then proceed with some analysis.
Library functions will probably be used for a lot of it
(automatic pattern generation belongs in lib.rs; the analysis part may be a little more subtle).
Output: detailed CSV, plus well-chosen slices and summaries?
Alternatively, limit to 3 accesses?
*/
use cache_utils::ip_tool::{Function, TIMED_MACCESS};
use itertools::Itertools;
use nix::sched::sched_yield;
use prefetcher_reverse::{
pattern_helper, FullPageDualProbeResults, PatternAccess, Prober, PAGE_CACHELINE_LEN,
};
pub const NUM_ITERATION: u32 = 1 << 10;
pub const WARMUP: u32 = 100;
struct Params {
limit: usize,
same_ip: bool,
unique_ip: bool,
}
fn print_tagged_csv(tag: &str, results: Vec<FullPageDualProbeResults>, len: usize) {
// Print Header,
println!("{}Functions:i,Addr", tag);
if !results.is_empty() {
let first = &results[0];
for (i, p) in first.pattern.iter().enumerate() {
println!("{}Functions:{},{:p}", tag, i, p.function.ip)
}
}
println!(
"{}:{}ProbeAddr,Probe_SF_H,Probe_SF_HR,Probe_SR_H,Probe_SR_HR,Probe_FF_H,Probe_FF_HR",
tag,
(0..len)
.map(|i| {
format!(
"Offset_{i},\
Offset_{i}_SF_H,Offset_{i}_SF_HR,\
Offset_{i}_SR_H,Offset_{i}_SR_HR,\
Offset_{i}_FF_H,Offset_{i}_FF_HR,",
i = i
)
})
.format(""),
);
// Print each line,
// TODO: double check with the impl in lib.rs how to extract the various pieces of info.
for res in results {
assert_eq!(res.pattern.len(), len);
for probe_addr in 0..PAGE_CACHELINE_LEN {
let sf_h = res.single_probe_results[probe_addr].flush.probe_result;
let sr_h = res.single_probe_results[probe_addr].load.probe_result;
let ff_h = res.full_flush_results.probe_result[probe_addr];
println!(
"{}:{}{},{},{},{},{},{},{}",
tag,
(0..len)
.map(|i| {
let sf_h = res.single_probe_results[probe_addr].flush.pattern_result[i];
let sf_hr = sf_h as f32 / res.num_iteration as f32;
let sr_h = res.single_probe_results[probe_addr].load.pattern_result[i];
let sr_hr = sr_h as f32 / res.num_iteration as f32;
let ff_h = res.full_flush_results.pattern_result[i];
let ff_hr = ff_h as f32 / res.num_iteration as f32;
format!(
"{},{},{},{},{},{},{},",
res.pattern[i].offset, sf_h, sf_hr, sr_h, sr_hr, ff_h, ff_hr
)
})
.format(""),
probe_addr,
sf_h,
sf_h as f32 / res.num_iteration as f32,
sr_h,
sr_h as f32 / res.num_iteration as f32,
ff_h,
ff_h as f32 / res.num_iteration as f32
);
}
}
}
fn exp(
i: usize,
patterns: &Vec<Vec<usize>>,
same_ip: bool,
unique_ip: bool,
prober: &mut Prober<1>,
) {
if same_ip {
let single_reload = Function::try_new(1, 0, TIMED_MACCESS).unwrap();
let mut results = Vec::new();
for pattern in patterns {
eprintln!("Single IP pattern: {:?}", pattern);
let single_ip_pattern = pattern_helper(pattern, &single_reload);
let result = prober.full_page_probe(single_ip_pattern, NUM_ITERATION, WARMUP);
results.push(result);
sched_yield().unwrap();
}
print_tagged_csv(&format!("SingleIP{}", i), results, i);
// generate the vec with a single IP
}
if unique_ip {
let mut functions = Vec::new();
let rounded_i = i.next_power_of_two();
for j in 0..i {
functions.push(Function::try_new(rounded_i, j, TIMED_MACCESS).unwrap());
}
let mut results = Vec::new();
for pattern in patterns {
eprintln!("Unique IP pattern: {:?}", pattern);
let unique_ip_pattern = pattern
.iter()
.enumerate()
.map(|(i, &offset)| PatternAccess {
function: &functions[i],
offset,
})
.collect();
let result = prober.full_page_probe(unique_ip_pattern, NUM_ITERATION, WARMUP);
results.push(result);
sched_yield().unwrap();
}
print_tagged_csv(&format!("UniqueIPs{}", i), results, i);
}
}
/* TODO change access patterns
- We want patterns for i,j in [0,64]^2:
A (i, i+k, j)
B (i, i-k, j)
C (i, j, j+k)
D (i, j, j-k)
with k in 1, 2, 3, 8, plus possibly others.
Patterns with 4 accesses will probably come later.
In addition, consider base + stride + len patterns, with a well-chosen set of lengths, strides and bases,
with len in 2, 3, 4.
Identifiers:
E 2
F 3
G 4
(One way to enumerate the A-D families is sketched after this listing.)
*/
fn main() {
// TODO Argument parsing
let args = Params {
limit: 2,
same_ip: true,
unique_ip: true,
};
let mut prober = Prober::<1>::new(63).unwrap();
let mut patterns: Vec<Vec<usize>> = Vec::new();
patterns.push(Vec::new());
exp(0, &patterns, args.same_ip, args.unique_ip, &mut prober);
for i in 0..args.limit {
let mut new_patterns = Vec::new();
for pattern in patterns {
for i in 0..PAGE_CACHELINE_LEN {
let mut np = pattern.clone();
np.push(i);
new_patterns.push(np);
}
}
patterns = new_patterns;
exp(i + 1, &patterns, args.same_ip, args.unique_ip, &mut prober);
}
}
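A minimal sketch (not part of this commit) of one way to enumerate the A-D families from the TODO above, assuming offsets are cache-line indexes within a 64-line page and that candidates falling outside the page are skipped:

// Sketch only: three-access families A (i, i+k, j), B (i, i-k, j), C (i, j, j+k), D (i, j, j-k).
const PAGE_CACHELINE_LEN: usize = 64; // mirrors the crate constant of the same name

fn three_access_families(k: usize) -> Vec<(char, Vec<usize>)> {
    let mut patterns = Vec::new();
    for i in 0..PAGE_CACHELINE_LEN {
        for j in 0..PAGE_CACHELINE_LEN {
            if i + k < PAGE_CACHELINE_LEN {
                patterns.push(('A', vec![i, i + k, j]));
            }
            if i >= k {
                patterns.push(('B', vec![i, i - k, j]));
            }
            if j + k < PAGE_CACHELINE_LEN {
                patterns.push(('C', vec![i, j, j + k]));
            }
            if j >= k {
                patterns.push(('D', vec![i, j, j - k]));
            }
        }
    }
    patterns
}

fn main() {
    for &k in &[1usize, 2, 3, 8] {
        println!("k = {}: {} patterns", k, three_access_families(k).len());
    }
}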


@@ -1,4 +1,4 @@
-use prefetcher_reverse::ip_tool::tmp_test;
+use cache_utils::ip_tool::tmp_test;
 fn main() {
     tmp_test();


@@ -1,4 +1,4 @@
-use prefetcher_reverse::ip_tool::{Function, TIMED_MACCESS};
+use cache_utils::ip_tool::{Function, TIMED_MACCESS};
 use prefetcher_reverse::{pattern_helper, Prober, PAGE_CACHELINE_LEN};
 pub const NUM_ITERATION: usize = 1 << 10;
@@ -7,7 +7,7 @@ fn exp(delay: u64, reload: &Function) {
     let mut prober = Prober::<2>::new(63).unwrap();
     prober.set_delay(delay);
     let pattern = (0usize..(PAGE_CACHELINE_LEN * 2usize)).collect::<Vec<usize>>();
-    let p = pattern_helper(pattern, reload);
+    let p = pattern_helper(&pattern, reload);
     let result = prober.full_page_probe(p, NUM_ITERATION as u32, 100);
     println!("{}", result);


@@ -5,12 +5,12 @@ use cache_side_channel::{
     set_affinity, ChannelHandle, CoreSpec, MultipleAddrCacheSideChannel, SingleAddrCacheSideChannel,
 };
 use cache_utils::calibration::PAGE_LEN;
+use cache_utils::ip_tool::{Function, TIMED_MACCESS};
 use cache_utils::maccess;
 use cache_utils::mmap;
 use cache_utils::mmap::MMappedMemory;
 use flush_flush::{FFHandle, FFPrimitives, FlushAndFlush};
 use nix::Error;
-use prefetcher_reverse::ip_tool::{Function, TIMED_MACCESS};
 use prefetcher_reverse::{
     pattern_helper, reference_patterns, Prober, CACHE_LINE_LEN, PAGE_CACHELINE_LEN,
 };
@@ -23,7 +23,7 @@ pub const NUM_PAGES: usize = 256;
 fn exp(delay: u64, reload: &Function) {
     for (name, pattern) in reference_patterns() {
-        let p = pattern_helper(pattern, reload);
+        let p = pattern_helper(&pattern, reload);
         let mut prober = Prober::<1>::new(63).unwrap();
         println!("{}", name);


@@ -1,5 +1,5 @@
+use cache_utils::ip_tool::{Function, TIMED_MACCESS};
 use cache_utils::{flush, maccess};
-use prefetcher_reverse::ip_tool::{Function, TIMED_MACCESS};
 use prefetcher_reverse::{pattern_helper, Prober, PAGE_CACHELINE_LEN};
 use std::arch::x86_64 as arch_x86;
@@ -44,7 +44,7 @@ fn exp(stride: usize, num_steps: i32, delay: u64, reload: &Function) {
         stride * num_steps as usize
     };
     let pattern = (2usize..limit).step_by(stride).collect::<Vec<_>>();
-    let p = pattern_helper(pattern, reload);
+    let p = pattern_helper(&pattern, reload);
     let pl2 = Function {
         fun: prefetch_l2,
@@ -67,13 +67,13 @@ fn exp(stride: usize, num_steps: i32, delay: u64, reload: &Function) {
         size: 0,
     };
-    let mut pattern_pl2 = pattern_helper((0..(2 * PAGE_CACHELINE_LEN)).collect(), &pl2);
+    let mut pattern_pl2 = pattern_helper(&(0..(2 * PAGE_CACHELINE_LEN)).collect(), &pl2);
     pattern_pl2.extend(p.iter().cloned());
-    let mut pattern_pl3 = pattern_helper((0..(2 * PAGE_CACHELINE_LEN)).collect(), &pl3);
+    let mut pattern_pl3 = pattern_helper(&(0..(2 * PAGE_CACHELINE_LEN)).collect(), &pl3);
     pattern_pl3.extend(p.iter().cloned());
-    let mut pattern_pl1 = pattern_helper((0..(2 * PAGE_CACHELINE_LEN)).collect(), &pl1);
+    let mut pattern_pl1 = pattern_helper(&(0..(2 * PAGE_CACHELINE_LEN)).collect(), &pl1);
     pattern_pl1.extend(p.iter().cloned());
     println!("With no sw prefetch");


@@ -1,4 +1,4 @@
-use prefetcher_reverse::ip_tool::{Function, TIMED_MACCESS};
+use cache_utils::ip_tool::{Function, TIMED_MACCESS};
 use prefetcher_reverse::{pattern_helper, Prober, PAGE_CACHELINE_LEN};
 pub const NUM_ITERATION: usize = 1 << 10;
@@ -12,7 +12,7 @@ fn exp(stride: usize, num_steps: i32, delay: u64, reload: &Function) {
         stride * num_steps as usize
     };
     let pattern = (2usize..limit).step_by(stride).collect::<Vec<_>>();
-    let p = pattern_helper(pattern, reload);
+    let p = pattern_helper(&pattern, reload);
     let result = prober.full_page_probe(p, NUM_ITERATION as u32, 100);
     println!("{}", result);


@@ -1,8 +1,14 @@
 #![feature(global_asm)]
+#![feature(linked_list_cursors)]
 #![deny(unsafe_op_in_unsafe_fn)]
-use crate::Probe::{Flush, FullFlush, Load};
+use std::fmt::{Display, Error, Formatter};
+use std::iter::{Cycle, Peekable};
+use std::ops::Range;
+use std::{thread, time};
+use nix::sys::stat::stat;
+use rand::seq::SliceRandom;
 use basic_timing_cache_channel::{
     CalibrationStrategy, TopologyAwareError, TopologyAwareTimingChannel,
 };
@@ -12,21 +18,14 @@ use cache_side_channel::{
     SingleAddrCacheSideChannel,
 };
 use cache_utils::calibration::{only_reload, Threshold, PAGE_LEN};
+use cache_utils::ip_tool::Function;
 use cache_utils::mmap::MMappedMemory;
 use cache_utils::rdtsc_nofence;
 use flush_flush::{FFHandle, FFPrimitives, FlushAndFlush};
 use flush_reload::naive::{NFRHandle, NaiveFlushAndReload};
 use flush_reload::{FRHandle, FRPrimitives, FlushAndReload};
-use nix::sys::stat::stat;
-use rand::seq::SliceRandom;
-use std::fmt::{Display, Error, Formatter};
-use std::iter::{Cycle, Peekable};
-use std::ops::Range;
-use std::{thread, time};
-pub mod ip_tool;
-use ip_tool::Function;
+use crate::Probe::{Flush, FullFlush, Load};
 // NB these may need to be changed / dynamically measured.
 pub const CACHE_LINE_LEN: usize = 64;
@@ -37,12 +36,12 @@ pub const CALIBRATION_STRAT: CalibrationStrategy = CalibrationStrategy::ASVP;
 pub struct Prober<const GS: usize> {
     pages: Vec<MMappedMemory<u8>>,
     ff_handles: Vec<Vec<FFHandle>>,
-    //fr_handles: Vec<Vec<FRHandle>>,
-    fr_handles: Vec<Vec<NFRHandle>>,
+    fr_handles: Vec<Vec<FRHandle>>,
+    //fr_handles: Vec<Vec<NFRHandle>>,
     page_indexes: Peekable<Cycle<Range<usize>>>,
     ff_channel: FlushAndFlush,
-    //fr_channel: FlushAndReload,
-    fr_channel: NaiveFlushAndReload,
+    fr_channel: FlushAndReload,
+    //fr_channel: NaiveFlushAndReload,
     delay: u64,
 }
@@ -120,7 +119,7 @@ pub struct FullPageDualProbeResults<'a> {
     pub full_flush_results: DPRItem<FullPR>,
 }
-#[derive(Debug)]
+#[derive(Debug, Clone)]
 pub struct SingleProbeResult {
     pub probe_offset: usize,
     pub pattern_result: Vec<u64>,
@@ -177,16 +176,16 @@ impl<const GS: usize> Prober<GS> {
             Ok(old) => old,
             Err(nixerr) => return Err(ProberError::Nix(nixerr)),
         };
-        let mut fr_channel = NaiveFlushAndReload::new(Threshold {
+        /*let mut fr_channel = NaiveFlushAndReload::new(Threshold {
             bucket_index: 315,
             miss_faster_than_hit: false,
-        });
-        /*let mut fr_channel = match FlushAndReload::new(core, core, CALIBRATION_STRAT) {
+        });*/
+        let mut fr_channel = match FlushAndReload::new(core, core, CALIBRATION_STRAT) {
             Ok(res) => res,
             Err(err) => {
                 return Err(ProberError::TopologyError(err));
             }
-        };*/
+        };
         for i in 0..num_pages {
             let mut p = match MMappedMemory::<u8>::try_new(PAGE_LEN * GS, false, false, |j| {
@@ -246,7 +245,9 @@ impl<const GS: usize> Prober<GS> {
         self.page_indexes.next();
         let page_index = *self.page_indexes.peek().unwrap();
-        let mut ff_handles = self.ff_handles[page_index].iter_mut().collect();
+        let mut ff_handles = self.ff_handles[page_index]
+            .iter_mut() /*.rev()*/
+            .collect();
         unsafe { self.ff_channel.prepare(&mut ff_handles) };
@@ -293,7 +294,7 @@ impl<const GS: usize> Prober<GS> {
         if let ProbeOutput::Full(vstatus) = probe_out {
             for (i, status) in vstatus.iter().enumerate() {
                 if status.1 == Hit {
-                    v[i] += 1;
+                    v[/*63 -*/ i] += 1;
                 }
             }
         } else {
@@ -347,16 +348,26 @@ impl<const GS: usize> Prober<GS> {
            pattern: pattern.pattern.clone(),
            probe_type,
            num_iteration,
-           results: vec![],
+           results: vec![
+               SingleProbeResult {
+                   probe_offset: 0,
+                   pattern_result: vec![],
+                   probe_result: 0
+               };
+               64
+           ],
        };
-       for offset in 0..(PAGE_CACHELINE_LEN * GS) {
+       for offset in (0..(PAGE_CACHELINE_LEN * GS))
+       /*.rev()*/
+       {
+           // Reversed FIXME
           pattern.probe = match probe_type {
               ProbeType::Load => Probe::Load(offset),
              ProbeType::Flush => Probe::Flush(offset),
              ProbeType::FullFlush => FullFlush,
          };
          let r = self.probe_pattern(pattern, num_iteration, warmup);
-         result.results.push(SingleProbeResult {
+         result.results[offset] = SingleProbeResult {
              probe_offset: offset,
              pattern_result: r.pattern_result,
              probe_result: match r.probe_result {
@@ -364,7 +375,7 @@ impl<const GS: usize> Prober<GS> {
                  ProbeResult::Flush(r) => r,
                  ProbeResult::FullFlush(r) => r[offset],
              },
-         });
+         };
      }
      result
 }
@@ -537,12 +548,12 @@ pub fn reference_patterns() -> [(&'static str, Vec<usize>); 9] {
     ]
 }
-pub fn pattern_helper<'a>(offsets: Vec<usize>, function: &'a Function) -> Vec<PatternAccess<'a>> {
+pub fn pattern_helper<'a>(offsets: &Vec<usize>, function: &'a Function) -> Vec<PatternAccess<'a>> {
     offsets
         .into_iter()
         .map(|i| PatternAccess {
             function,
-            offset: i,
+            offset: *i,
         })
         .collect()
 }


@@ -5,12 +5,12 @@ use cache_side_channel::{
     set_affinity, ChannelHandle, CoreSpec, MultipleAddrCacheSideChannel, SingleAddrCacheSideChannel,
 };
 use cache_utils::calibration::PAGE_LEN;
+use cache_utils::ip_tool::{Function, TIMED_MACCESS};
 use cache_utils::maccess;
 use cache_utils::mmap;
 use cache_utils::mmap::MMappedMemory;
 use flush_flush::{FFHandle, FFPrimitives, FlushAndFlush};
 use nix::Error;
-use prefetcher_reverse::ip_tool::{Function, TIMED_MACCESS};
 use prefetcher_reverse::{
     pattern_helper, PatternAccess, Prober, CACHE_LINE_LEN, PAGE_CACHELINE_LEN,
 };
@@ -201,8 +201,8 @@ fn main() {
     let reload = Function::try_new(1, 0, TIMED_MACCESS).unwrap();
-    let pattern = pattern_helper(generate_pattern(0, 3, 12).unwrap(), &reload);
-    let pattern4 = pattern_helper(generate_pattern(0, 4, 12).unwrap(), &reload);
+    let pattern = pattern_helper(&generate_pattern(0, 3, 12).unwrap(), &reload);
+    let pattern4 = pattern_helper(&generate_pattern(0, 4, 12).unwrap(), &reload);
     let mut new_prober = Prober::<1>::new(63).unwrap();
     let result = new_prober.full_page_probe(pattern.clone(), NUM_ITERATION as u32, 100);
     println!("{}", result);
@@ -212,27 +212,27 @@ fn main() {
     println!("{}", result2);
     let result4 = new_prober.full_page_probe(pattern4, NUM_ITERATION as u32, 100);
     println!("{}", result4);
-    let pattern5 = pattern_helper(generate_pattern(0, 5, 8).unwrap(), &reload);
+    let pattern5 = pattern_helper(&generate_pattern(0, 5, 8).unwrap(), &reload);
     let result5 = new_prober.full_page_probe(pattern5, NUM_ITERATION as u32, 100);
     println!("{}", result5);
-    let pattern5 = pattern_helper(generate_pattern(0, 5, 4).unwrap(), &reload);
+    let pattern5 = pattern_helper(&generate_pattern(0, 5, 4).unwrap(), &reload);
     let result5 = new_prober.full_page_probe(pattern5, NUM_ITERATION as u32, 100);
     println!("{}", result5);
-    let pattern = pattern_helper(generate_pattern(0, 10, 4).unwrap(), &reload);
+    let pattern = pattern_helper(&generate_pattern(0, 10, 4).unwrap(), &reload);
     let result = new_prober.full_page_probe(pattern, NUM_ITERATION as u32, 100);
     println!("{}", result);
-    let pattern = pattern_helper(generate_pattern(0, 6, 8).unwrap(), &reload);
+    let pattern = pattern_helper(&generate_pattern(0, 6, 8).unwrap(), &reload);
     let result = new_prober.full_page_probe(pattern, NUM_ITERATION as u32, 100);
     println!("{}", result);
-    let pattern = pattern_helper(generate_pattern(2, 6, 0).unwrap(), &reload);
+    let pattern = pattern_helper(&generate_pattern(2, 6, 0).unwrap(), &reload);
     let result = new_prober.full_page_probe(pattern, NUM_ITERATION as u32, 100);
     println!("{}", result);
-    let pattern = pattern_helper(vec![0, 0, 8, 8, 16, 16, 24, 24], &reload);
+    let pattern = pattern_helper(&vec![0, 0, 8, 8, 16, 16, 24, 24], &reload);
     let result = new_prober.full_page_probe(pattern, NUM_ITERATION as u32, 100);
     println!("{}", result);