Compare commits
10 Commits: 960d7d942c ... 8edd90b6a3

SHA1:
8edd90b6a3
30d9527ceb
dcc84e8916
9419b0c58f
8b227b640b
2e8c82f347
bc684eca89
0765552240
ffb763bed4
856bf5e4a1
.idea/DendrobatesTinctoriusAzureus.iml (generated, 6 lines changed)

@@ -2,7 +2,7 @@
 <module type="CPP_MODULE" version="4">
   <component name="FacetManager">
     <facet type="Python" name="Python facet">
-      <configuration sdkName="Python 3.7 (dendrobates-t-azureus)" />
+      <configuration sdkName="" />
     </facet>
   </component>
   <component name="NewModuleRootManager">
@@ -40,6 +40,9 @@
   <sourceFolder url="file://$MODULE_DIR$/turn_lock/src" isTestSource="false" />
   <sourceFolder url="file://$MODULE_DIR$/covert_channels_benchmark/src" isTestSource="false" />
   <sourceFolder url="file://$MODULE_DIR$/prefetcher_reverse/src" isTestSource="false" />
+  <sourceFolder url="file://$MODULE_DIR$/CacheObserver/src" isTestSource="false" />
+  <sourceFolder url="file://$MODULE_DIR$/dendrobates-t-azureus/src" isTestSource="false" />
+  <sourceFolder url="file://$MODULE_DIR$/dendrobates-t-azureus/tests" isTestSource="true" />
   <excludeFolder url="file://$MODULE_DIR$/cache_info/target" />
   <excludeFolder url="file://$MODULE_DIR$/cache_utils/target" />
   <excludeFolder url="file://$MODULE_DIR$/kernel/target" />
@@ -50,6 +53,5 @@
   </content>
   <orderEntry type="inheritedJdk" />
   <orderEntry type="sourceFolder" forTests="false" />
-  <orderEntry type="library" name="Python 3.7 (dendrobates-t-azureus) interpreter library" level="application" />
   </component>
 </module>

CacheObserver/Cargo.toml

@@ -1,7 +1,7 @@
 [package]
-name = "prefetcher_reverse"
+name = "CacheObserver"
 version = "0.1.0"
-authors = ["Guillume DIDIER <guillaume.didier@inria.fr>"]
+authors = ["Guillaume DIDIER <guillaume.didier.2014@polytechnique.org>"]
 edition = "2018"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

CacheObserver/Readme.md (new file, 77 lines)

CacheObserver - monitor what happens in the cache when doing memory accesses
============================================================================

This framework, derived from https://github.com/MIAOUS-group/calibration-done-right,
is built to help reverse engineer prefetchers on Intel CPUs.

The main entry point of the framework is the `CacheObserver` crate.

The code presented runs under Fedora 30, and can also be made to run on Ubuntu 18.04 LTS with minor tweaks.

(Notably, libcpupower may also be called libcpufreq.)

## Usage

Requires Rust nightly features. Install Rust nightly using rustup;
known working versions are listed at the end of this document.

This tool needs access to MSRs and thus requires sudo access.
The setup.sh script disables turbo boost and makes sure the frequency is set to the maximum
non-boosted frequency.

One can run all the experiments with the following instructions:

```
cd CacheObserver
mkdir results-xxx
cd results-xxx
sudo ../setup.sh
../run-msr-all.sh 15
../run-msr-all.sh 14
../run-msr-all.sh 13
../run-msr-all.sh 12
../run-msr-all.sh 0
# Do not forget to re-enable turbo-boost and set the cpupower frequency governor back
```

This results in a set of log files that can then be analyzed.

**Note: with default settings, this results in several GB worth of logs.**
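
The argument passed to `run-msr-all.sh` is written to MSR 0x1A4, which gates the hardware prefetchers on many Intel cores. As a reference, a sketch of the values used above and of how to undo the set-up afterwards; the bit assignments follow Intel's public documentation and are worth double-checking for your exact model (the `powersave` governor below is an assumed default):

```shell
# MSR 0x1A4, one bit per prefetcher (1 = disabled):
#   bit 0: L2 hardware prefetcher      bit 1: L2 adjacent cache line prefetcher
#   bit 2: DCU next-line prefetcher    bit 3: DCU IP prefetcher
sudo wrmsr -a 0x1a4 15   # all four prefetchers off
sudo wrmsr -a 0x1a4 14   # only the L2 hardware prefetcher on
sudo wrmsr -a 0x1a4 0    # everything back on (default)
sudo rdmsr -a 0x1a4      # read back to double check

# once done, re-enable turbo boost and restore the frequency governor
echo 0 | sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo
sudo cpupower frequency-set -g powersave
```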

## General Architecture

`prefetcher_reverse` is where the experiments used to reverse engineer the prefetchers live.
It contains the Prober structure, along with binaries generating patterns for the experiments
to run and feeding them to the Prober struct; a minimal sketch of that flow is shown below.
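
A sketch only, using the types as they appear in `all_experiments.rs` further down this diff (the pattern contents here are made up for illustration):

```rust
use cache_utils::ip_tool::{Function, TIMED_MACCESS};
use CacheObserver::{pattern_helper, Prober};

fn run_one_pattern() {
    // One timed-access function shared by every access (the "single IP" case).
    let single_reload = Function::try_new(1, 0, TIMED_MACCESS).unwrap();
    // Offsets within a 4 KiB page, counted in cache lines (made-up values).
    let pattern = vec![0usize, 1, 2];
    // Attach the instruction pointer to each access of the pattern.
    let accesses = pattern_helper(&pattern, &single_reload);
    // Probe a full page: 1 << 10 measured iterations after 100 warm-up rounds.
    let mut prober = Prober::<1>::new(63).unwrap();
    let results = prober.full_page_probe(accesses, 1 << 10, 100);
    // `results` holds, for every probed cache line of the page, hit counts for
    // the probes the CSV headers call SF, SR and FF.
    let _ = results;
}
```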

The `analysis` folder contains the scripts we used to turn the logs into figures
(still to be documented). We used Julia with Plots and the PGFPlotsX backend to generate the figures.

The flow is to first use `extract_analysis_csv.sh` to extract the CSVs for each experiment from the logs.
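
A usage sketch (the script appends `.log` to its argument, so pass the log name without the extension; `results-xxx` is the directory name chosen above):

```shell
cd results-xxx
../analysis/extract_analysis_csv.sh with-15-prefetcher
# produces with-15-prefetcher.S0.csv, with-15-prefetcher.U0.csv, ...
```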

Then one can use the makeplots Julia scripts (those are unfortunately not optimized and may run
for several hours, as the LaTeX backend is not thread-safe and generates many figures).

Those scripts expect to find the CSVs at a specific path, and require their output folders,
named after the MSR 420 (0x1A4) values, to exist beforehand (so folders 15, 14, 13, 12 and 0
must be created first; see the sketch below).
They are still quite rough and undocumented; rough edges are to be expected.
(A better version could be released if the paper is accepted.)
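
A minimal sketch of that set-up, assuming the scripts are run from the results directory (paths here are illustrative):

```shell
# one output folder per MSR 0x1A4 value used above
mkdir -p 15 14 13 12 0
# the scripts use Threads.@threads, so give Julia several threads
julia --threads=8 ../analysis/makeplots-bonus.jl
```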

The resulting figures can then be sorted into subfolders for easier browsing, and the change
colormap script (`analysis/change_colour_map.sh`) can be used to tweak the tikz file colormaps for use in papers.

Crates originally from the *Calibration done right* framework, slightly modified:

- `basic_timing_cache_channel` contains generic implementations of Naive and Optimised cache side channels, which just require providing the actual operation used
- `cache_side_channel` defines the interface cache side channels have to implement
- `cache_utils` contains utilities related to cache attacks
- `cpuid` is a small crate that handles CPU microarchitecture identification and provides info about what is known about it
- `flush_flush` and `flush_reload` are tiny crates that use `basic_timing_cache_channel` to export Flush+Flush and Flush+Reload primitives
- `turn_lock` is the synchronization primitive used by `cache_utils`

### Rust versions

Known good nightly versions:

- rustc 1.54.0-nightly (eab201df7 2021-06-09)
- rustc 1.55.0-nightly (885399992 2021-07-06)
- rustc 1.57.0-nightly (9a28ac83c 2021-09-18)

CacheObserver/run-msr-all.sh (new executable file, 7 lines)

#!/bin/bash
PREFETCH_MSR=$1
sudo wrmsr -a 0x1a4 $PREFETCH_MSR
sudo rdmsr -a 0x1a4
cargo run --bin all_experiments --release > with-${PREFETCH_MSR}-prefetcher.log

CacheObserver/run-msr-bonus.sh (new executable file, 8 lines)

#!/bin/bash
PREFETCH_MSR=$1
sudo wrmsr -a 0x1a4 $PREFETCH_MSR
sudo echo wrmsr -a 0x1a4 $PREFETCH_MSR
sudo rdmsr -a 0x1a4
cargo run --release --bin bonus_access_pattern > bonusap-with-${PREFETCH_MSR}-prefetcher.log
sudo rdmsr -a 0x1a4

CacheObserver/run-msr-extrap.sh (new executable file, 8 lines)

#!/bin/bash
PREFETCH_MSR=$1
sudo wrmsr -a 0x1a4 $PREFETCH_MSR
sudo echo wrmsr -a 0x1a4 $PREFETCH_MSR
sudo rdmsr -a 0x1a4
cargo run --release --bin extra_access_pattern > extrap-with-${PREFETCH_MSR}-prefetcher.log
sudo rdmsr -a 0x1a4

@@ -2,6 +2,6 @@
 PREFETCH_MSR=$1
 sudo wrmsr -a 0x1a4 $PREFETCH_MSR
 sudo rdmsr -a 0x1a4
-cargo run --bin prefetcher_reverse --release > with-${PREFETCH_MSR}-prefetcher.log
+cargo run --bin CacheObserver --release > with-${PREFETCH_MSR}-prefetcher.log
 

CacheObserver/src/bin/all_experiments.rs (new file, 287 lines)

/*
Objective : run an exploration of patterns of a length given as an arg and test all the possible ones,
Then proceed with some analysis.

Probably will use library functions for a lot of it
(Auto pattern generation belongs in lib.rs, the analysis part may be a little bit more subtle)

Output, detailed CSV, and well chosen slices + summaries ?

Alternatively, limit to 3 accesses ?

*/

use cache_utils::ip_tool::{Function, TIMED_MACCESS};
use itertools::Itertools;
use nix::sched::sched_yield;
use CacheObserver::{
    pattern_helper, FullPageDualProbeResults, PatternAccess, Prober, PAGE_CACHELINE_LEN,
};

pub const NUM_ITERATION: u32 = 1 << 10;
pub const WARMUP: u32 = 100;

struct Params {
    limit: usize,
    same_ip: bool,
    unique_ip: bool,
}

fn print_tagged_csv(tag: &str, results: Vec<FullPageDualProbeResults>, len: usize) {
    // Print Header,
    println!("{}Functions:i,Addr", tag);
    if !results.is_empty() {
        let first = &results[0];
        for (i, p) in first.pattern.iter().enumerate() {
            println!("{}Functions:{},{:p}", tag, i, p.function.ip)
        }
    }
    println!(
        "{}:{}ProbeAddr,Probe_SF_H,Probe_SF_HR,Probe_SR_H,Probe_SR_HR,Probe_FF_H,Probe_FF_HR",
        tag,
        (0..len)
            .map(|i| {
                format!(
                    "Offset_{i},\
                    Offset_{i}_SF_H,Offset_{i}_SF_HR,\
                    Offset_{i}_SR_H,Offset_{i}_SR_HR,\
                    Offset_{i}_FF_H,Offset_{i}_FF_HR,",
                    i = i
                )
            })
            .format(""),
    );
    // Print each line,
    // TODO : double check with the impl in lib.rs how to extract the various piece of info.
    for res in results {
        assert_eq!(res.pattern.len(), len);

        for probe_addr in 0..PAGE_CACHELINE_LEN {
            let sf_h = res.single_probe_results[probe_addr].flush.probe_result;
            let sr_h = res.single_probe_results[probe_addr].load.probe_result;
            let ff_h = res.full_flush_results.probe_result[probe_addr];
            println!(
                "{}:{}{},{},{},{},{},{},{}",
                tag,
                (0..len)
                    .map(|i| {
                        let sf_h = res.single_probe_results[probe_addr].flush.pattern_result[i];
                        let sf_hr = sf_h as f32 / res.num_iteration as f32;
                        let sr_h = res.single_probe_results[probe_addr].load.pattern_result[i];
                        let sr_hr = sr_h as f32 / res.num_iteration as f32;
                        let ff_h = res.full_flush_results.pattern_result[i];
                        let ff_hr = ff_h as f32 / res.num_iteration as f32;
                        format!(
                            "{},{},{},{},{},{},{},",
                            res.pattern[i].offset, sf_h, sf_hr, sr_h, sr_hr, ff_h, ff_hr
                        )
                    })
                    .format(""),
                probe_addr,
                sf_h,
                sf_h as f32 / res.num_iteration as f32,
                sr_h,
                sr_h as f32 / res.num_iteration as f32,
                ff_h,
                ff_h as f32 / res.num_iteration as f32
            );
        }
    }
}

fn exp(
    i: usize,
    tag: &str,
    patterns: &Vec<Vec<usize>>,
    same_ip: bool,
    unique_ip: bool,
    prober: &mut Prober<1>,
) {
    if same_ip {
        let single_reload = Function::try_new(1, 0, TIMED_MACCESS).unwrap();
        let mut results = Vec::new();
        for pattern in patterns {
            eprintln!("Single IP pattern: {:?}", pattern);
            let single_ip_pattern = pattern_helper(pattern, &single_reload);
            let result = prober.full_page_probe(single_ip_pattern, NUM_ITERATION, WARMUP);
            results.push(result);
            sched_yield().unwrap();
        }
        print_tagged_csv(&format!("SingleIP{}", tag), results, i);
        // generate the vec with a single IP
    }
    if unique_ip {
        let mut functions = Vec::new();
        let rounded_i = i.next_power_of_two();
        for j in 0..i {
            functions.push(Function::try_new(rounded_i, j, TIMED_MACCESS).unwrap());
        }
        let mut results = Vec::new();
        for pattern in patterns {
            eprintln!("Unique IP pattern: {:?}", pattern);

            let unique_ip_pattern = pattern
                .iter()
                .enumerate()
                .map(|(i, &offset)| PatternAccess {
                    function: &functions[i],
                    offset,
                })
                .collect();
            let result = prober.full_page_probe(unique_ip_pattern, NUM_ITERATION, WARMUP);
            results.push(result);
            sched_yield().unwrap();
        }
        print_tagged_csv(&format!("UniqueIPs{}", tag), results, i);
    }
}

/* TODO change access patterns
- We want patterns for i,j in [0,64]^2
  A (i,i+k,j)
  B (i,i-k,j)
  C (i,j,j+k)
  D (i,j,j-k)

  with k in 1,2,3,8, plus possibly others.
  4 access patterns will probably come in later

In addition consider base + stride + len patterns, with a well chosen set of length, strides and bases
len to be considered 2,3,4
Identifiers :
E 2
*/

fn main() {
    // TODO Argument parsing
    let args = Params {
        limit: 2,
        same_ip: true,
        unique_ip: true,
    };

    let mut experiments: Vec<(
        String,
        usize,
        usize,
        Box<dyn Fn(usize, usize) -> Vec<usize>>,
    )> = vec![];
    for class in [(
        "",
        Box::new(|k: usize| {
            let f = Box::new(move |i, j| {
                let mut v = vec![i, j];
                v.truncate(k);
                v
            }) as Box<dyn Fn(usize, usize) -> Vec<usize>>;
            let i_limit = if k > 0 { PAGE_CACHELINE_LEN } else { 1 };
            let j_limit = if k > 1 { PAGE_CACHELINE_LEN } else { 1 };
            (i_limit, j_limit, f)
        }) as Box<dyn Fn(usize) -> (usize, usize, Box<dyn Fn(usize, usize) -> Vec<usize>>)>,
    )] {
        for k in [0, 1, 2] {
            let exp = class.1(k);
            experiments.push((format!("{}{}", class.0, k), exp.0, exp.1, exp.2));
        }
    }

    for class in [
        (
            "A",
            Box::new(|k| {
                Box::new(move |i, j| vec![i, (i + k) % PAGE_CACHELINE_LEN, j])
                    as Box<dyn Fn(usize, usize) -> Vec<usize>>
            }) as Box<dyn Fn(usize) -> Box<dyn Fn(usize, usize) -> Vec<usize>>>,
        ),
        (
            "B",
            Box::new(|k| {
                Box::new(move |i, j| vec![i, (i - k) % PAGE_CACHELINE_LEN, j])
                    as Box<dyn Fn(usize, usize) -> Vec<usize>>
            }) as Box<dyn Fn(usize) -> Box<dyn Fn(usize, usize) -> Vec<usize>>>,
        ),
        (
            "C",
            Box::new(|k| {
                Box::new(move |i, j| vec![i, j, (j + k) % PAGE_CACHELINE_LEN])
                    as Box<dyn Fn(usize, usize) -> Vec<usize>>
            }) as Box<dyn Fn(usize) -> Box<dyn Fn(usize, usize) -> Vec<usize>>>,
        ),
        (
            "D",
            Box::new(|k| {
                Box::new(move |i, j| vec![i, j, (j - k) % PAGE_CACHELINE_LEN])
                    as Box<dyn Fn(usize, usize) -> Vec<usize>>
            }) as Box<dyn Fn(usize) -> Box<dyn Fn(usize, usize) -> Vec<usize>>>,
        ),
    ] {
        for k in [1, 2, 3, 4, 8] {
            experiments.push((
                format!("{}{}", class.0, k),
                PAGE_CACHELINE_LEN,
                PAGE_CACHELINE_LEN,
                class.1(k),
            ));
        }
    }

    for class in [(
        "E",
        Box::new(|len: usize| {
            Box::new(move |base, stride| {
                let mut res = vec![base];
                for i in 0..len {
                    res.push((base + stride * (i + 1)) % PAGE_CACHELINE_LEN)
                }
                res
            }) as Box<dyn Fn(usize, usize) -> Vec<usize>>
        }) as Box<dyn Fn(usize) -> Box<dyn Fn(usize, usize) -> Vec<usize>>>,
    )] {
        for len in [2, 3, 4] {
            experiments.push((
                format!("{}{}", class.0, len),
                PAGE_CACHELINE_LEN,
                PAGE_CACHELINE_LEN,
                class.1(len),
            ));
        }
    }

    for class in [(
        "F",
        Box::new(|k: isize| {
            Box::new(move |i, j| {
                vec![
                    i,
                    (i as isize + k + PAGE_CACHELINE_LEN as isize) as usize % PAGE_CACHELINE_LEN,
                    j,
                    (i as isize + 2 * k + PAGE_CACHELINE_LEN as isize) as usize
                        % PAGE_CACHELINE_LEN,
                ]
            }) as Box<dyn Fn(usize, usize) -> Vec<usize>>
        }) as Box<dyn Fn(isize) -> Box<dyn Fn(usize, usize) -> Vec<usize>>>,
    )] {
        for k in [4 as isize, 3, 2, 1, -1, -2, -3, -4] {
            experiments.push((
                format!("{}{}", class.0, k),
                PAGE_CACHELINE_LEN,
                PAGE_CACHELINE_LEN,
                class.1(k),
            ));
        }
    }

    let mut prober = Prober::<1>::new(63).unwrap();

    for experiment in experiments {
        let tag = &experiment.0;
        let mut patterns = vec![];
        for i in 0..experiment.1 {
            for j in 0..experiment.2 {
                patterns.push(experiment.3(i, j))
            }
        }
        let i = patterns[0].len();
        exp(i, tag, &patterns, args.same_ip, args.unique_ip, &mut prober);
    }
}

@@ -14,7 +14,7 @@ Alternatively, limit to 3 accesses ?
 use cache_utils::ip_tool::{Function, TIMED_MACCESS};
 use itertools::Itertools;
 use nix::sched::sched_yield;
-use prefetcher_reverse::{
+use CacheObserver::{
     pattern_helper, FullPageDualProbeResults, PatternAccess, Prober, PAGE_CACHELINE_LEN,
 };

@@ -14,7 +14,7 @@ Alternatively, limit to 3 accesses ?
 use cache_utils::ip_tool::{Function, TIMED_MACCESS};
 use itertools::Itertools;
 use nix::sched::sched_yield;
-use prefetcher_reverse::{
+use CacheObserver::{
     pattern_helper, FullPageDualProbeResults, PatternAccess, Prober, PAGE_CACHELINE_LEN,
 };

@@ -14,7 +14,7 @@ Alternatively, limit to 3 accesses ?
 use cache_utils::ip_tool::{Function, TIMED_MACCESS};
 use itertools::Itertools;
 use nix::sched::sched_yield;
-use prefetcher_reverse::{
+use CacheObserver::{
     pattern_helper, FullPageDualProbeResults, PatternAccess, Prober, PAGE_CACHELINE_LEN,
 };

@@ -8,7 +8,7 @@ use cache_utils::calibration::{Threshold, PAGE_LEN};
 use cache_utils::maccess;
 use cache_utils::mmap::MMappedMemory;
 use flush_flush::naive::NaiveFlushAndFlush;
-use prefetcher_reverse::CACHE_LINE_LEN;
+use CacheObserver::CACHE_LINE_LEN;
 
 const ITERATIONS: i32 = 128;
 const THRESHOLD: usize = 175; // For Cyber Cobaye

@@ -1,5 +1,5 @@
 use cache_utils::ip_tool::{Function, TIMED_MACCESS};
-use prefetcher_reverse::{pattern_helper, Prober, PAGE_CACHELINE_LEN};
+use CacheObserver::{pattern_helper, Prober, PAGE_CACHELINE_LEN};
 
 pub const NUM_ITERATION: usize = 1 << 10;
 

@@ -11,12 +11,12 @@ use cache_utils::mmap;
 use cache_utils::mmap::MMappedMemory;
 use flush_flush::{FFHandle, FFPrimitives, FlushAndFlush};
 use nix::Error;
-use prefetcher_reverse::{
-    pattern_helper, reference_patterns, Prober, CACHE_LINE_LEN, PAGE_CACHELINE_LEN,
-};
 use rand::seq::SliceRandom;
 use std::iter::Cycle;
 use std::ops::Range;
+use CacheObserver::{
+    pattern_helper, reference_patterns, Prober, CACHE_LINE_LEN, PAGE_CACHELINE_LEN,
+};
 
 pub const NUM_ITERATION: usize = 1 << 10;
 pub const NUM_PAGES: usize = 256;

@@ -1,7 +1,7 @@
 use cache_utils::ip_tool::{Function, TIMED_MACCESS};
 use cache_utils::{flush, maccess};
-use prefetcher_reverse::{pattern_helper, Prober, PAGE_CACHELINE_LEN};
 use std::arch::x86_64 as arch_x86;
+use CacheObserver::{pattern_helper, Prober, PAGE_CACHELINE_LEN};
 
 pub const NUM_ITERATION: usize = 1 << 10;
 

@@ -1,5 +1,5 @@
 use cache_utils::ip_tool::{Function, TIMED_MACCESS};
-use prefetcher_reverse::{pattern_helper, Prober, PAGE_CACHELINE_LEN};
+use CacheObserver::{pattern_helper, Prober, PAGE_CACHELINE_LEN};
 
 pub const NUM_ITERATION: usize = 1 << 10;
 

@@ -11,11 +11,9 @@ use cache_utils::mmap;
 use cache_utils::mmap::MMappedMemory;
 use flush_flush::{FFHandle, FFPrimitives, FlushAndFlush};
 use nix::Error;
-use prefetcher_reverse::{
-    pattern_helper, PatternAccess, Prober, CACHE_LINE_LEN, PAGE_CACHELINE_LEN,
-};
 use rand::seq::SliceRandom;
 use std::iter::Cycle;
+use CacheObserver::{pattern_helper, PatternAccess, Prober, CACHE_LINE_LEN, PAGE_CACHELINE_LEN};
 
 pub const NUM_ITERATION: usize = 1 << 10;
 pub const NUM_PAGES: usize = 256;

Cargo.lock (generated, 232 lines changed)

@@ -3,17 +3,18 @@
 version = 3
 
 [[package]]
-name = "aes-t-tables"
+name = "CacheObserver"
 version = "0.1.0"
 dependencies = [
  "basic_timing_cache_channel",
  "bitvec",
  "cache_side_channel",
  "cache_utils",
  "flush_flush",
  "flush_reload",
- "memmap2",
- "nix",
- "openssl",
- "openssl-sys",
+ "itertools 0.10.1",
+ "lazy_static",
+ "nix 0.20.0",
  "rand",
 ]
 
@@ -30,24 +31,24 @@ dependencies = [
 
 [[package]]
 name = "arrayref"
-version = "0.3.6"
+version = "0.3.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544"
+checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545"
 
 [[package]]
 name = "atomic"
-version = "0.5.0"
+version = "0.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3410529e8288c463bedb5930f82833bc0c90e5d2fe639a56582a4d09220b281"
+checksum = "8d818003e740b63afc82337e3160717f4f63078720a810b7b903e70a5d1d2994"
 dependencies = [
- "autocfg",
+ "bytemuck",
 ]
 
 [[package]]
 name = "autocfg"
-version = "1.0.1"
+version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
+checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
 
 [[package]]
 name = "basic_timing_cache_channel"
@@ -55,20 +56,14 @@ version = "0.1.0"
 dependencies = [
  "cache_side_channel",
  "cache_utils",
- "nix",
+ "nix 0.28.0",
 ]
 
 [[package]]
 name = "bit_field"
-version = "0.9.0"
+version = "0.10.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ed8765909f9009617974ab6b7d332625b320b33c326b1e9321382ef1999b5d56"
-
-[[package]]
-name = "bit_field"
-version = "0.10.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dcb6dd1c2376d2e096796e234a70e17e94cc2d5d54ff8ce42b28cef1d0d359a4"
+checksum = "dc827186963e592360843fb5ba4b973e145841266c1357f7180c43526f2e5b61"
 
 [[package]]
 name = "bitflags"
@@ -76,6 +71,12 @@ version = "1.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
 
+[[package]]
+name = "bitflags"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1"
+
 [[package]]
 name = "bitvec"
 version = "0.22.3"
@@ -90,19 +91,25 @@ dependencies = [
 
 [[package]]
 name = "bootloader"
-version = "0.9.18"
+version = "0.9.29"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a3c1ceed1cd9e61c7998100cc18c13d413aa40d018992b871ab8e7435ce6372"
+checksum = "365861702868e2a37b4247aaecc7bd8f4389baec8d025497ad8ba7ff37ee9440"
 dependencies = [
- "bit_field 0.10.1",
+ "bit_field",
 ]
 
+[[package]]
+name = "bytemuck"
+version = "1.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "78834c15cb5d5efe3452d58b1e8ba890dd62d21907f867f383358198e56ebca5"
+
 [[package]]
 name = "cache_side_channel"
 version = "0.1.0"
 dependencies = [
- "bit_field 0.10.1",
- "nix",
+ "bit_field",
+ "nix 0.28.0",
 ]
 
 [[package]]
@@ -113,10 +120,10 @@ dependencies = [
  "bitvec",
  "cpuid",
  "hashbrown",
- "itertools",
+ "itertools 0.12.1",
  "lazy_static",
  "libc",
- "nix",
+ "nix 0.28.0",
  "polling_serial",
  "static_assertions",
  "turn_lock",
@@ -136,6 +143,12 @@ version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
 
+[[package]]
+name = "cfg_aliases"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e"
+
 [[package]]
 name = "covert_channels_benchmark"
 version = "0.1.0"
@@ -145,17 +158,17 @@ dependencies = [
  "covert_channels_evaluation",
  "flush_flush",
  "flush_reload",
- "nix",
+ "nix 0.20.0",
 ]
 
 [[package]]
 name = "covert_channels_evaluation"
 version = "0.1.0"
 dependencies = [
- "bit_field 0.10.1",
+ "bit_field",
  "cache_side_channel",
  "cache_utils",
- "nix",
+ "nix 0.28.0",
  "rand",
  "turn_lock",
 ]
@@ -164,7 +177,7 @@ dependencies = [
 name = "cpuid"
 version = "0.1.0"
 dependencies = [
- "itertools",
+ "itertools 0.13.0",
 ]
 
 [[package]]
@@ -195,7 +208,7 @@ dependencies = [
  "basic_timing_cache_channel",
  "cache_side_channel",
  "cache_utils",
- "nix",
+ "nix 0.28.0",
 ]
 
 [[package]]
@@ -205,24 +218,9 @@ dependencies = [
  "basic_timing_cache_channel",
  "cache_side_channel",
  "cache_utils",
- "nix",
+ "nix 0.28.0",
 ]
 
-[[package]]
-name = "foreign-types"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
-dependencies = [
- "foreign-types-shared",
-]
-
-[[package]]
-name = "foreign-types-shared"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
-
 [[package]]
 name = "funty"
 version = "1.2.0"
@@ -258,6 +256,24 @@ dependencies = [
  "either",
 ]
 
+[[package]]
+name = "itertools"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569"
+dependencies = [
+ "either",
+]
+
+[[package]]
+name = "itertools"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186"
+dependencies = [
+ "either",
+]
+
 [[package]]
 name = "lazy_static"
 version = "1.4.0"
@@ -269,88 +285,59 @@ dependencies = [
 
 [[package]]
 name = "libc"
-version = "0.2.96"
+version = "0.2.155"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5600b4e6efc5421841a2138a6b082e07fe12f9aaa12783d50e5d13325b26b4fc"
+checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c"
 
 [[package]]
 name = "linked_list_allocator"
-version = "0.9.0"
+version = "0.9.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d0b725207570aa16096962d0b20c79f8a543df2280bd3c903022b9b0b4d7ea68"
+checksum = "549ce1740e46b291953c4340adcd74c59bcf4308f4cac050fd33ba91b7168f4a"
 dependencies = [
  "spinning_top",
 ]
 
 [[package]]
 name = "lock_api"
-version = "0.4.4"
+version = "0.4.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0382880606dff6d15c9476c416d18690b72742aa7b605bb6dd6ec9030fbf07eb"
+checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
 dependencies = [
  "autocfg",
  "scopeguard",
 ]
 
-[[package]]
-name = "memmap2"
-version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "723e3ebdcdc5c023db1df315364573789f8857c11b631a2fdfad7c00f5c046b4"
-dependencies = [
- "libc",
-]
-
 [[package]]
 name = "nix"
 version = "0.20.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fa9b4819da1bc61c0ea48b63b7bc8604064dd43013e7cc325df098d49cd7c18a"
 dependencies = [
- "bitflags",
+ "bitflags 1.2.1",
  "cc",
  "cfg-if",
  "libc",
 ]
 
+[[package]]
+name = "nix"
+version = "0.28.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4"
+dependencies = [
+ "bitflags 2.5.0",
+ "cfg-if",
+ "cfg_aliases",
+ "libc",
+]
+
-[[package]]
-name = "once_cell"
-version = "1.7.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3"
-
-[[package]]
-name = "openssl"
-version = "0.10.34"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6d7830286ad6a3973c0f1d9b73738f69c76b739301d0229c4b96501695cbe4c8"
-dependencies = [
- "bitflags",
- "cfg-if",
- "foreign-types",
- "libc",
- "once_cell",
- "openssl-sys",
-]
-
-[[package]]
-name = "openssl-sys"
-version = "0.9.63"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b6b0d6fb7d80f877617dfcb014e605e2b5ab2fb0afdf27935219bb6bd984cb98"
-dependencies = [
- "autocfg",
- "cc",
- "libc",
- "pkg-config",
- "vcpkg",
-]
-
-[[package]]
-name = "pkg-config"
-version = "0.3.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
-
 [[package]]
 name = "polling_serial"
 version = "0.1.0"
@@ -366,22 +353,6 @@ version = "0.2.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
 
-[[package]]
-name = "prefetcher_reverse"
-version = "0.1.0"
-dependencies = [
- "basic_timing_cache_channel",
- "bitvec",
- "cache_side_channel",
- "cache_utils",
- "flush_flush",
- "flush_reload",
- "itertools",
- "lazy_static",
- "nix",
- "rand",
-]
-
 [[package]]
 name = "radium"
 version = "0.6.2"
@@ -390,14 +361,13 @@ checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb"
 
 [[package]]
 name = "rand"
-version = "0.8.3"
+version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e"
+checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
 dependencies = [
  "libc",
  "rand_chacha",
  "rand_core",
- "rand_hc",
 ]
 
 [[package]]
@@ -420,13 +390,10 @@ dependencies = [
 ]
 
 [[package]]
-name = "rand_hc"
-version = "0.3.0"
+name = "rustversion"
+version = "1.0.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73"
-dependencies = [
- "rand_core",
-]
+checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47"
 
 [[package]]
 name = "scopeguard"
@@ -451,9 +418,9 @@ dependencies = [
 
 [[package]]
 name = "spinning_top"
-version = "0.2.4"
+version = "0.2.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "75adad84ee84b521fb2cca2d4fd0f1dab1d8d026bda3c5bea4ca63b5f9f9293c"
+checksum = "5b9eb1a2f4c41445a3a0ff9abc5221c5fcd28e1f13cd7c0397706f9ac938ddb0"
 dependencies = [
  "lock_api",
 ]
@@ -474,12 +441,6 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
 name = "turn_lock"
 version = "0.1.0"
 
-[[package]]
-name = "vcpkg"
-version = "0.2.13"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "025ce40a007e1907e58d5bc1a594def78e5573bb0b1160bc389634e8f12e4faa"
-
 [[package]]
 name = "version_check"
 version = "0.9.3"
@@ -524,11 +485,12 @@ dependencies = [
 
 [[package]]
 name = "x86_64"
-version = "0.14.3"
+version = "0.15.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b7c54a17492391c594753ce2a180142bec7ad2876543565c2a08aa11cddef251"
+checksum = "4bc79523af8abf92fb1a970c3e086c5a343f6bcc1a0eb890f575cbb3b45743df"
 dependencies = [
- "bit_field 0.9.0",
- "bitflags",
+ "bit_field",
+ "bitflags 2.5.0",
+ "rustversion",
  "volatile 0.4.4",
 ]

Cargo.toml (61 lines changed)

@@ -5,7 +5,7 @@ members = [
  "polling_serial",
  "cache_utils",
  "cpuid",
- "aes-t-tables",
+ #"aes-t-tables",
  "covert_channels_benchmark",
  "covert_channels_evaluation",
  "cache_side_channel",
@@ -13,59 +13,6 @@ members = [
  "flush_flush",
  "basic_timing_cache_channel",
  "turn_lock",
- "prefetcher_reverse",
-]
-
-[package]
-name = "dendrobates_tinctoreus_azureus"
-version = "0.1.0"
-authors = ["Guillaume DIDIER <guillaume.didier.2014@polytechnique.org>"]
-edition = "2018"
-
-[package.metadata.bootimage]
-#run-command = ["./scripts/bochs.sh", "{}"]
-run-command = ["./scripts/run.sh", "{}"]
-test-args = ["qemu"]
-run-args = ["bochs"]
-#run-command = ["qemu-system-x86_64", "-drive", "format=raw,file={}"]
-#test-args = ["-device", "isa-debug-exit,iobase=0xf4,iosize=0x04"]
-test-success-exit-code = 33 # (0x10 << 1) | 1
-
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
-[dependencies]
-x86_64 = "0.14.3"
-vga_buffer = { path = "vga_buffer" }
-polling_serial = { path = "polling_serial" }
-volatile = "0.4.4"
-linked_list_allocator = "0.9.0"
-cache_utils = { path = "cache_utils", features = ["no_std"], default-features = false }
-arrayref = "0.3.6"
-
-[dependencies.lazy_static]
-version = "1.4.0"
-features = ["spin_no_std"]
-
-[dependencies.bootloader]
-version = "0.9.16"
-features = ["sse", "map_physical_memory"]
-
-#[patch.crates-io]
-#bootloader = { path = "../bootloader" }
-
-[profile.dev]
-opt-level = 1
-debug = 2
-
-
-[profile.test]
-opt-level = 1
-debug = 2
-
-[[test]]
-name = "panic_test"
-harness = false
-
-[[test]]
-name = "stack_overflow"
-harness = false
+ "CacheObserver",
+ "dendrobates-t-azureus"
+]

README-Calibration.md (new file, 88 lines)

# calibration-done-right

Public code for the "calibration done right" paper.

The code base is written entirely in Rust.

There are three crates with binaries providing results:
- **aes-t-tables** runs the T-table attack using the 3 side channels
- **cache_utils** `two_thread_cal` runs a full calibration on all core pairs for Flush+Flush
  and analyses the results to provide error rate predictions in various attacker models
- **covert_channel_benchmark** is the crate that runs covert channel benchmarks on the various covert channels

The code presented runs under Fedora 30, and can also be made to run on Ubuntu 18.04 LTS with minor tweaks.

(Notably, libcpupower may also be called libcpufreq.)

On recent Fedora the package is `kernel-tools-libs-devel`, on Debian `libcpupower-dev`.

You will also need the `cpupower` utility (from `kernel-tools` / `linux-cpupower`).

# Usage

## General set-up

Requires Rust nightly features. Install Rust nightly using rustup.

One should disable turbo boost and other sources of idle frequency scaling.

On Intel platforms, the following should work.

```shell
# performance cpu frequency governor
cpupower frequency-set -g performance

# No Turbo Boost
echo 1 > /sys/devices/system/cpu/intel_pstate/no_turbo
```

Depending on the experiment you may be interested in disabling prefetchers.

## Two thread calibration set-up and usage

In addition to the general set-up, you need to enable 2 MB hugepages and ensure at least one is available.
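
A minimal sketch of that hugepage set-up (the count here is an arbitrary example):

```shell
# reserve a few 2 MB huge pages
echo 8 | sudo tee /proc/sys/vm/nr_hugepages
# check the reservation
grep HugePages_ /proc/meminfo
```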

Then you can run `cargo run --release --bin two_thread_cal > result.log`.

Various scripts are also included that have been used to parse the log:

`analyse.sh` -> `analyse_csv.py` -> `analyse_median.py` is used to analyse the timing histograms.
`extract_analysis_csv.sh` is used to extract the attacker model results.

The Python scripts require an environment (such as a virtual env) with the packages in `cache_utils/requirements.txt`.

## AES T-table set-up and usage

One needs an OpenSSL built with the no-asm and no-hw flags installed in ~/openssl (the path is in aes-t-tables/cargo.sh and can be changed).

You then need to extract the T-table addresses; this can be done using `nm libcrypto.so | grep 'Te[0-4]'`, and update those in aes-t-tables/src/main.rs.
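
A sketch of those two steps, assuming the OpenSSL sources are unpacked in ~/openssl-src (a hypothetical path; the libcrypto location may also vary, e.g. lib vs lib64):

```shell
# build a software-only OpenSSL (no assembly, no hardware acceleration)
cd ~/openssl-src
./config --prefix=$HOME/openssl no-asm no-hw
make && make install

# extract the T-table symbol addresses to paste into aes-t-tables/src/main.rs
nm ~/openssl/lib/libcrypto.so | grep 'Te[0-4]'
```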

You'll also want to update the thresholds in main.rs using the results from the calibration.

You can then run `./cargo.sh run --release > result.log`.

## Covert Channel benchmark

Do the general set-up, update the thresholds for the Naive channels in main.rs, and then run `cargo run --release | tee results.log`.

# Crate documentation

- `cpuid` is a small crate that handles CPU microarchitecture identification and provides info about what is known about it
- `cache_utils` contains utilities related to cache attacks
- `cache_side_channel` defines the interface cache side channels have to implement
- `basic_timing_cache_channel` contains generic implementations of Naive and Optimised cache side channels, which just require providing the actual operation used
- `flush_flush` and `flush_reload` are tiny crates that use `basic_timing_cache_channel` to export Flush+Flush and Flush+Reload primitives
- `turn_lock` is the synchronisation primitive used by `cache_utils` and `covert_channel_evaluation`
- `covert_channel_evaluation` is a generic implementation of a covert channel benchmark
- `covert_channel_benchmark` calls the previous implementation over the 3 channels

# Rust version

Known good nightly versions:
- rustc 1.54.0-nightly (eab201df7 2021-06-09)
- rustc 1.55.0-nightly (885399992 2021-07-06)

# License

Apache-2.0 or MIT

README.md (new file, 3 lines)

See README-Calibration.md

Merging of the READMEs of the various papers' code is in progress.

@@ -3,16 +3,16 @@ name = "aes-t-tables"
 version = "0.1.0"
 authors = ["GuillaumeDIDIER <guillaume.didier95@hotmail.fr>"]
 edition = "2018"
+license = "MIT OR Apache-2.0"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
 openssl-sys = "0.9.61"
 openssl = "0.10.33"
 cache_utils = { path = "../cache_utils" }
-memmap2 = "0.2.1"
-rand = "0.8.3"
-nix = "0.20.0"
+memmap2 = "0.9.4"
+rand = "0.8.5"
+nix = "0.28.0"
 cache_side_channel = { path = "../cache_side_channel" }
 flush_flush = { path = "../flush_flush" }
 flush_reload = { path = "../flush_reload" }

@@ -1,4 +1,11 @@
+# SPDX-FileCopyrightText: 2021 Guillaume DIDIER
+#
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-License-Identifier: MIT
+
 #/bin/bash
 
 # Change this to the correct openssl installation path
 export OPENSSL_DIR=$(readlink -f ~/openssl)
 export X86_64_UNKNOWN_LINUX_GNU_OPENSSL_DIR=$OPENSSL_DIR
 export PKG_CONFIG_PATH=$OPENSSL_DIR

analysis/change_colour_map.sh (new executable file, 7 lines)

#!/bin/sh

sed -i.old 's/plots1/squarecolourmap/g' */*/*/*{FF,SF,SR}.tikz
sed -i.old 's/plots1/cubecolourmap/g' */*/*/*AllProbes.tikz
sed -i.old 's/plots1/slicecolourmap/g' */*/*/*Slice_*.tikz
sed -i.old 's/plots1/maxcolourmap/g' max_*/*.tikz
sed -i.old 's/plots1/diffcolourmap/g' diff_*/*.tikz

analysis/extract_analysis_csv.sh (new executable file, 128 lines)

#!/bin/sh

grep '^SingleIP0:' "$1.log" | cut -b 11- > "$1.S0.csv"
grep '^UniqueIPs0:' "$1.log" | cut -b 12- > "$1.U0.csv"
grep '^SingleIP1:' "$1.log" | cut -b 11- > "$1.S1.csv"
grep '^UniqueIPs1:' "$1.log" | cut -b 12- > "$1.U1.csv"
grep '^SingleIP2:' "$1.log" | cut -b 11- > "$1.S2.csv"
grep '^UniqueIPs2:' "$1.log" | cut -b 12- > "$1.U2.csv"

grep '^SingleIPA1:' "$1.log" | cut -b 12- > "$1.SA1.csv"
grep '^UniqueIPsA1:' "$1.log" | cut -b 13- > "$1.UA1.csv"
grep '^SingleIPA2:' "$1.log" | cut -b 12- > "$1.SA2.csv"
grep '^UniqueIPsA2:' "$1.log" | cut -b 13- > "$1.UA2.csv"
grep '^SingleIPA3:' "$1.log" | cut -b 12- > "$1.SA3.csv"
grep '^UniqueIPsA3:' "$1.log" | cut -b 13- > "$1.UA3.csv"
grep '^SingleIPA4:' "$1.log" | cut -b 12- > "$1.SA4.csv"
grep '^UniqueIPsA4:' "$1.log" | cut -b 13- > "$1.UA4.csv"
grep '^SingleIPA8:' "$1.log" | cut -b 12- > "$1.SA8.csv"
grep '^UniqueIPsA8:' "$1.log" | cut -b 13- > "$1.UA8.csv"
grep '^SingleIPB1:' "$1.log" | cut -b 12- > "$1.SB1.csv"
grep '^UniqueIPsB1:' "$1.log" | cut -b 13- > "$1.UB1.csv"
grep '^SingleIPB2:' "$1.log" | cut -b 12- > "$1.SB2.csv"
grep '^UniqueIPsB2:' "$1.log" | cut -b 13- > "$1.UB2.csv"
grep '^SingleIPB3:' "$1.log" | cut -b 12- > "$1.SB3.csv"
grep '^UniqueIPsB3:' "$1.log" | cut -b 13- > "$1.UB3.csv"
grep '^SingleIPB4:' "$1.log" | cut -b 12- > "$1.SB4.csv"
grep '^UniqueIPsB4:' "$1.log" | cut -b 13- > "$1.UB4.csv"
grep '^SingleIPB8:' "$1.log" | cut -b 12- > "$1.SB8.csv"
grep '^UniqueIPsB8:' "$1.log" | cut -b 13- > "$1.UB8.csv"
grep '^SingleIPC1:' "$1.log" | cut -b 12- > "$1.SC1.csv"
grep '^UniqueIPsC1:' "$1.log" | cut -b 13- > "$1.UC1.csv"
grep '^SingleIPC2:' "$1.log" | cut -b 12- > "$1.SC2.csv"
grep '^UniqueIPsC2:' "$1.log" | cut -b 13- > "$1.UC2.csv"
grep '^SingleIPC3:' "$1.log" | cut -b 12- > "$1.SC3.csv"
grep '^UniqueIPsC3:' "$1.log" | cut -b 13- > "$1.UC3.csv"
grep '^SingleIPC4:' "$1.log" | cut -b 12- > "$1.SC4.csv"
grep '^UniqueIPsC4:' "$1.log" | cut -b 13- > "$1.UC4.csv"
grep '^SingleIPC8:' "$1.log" | cut -b 12- > "$1.SC8.csv"
grep '^UniqueIPsC8:' "$1.log" | cut -b 13- > "$1.UC8.csv"
grep '^SingleIPD1:' "$1.log" | cut -b 12- > "$1.SD1.csv"
grep '^UniqueIPsD1:' "$1.log" | cut -b 13- > "$1.UD1.csv"
grep '^SingleIPD2:' "$1.log" | cut -b 12- > "$1.SD2.csv"
grep '^UniqueIPsD2:' "$1.log" | cut -b 13- > "$1.UD2.csv"
grep '^SingleIPD3:' "$1.log" | cut -b 12- > "$1.SD3.csv"
grep '^UniqueIPsD3:' "$1.log" | cut -b 13- > "$1.UD3.csv"
grep '^SingleIPD4:' "$1.log" | cut -b 12- > "$1.SD4.csv"
grep '^UniqueIPsD4:' "$1.log" | cut -b 13- > "$1.UD4.csv"
grep '^SingleIPD8:' "$1.log" | cut -b 12- > "$1.SD8.csv"
grep '^UniqueIPsD8:' "$1.log" | cut -b 13- > "$1.UD8.csv"
grep '^SingleIPE2:' "$1.log" | cut -b 12- > "$1.SE2.csv"
grep '^UniqueIPsE2:' "$1.log" | cut -b 13- > "$1.UE2.csv"
grep '^SingleIPE3:' "$1.log" | cut -b 12- > "$1.SE3.csv"
grep '^UniqueIPsE3:' "$1.log" | cut -b 13- > "$1.UE3.csv"
grep '^SingleIPE4:' "$1.log" | cut -b 12- > "$1.SE4.csv"
grep '^UniqueIPsE4:' "$1.log" | cut -b 13- > "$1.UE4.csv"

grep '^SingleIPF1:' "$1.log" | cut -b 12- > "$1.SF1.csv"
grep '^SingleIPF-1:' "$1.log" | cut -b 13- > "$1.SF-1.csv"
grep '^SingleIPF2:' "$1.log" | cut -b 12- > "$1.SF2.csv"
grep '^SingleIPF-2:' "$1.log" | cut -b 13- > "$1.SF-2.csv"
grep '^SingleIPF3:' "$1.log" | cut -b 12- > "$1.SF3.csv"
grep '^SingleIPF-3:' "$1.log" | cut -b 13- > "$1.SF-3.csv"
grep '^SingleIPF4:' "$1.log" | cut -b 12- > "$1.SF4.csv"
grep '^SingleIPF-4:' "$1.log" | cut -b 13- > "$1.SF-4.csv"
grep '^UniqueIPsF1:' "$1.log" | cut -b 13- > "$1.UF1.csv"
grep '^UniqueIPsF-1:' "$1.log" | cut -b 14- > "$1.UF-1.csv"
grep '^UniqueIPsF2:' "$1.log" | cut -b 13- > "$1.UF2.csv"
grep '^UniqueIPsF-2:' "$1.log" | cut -b 14- > "$1.UF-2.csv"
grep '^UniqueIPsF3:' "$1.log" | cut -b 13- > "$1.UF3.csv"
grep '^UniqueIPsF-3:' "$1.log" | cut -b 14- > "$1.UF-3.csv"
grep '^UniqueIPsF4:' "$1.log" | cut -b 13- > "$1.UF4.csv"
grep '^UniqueIPsF-4:' "$1.log" | cut -b 14- > "$1.UF-4.csv"


#SingleIPA1Functions:i,Addr
#UniqueIPsA1Functions:i,Addr
#SingleIPA2Functions:i,Addr
#UniqueIPsA2Functions:i,Addr
#SingleIPA3Functions:i,Addr
#UniqueIPsA3Functions:i,Addr
#SingleIPA4Functions:i,Addr
#UniqueIPsA4Functions:i,Addr
#SingleIPA8Functions:i,Addr
#UniqueIPsA8Functions:i,Addr
#SingleIPB1Functions:i,Addr
#UniqueIPsB1Functions:i,Addr
#SingleIPB2Functions:i,Addr
#UniqueIPsB2Functions:i,Addr
#SingleIPB3Functions:i,Addr
#UniqueIPsB3Functions:i,Addr
#SingleIPB4Functions:i,Addr
#UniqueIPsB4Functions:i,Addr
#SingleIPB8Functions:i,Addr
#UniqueIPsB8Functions:i,Addr
#SingleIPC1Functions:i,Addr
#UniqueIPsC1Functions:i,Addr
#SingleIPC2Functions:i,Addr
#UniqueIPsC2Functions:i,Addr
#SingleIPC3Functions:i,Addr
#UniqueIPsC3Functions:i,Addr
#SingleIPC4Functions:i,Addr
#UniqueIPsC4Functions:i,Addr
#SingleIPC8Functions:i,Addr
#UniqueIPsC8Functions:i,Addr
#SingleIPD1Functions:i,Addr
#UniqueIPsD1Functions:i,Addr
#SingleIPD2Functions:i,Addr
#UniqueIPsD2Functions:i,Addr
#SingleIPD3Functions:i,Addr
#UniqueIPsD3Functions:i,Addr
#SingleIPD4Functions:i,Addr
#UniqueIPsD4Functions:i,Addr
#SingleIPD8Functions:i,Addr
#UniqueIPsD8Functions:i,Addr
#SingleIPE2Functions:i,Addr
#UniqueIPsE2Functions:i,Addr
#SingleIPE3Functions:i,Addr
#UniqueIPsE3Functions:i,Addr
#SingleIPE4Functions:i,Addr
#UniqueIPsE4Functions:i,Addr


#grep '^SingleIP0:' "$1.log" | cut -b 11- > "$1.S0.csv"
#grep '^UniqueIPs0:' "$1.log" | cut -b 12- > "$1.U0.csv"
#grep '^SingleIP1:' "$1.log" | cut -b 11- > "$1.S1.csv"
#grep '^UniqueIPs1:' "$1.log" | cut -b 12- > "$1.U1.csv"
#grep '^SingleIP2:' "$1.log" | cut -b 11- > "$1.S2.csv"
#grep '^UniqueIPs2:' "$1.log" | cut -b 12- > "$1.U2.csv"
414
analysis/makeplots-bonus.jl
Normal file
414
analysis/makeplots-bonus.jl
Normal file
@ -0,0 +1,414 @@
|
||||
using CSV
|
||||
using Plots
|
||||
pgfplotsx()
|
||||
|
||||
function myfill(element, dimensions)
|
||||
res = fill(element, dimensions)
|
||||
res = map(x -> deepcopy(x), res)
|
||||
res
|
||||
end
|
||||
|
||||
# Generals TODO : Fix the ticks, add legends
|
||||
#eaps = [12,13]
|
||||
eaps = [14,15]
|
||||
#eaps = [0,12,13,14,15]
|
||||
len_eaps = length(eaps)
|
||||
#types = ["S","U"]
|
||||
types = ["S"]
|
||||
#functions_identifier = ["A", "B", "C", "D", "E"]
|
||||
functions_identifier = ["F"]
|
||||
|
||||
function A_index(k, x, y, z)
|
||||
if (x + k) % 64 != y
|
||||
print(string("Argh, k = ", k, ", x = ", x, ", y = ", y, ", z = ", z))
|
||||
@assert false
|
||||
end
|
||||
(x,z)
|
||||
end
|
||||
|
||||
function B_index(k, x, y, z)
|
||||
@assert (x - k + 64) % 64 == y
|
||||
(x,z)
|
||||
end
|
||||
|
||||
function C_index(k, x, y, z)
|
||||
@assert (y + k) % 64 == z
|
||||
(x,y)
|
||||
end
|
||||
|
||||
function D_index(k, x, y, z)
|
||||
@assert (y - k + 64) % 64 == z
|
||||
(x,y)
|
||||
end
|
||||
|
||||
function E_index(k, x, y, z)
|
||||
@assert (y - x + 64) % 64 == (z - y + 64) % 64
|
||||
(x, (y - x + 64) % 64)
|
||||
end
|
||||
|
||||
function F_index(k, x1, x2, x3, x4)
|
||||
@assert (x1 + k + 64) % 64 == x2
|
||||
@assert (x2 + k + 64) % 64 == x4
|
||||
(x1, x3)
|
||||
end
|
||||
|
||||
#functions_index = [A_index, B_index, C_index, D_index, E_index]
|
||||
functions_index = [F_index]
|
||||
#types = ["S"]
|
||||
ks = [[1, 2, 3, 4, 8],
|
||||
[1, 2, 3, 4, 8],
|
||||
[1, 2, 3, 4, 8],
|
||||
[1, 2, 3, 4, 8],
|
||||
[2, 3, 4]
|
||||
]
|
||||
|
||||
ks = [[4, 3, 2, 1, -1, -2, -3, -4]]
|
||||
#ks = [[1]]
|
||||
methods = ["SF", "SR", "FF"]
|
||||
|
||||
|
||||
|
||||
plot_lock = ReentrantLock()
|
||||
|
||||
slices_offset_0 = [0, 1, 2, 8, 14, 15, 30, 31, 32, 55, 56, 61, 62, 63]
|
||||
#slices_offset_0 = []
|
||||
|
||||
#diff_slices_offset_0 = [0, 1, 2, 61, 62, 63]
|
||||
|
||||
|
||||
|
||||
function make_name(eap, type, f, k)
|
||||
string("bonusap/bonusap-with-", eap, "-prefetcher.", type, f, k, ".csv")
|
||||
end
|
||||
|
||||
all_file_names = myfill((0,0,0,[]), (length(eaps), length(types), length(functions_index)))
|
||||
|
||||
|
||||
|
||||
for x in 1:len_eaps
|
||||
for (y,type) in enumerate(types)
|
||||
for (z,f) in enumerate(functions_identifier)
|
||||
all_file_names[x,y,z] = (x,y,z,[])
|
||||
for (i,k) in enumerate(ks[z])
|
||||
# change me : insert file names into list
|
||||
push!(all_file_names[x,y,z][4] , (x, y, z, k, make_name(eaps[x], type, f, k) ) )
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
print(all_file_names)
|
||||
|
||||
|
||||
|
||||
#files = Matrix(CSV, length(eaps), length(types), length(levels))
|
||||
files = Array{
|
||||
Union{
|
||||
Nothing,
|
||||
Tuple{Int64, Int64, Int64, Vector{
|
||||
Tuple{ Int64, Int64, Int64, Int64, CSV.File }
|
||||
}}
|
||||
},3
|
||||
}(nothing, length(eaps), length(types), length(functions_identifier))
|
||||
|
||||
|
||||
|
||||
Threads.@threads for f in all_file_names
|
||||
x = f[1]
|
||||
y = f[2]
|
||||
z = f[3]
|
||||
files[x,y,z] = (x,y,z,[])
|
||||
for (x,y,z,k,name) in f[4]
|
||||
push!(files[x,y,z][4], (x,y,z,k, CSV.File(name)))
|
||||
end
|
||||
end
|
||||
|
||||
|
||||
|
||||
# TODO :
|
||||
#
|
||||
# - Split this function in a load data into square / cube structure and a plot function
|
||||
# - Refactor the code below to compute the various squares / cubes and then do the plots.
|
||||
# - Refactor the Slicing function too
|
||||
# - Create a custom diagonal slice function ?
|
||||
|
||||
preamble_printed = false
|
||||
|
||||
push!(PGFPlotsX.CUSTOM_PREAMBLE,raw"\newcommand{\gdfigurewidth}{150mm}")
|
||||
push!(PGFPlotsX.CUSTOM_PREAMBLE,raw"\newcommand{\gdfigureheight}{100mm}")
|
||||
|
||||
function graph2d(name, matrix, xlabel, ylabel)
|
||||
x = range(0, 63)
|
||||
y = range(0, 63)
|
||||
function hmp2d(x, y)
|
||||
matrix[x + 1, y + 1]
|
||||
end
|
||||
lock(plot_lock) do
|
||||
graph = heatmap(x, y, hmp2d, minorgrid=true, height = raw"{\gdfigureheight}}, width = {{\gdfigurewidth}", xlabel = xlabel, ylabel = ylabel, c = :blues, extra_kwargs =:subplot)
|
||||
if !preamble_printed
|
||||
global preamble_printed = true
|
||||
print(Plots.pgfx_preamble(graph))
|
||||
end
|
||||
savefig(graph, string(name, ".tikz"))
|
||||
savefig(graph, string(name, ".pdf"))
|
||||
end
|
||||
end
|
||||
|
||||
function graph2dclims(name, matrix, clims, xlabel, ylabel)
|
||||
x = range(0, 63)
|
||||
y = range(0, 63)
|
||||
function hmp2d(x, y)
|
||||
matrix[x + 1, y + 1]
|
||||
end
|
||||
lock(plot_lock) do
|
||||
graph = heatmap(x, y, hmp2d, clims = clims, minorgrid=true, height = raw"{\gdfigureheight}}, width = {{\gdfigurewidth}", xlabel = xlabel, ylabel = ylabel, extra_kwargs =:subplot)
|
||||
savefig(graph, string(name, ".tikz"))
|
||||
savefig(graph, string(name, ".pdf"))
|
||||
end
|
||||
end
|
||||
|
||||
function cube_flatten_z(cubes)
|
||||
len = length(cubes)
|
||||
res = myfill(myfill(0.0,(64,64)), len)
|
||||
for k in range(1,64)
|
||||
Threads.@threads for i in range(1,64)
|
||||
for j in range(1,64)
|
||||
for l in range(1,len)
|
||||
res[l][i,j] += cubes[l][i,j,k]
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
res
|
||||
end
|
||||

function slice_extract_x(cubes, slices)
    slice_length = length(slices)
    cube_length = length(cubes)
    res = myfill(myfill(myfill(0.0, (64, 64)), slice_length), cube_length)
    for i in range(1, 64)
        for j in range(1, 64)
            for (k, slice) in enumerate(slices)
                for l in range(1, cube_length)
                    res[l][k][i, j] = cubes[l][slice + 1, i, j]
                end
            end
        end
    end
    res
end

function graph_2(basename, csv, k, index_function)
    result = fill(-1.0, (3, 64, 64, 64))

    # Fill in the 3D cube, then create the various slices and flattenings.
    # Flattened cube with x = first addr, y = second addr: compute the sum of prefetches.
    # Grab a few first addresses and look at them with x = second addr, y = probe addr:
    # 0, 1, 62, 63, 14, 15, plus one other depending on what appears.

    for row in csv
        probe = row.ProbeAddr
        offset_0 = Int64(row.Offset_0)
        offset_1 = Int64(row.Offset_1)
        offset_2 = Int64(row.Offset_2)
        offset_3 = Int64(row.Offset_3)

        index = index_function(k, offset_0, offset_1, offset_2, offset_3)
        i = index[1] + 1
        j = index[2] + 1
        @assert result[:, i, j, probe + 1] == [-1.0, -1.0, -1.0]
        result[1, i, j, probe + 1] = row.Probe_SF_HR
        result[2, i, j, probe + 1] = row.Probe_SR_HR
        result[3, i, j, probe + 1] = row.Probe_FF_HR
    end

    allprobes = cube_flatten_z([result[1, :, :, :], result[2, :, :, :], result[3, :, :, :]])
    sf_probe_heatmap_allprobes = allprobes[1]
    sr_probe_heatmap_allprobes = allprobes[2]
    ff_probe_heatmap_allprobes = allprobes[3]

    all_slices = slice_extract_x([result[1, :, :, :], result[2, :, :, :], result[3, :, :, :]], slices_offset_0)
    sf_probe_slices_heatmaps = all_slices[1]
    sr_probe_slices_heatmaps = all_slices[2]
    ff_probe_slices_heatmaps = all_slices[3]

    graph2d(string(basename, "_SF_AllProbes"), sf_probe_heatmap_allprobes, "i", "j")
    graph2d(string(basename, "_SR_AllProbes"), sr_probe_heatmap_allprobes, "i", "j")
    graph2d(string(basename, "_FF_AllProbes"), ff_probe_heatmap_allprobes, "i", "j")

    for (i, offset_0) in enumerate(slices_offset_0)
        print(offset_0)
        graph2dclims(string(basename, "_SF_Slice_", offset_0), sf_probe_slices_heatmaps[i], (0, 1), "j", "probe")
        graph2dclims(string(basename, "_SR_Slice_", offset_0), sr_probe_slices_heatmaps[i], (0, 1), "j", "probe")
        graph2dclims(string(basename, "_FF_Slice_", offset_0), ff_probe_slices_heatmaps[i], (0, 1), "j", "probe")
    end

    result
end

cubes = myfill([], (length(eaps), length(types), length(functions_identifier)))

# need to push (k, cube)

Threads.@threads for experiment in files
    for (eap, type, f, k, file) in experiment[4]
        name = string(eaps[eap], "/julia_eap_", eaps[eap], "_", types[type], functions_identifier[f], k)
        print(string(name, "\n"))
        cube_3 = graph_2(name, file, k, functions_index[f])
        push!(cubes[eap, type, f], cube_3)
    end
end

if false
    print("Computing 14 union 13...")

    function cube_max(cubes_1, cubes_2)
        @assert size(cubes_1) == size(cubes_2)
        sizes = size(cubes_1)
        @assert length(sizes) == 5
        res = fill(0.0, sizes)
        for i in range(1, sizes[1])
            for j in range(1, sizes[2])
                Threads.@threads for k in range(1, 64)
                    for l in range(1, 64)
                        for m in range(1, 64)
                            res[i, j, k, l, m] = max(cubes_1[i, j, k, l, m], cubes_2[i, j, k, l, m])
                        end
                    end
                end
            end
        end
        res
    end

    index_0 = findfirst(isequal(0), eaps)
    index_12 = findfirst(isequal(12), eaps)
    index_13 = findfirst(isequal(13), eaps)
    index_14 = findfirst(isequal(14), eaps)

    cube_max_13_14 = cube_max(cubes[index_13, :, :, :, :, :], cubes[index_14, :, :, :, :, :])

    function do_cubes(name, cubes)
        cube_list = []
        index_list = []
        for type in range(1, length(types))
            for method in range(1, 3)
                push!(cube_list, cubes[type, method, :, :, :])
                push!(index_list, (type, method))
            end
        end
        allgraphs = cube_flatten_z(cube_list)
        for (i, (type, method)) in enumerate(index_list)
            graph2d(string(name, "_", types[type], "2_", methods[method], "_AllProbes"), allgraphs[i], "i", "j")
            for slice in diff_slices_offset_0
                graph2d(string(name, "_", types[type], "2_", methods[method], "_Slice_", slice), cubes[type, method, slice + 1, :, :], "j", "probe")
            end
        end
    end

    graph_13_14 = @task begin
        do_cubes("julia_max_13_14", cube_max_13_14)
    end
    schedule(graph_13_14)

    print(" OK\n")

    print("Computing Any difference between 0 and 12...")

    function cube_differences(cubes_1, cubes_2)
        @assert size(cubes_1) == size(cubes_2)
        sizes = size(cubes_1)
        @assert length(sizes) == 5
        res = fill(0.0, sizes)
        for i in range(1, sizes[1])
            for j in range(1, sizes[2])
                Threads.@threads for k in range(1, 64)
                    for l in range(1, 64)
                        for m in range(1, 64)
                            res[i, j, k, l, m] = abs(cubes_1[i, j, k, l, m] - cubes_2[i, j, k, l, m])
                        end
                    end
                end
            end
        end
        res
    end

    cube_diff_0_12 = cube_differences(cubes[index_0, :, :, :, :, :], cubes[index_12, :, :, :, :, :])

    graph_0_12 = @task begin
        do_cubes("julia_diff_0_12", cube_diff_0_12)
    end
    schedule(graph_0_12)

    print(" OK\n")

    print("Computing Differences between 12 and (13 union 14)...")

    cube_diff_12_1314 = cube_differences(cubes[index_12, :, :, :, :, :], cube_max_13_14)

    graph_12_1314 = @task begin
        do_cubes("julia_diff_12_1314", cube_diff_12_1314)
    end
    schedule(graph_12_1314)

    wait(graph_13_14)
    wait(graph_0_12)
    wait(graph_12_1314)
    print("done\n")
end
403
analysis/makeplots-extra.jl
Normal file
@ -0,0 +1,403 @@
using CSV
using Plots
pgfplotsx()

function myfill(element, dimensions)
    res = fill(element, dimensions)
    res = map(x -> deepcopy(x), res)
    res
end
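# Note: `fill` puts the *same* object in every cell, so mutating one cell of a
# fill-ed array of arrays would mutate all of them; the `deepcopy` map gives
# each cell an independent copy. Illustration, with made-up values:
#   a = fill(zeros(2), 2); a[1][1] = 1.0   # a[2][1] is now 1.0 too (aliased)
#   b = myfill(zeros(2), 2); b[1][1] = 1.0 # b[2][1] stays 0.0 (independent)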

# General TODOs: fix the ticks, add legends.
#eaps = [12,13]
#eaps = [0,14,15]
eaps = [0, 12, 13, 14, 15]
len_eaps = length(eaps)
types = ["S", "U"]
functions_identifier = ["A", "B", "C", "D", "E"]
#functions_identifier = ["B"]

function A_index(k, x, y, z)
    if (x + k) % 64 != y
        print(string("Unexpected row for pattern A: k = ", k, ", x = ", x, ", y = ", y, ", z = ", z))
        @assert false
    end
    (x, z)
end

function B_index(k, x, y, z)
    @assert (x - k + 64) % 64 == y
    (x, z)
end

function C_index(k, x, y, z)
    @assert (y + k) % 64 == z
    (x, y)
end

function D_index(k, x, y, z)
    @assert (y - k + 64) % 64 == z
    (x, y)
end

function E_index(k, x, y, z)
    @assert (y - x + 64) % 64 == (z - y + 64) % 64
    (x, (y - x + 64) % 64)
end
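# Each *_index function asserts that a row matches its access pattern with
# stride k, then collapses the three offsets onto the two free coordinates.
# Worked example with a made-up row, pattern A and k = 4: since
# (10 + 4) % 64 == 14, A_index(4, 10, 14, 30) returns (10, 30).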

functions_index = [A_index, B_index, C_index, D_index, E_index]
#functions_index = [B_index]
#types = ["S"]
ks = [[1, 2, 3, 4, 8],
      [1, 2, 3, 4, 8],
      [1, 2, 3, 4, 8],
      [1, 2, 3, 4, 8],
      [2, 3, 4]
]

#ks = [[8]]

methods = ["SF", "SR", "FF"]

plot_lock = ReentrantLock()

slices_offset_0 = [0, 1, 2, 8, 14, 15, 30, 31, 32, 55, 56, 61, 62, 63]
#slices_offset_0 = []

diff_slices_offset_0 = [0, 1, 2, 61, 62, 63]

function make_name(eap, type, f, k)
    string("extrap/extrap-with-", eap, "-prefetcher.", type, f, k, ".csv")
end

all_file_names = myfill((0, 0, 0, []), (length(eaps), length(types), length(functions_index)))

for x in 1:len_eaps
    for (y, type) in enumerate(types)
        for (z, f) in enumerate(functions_identifier)
            all_file_names[x, y, z] = (x, y, z, [])
            for (i, k) in enumerate(ks[z])
                # insert the file names for this (eap, type, function) cell
                push!(all_file_names[x, y, z][4], (x, y, z, k, make_name(eaps[x], type, f, k)))
            end
        end
    end
end

print(all_file_names)

#files = Matrix(CSV, length(eaps), length(types), length(levels))
files = Array{
    Union{
        Nothing,
        Tuple{Int64, Int64, Int64, Vector{
            Tuple{Int64, Int64, Int64, Int64, CSV.File}
        }}
    }, 3
}(nothing, length(eaps), length(types), length(functions_identifier))
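# Pre-sizing `files` with `nothing` lets the threaded loop below fill each cell
# exactly once, without push!-ing to shared state. The same idiom in miniature
# (made-up example):
#   slots = Array{Union{Nothing, Int}, 1}(nothing, 4)
#   Threads.@threads for i in 1:4
#       slots[i] = i^2   # every thread writes a distinct index, so no lock is needed
#   end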

Threads.@threads for f in all_file_names
    x = f[1]
    y = f[2]
    z = f[3]
    files[x, y, z] = (x, y, z, [])
    for (x, y, z, k, name) in f[4]
        push!(files[x, y, z][4], (x, y, z, k, CSV.File(name)))
    end
end

# TODO:
#
# - Split this function into a "load data into square/cube structure" function and a plot function.
# - Refactor the code below to compute the various squares/cubes and then do the plots.
# - Refactor the slicing function too.
# - Create a custom diagonal slice function?

preamble_printed = false

push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\newcommand{\gdfigurewidth}{150mm}")
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\newcommand{\gdfigureheight}{100mm}")

function graph2d(name, matrix, xlabel, ylabel)
    x = range(0, 63)
    y = range(0, 63)
    function hmp2d(x, y)
        matrix[x + 1, y + 1]
    end
    lock(plot_lock) do
        graph = heatmap(x, y, hmp2d, minorgrid = true, height = raw"{\gdfigureheight}}, width = {{\gdfigurewidth}", xlabel = xlabel, ylabel = ylabel, c = :blues, extra_kwargs = :subplot)
        if !preamble_printed
            global preamble_printed = true
            print(Plots.pgfx_preamble(graph))
        end
        savefig(graph, string(name, ".tikz"))
        savefig(graph, string(name, ".pdf"))
    end
end

function graph2dclims(name, matrix, clims, xlabel, ylabel)
    x = range(0, 63)
    y = range(0, 63)
    function hmp2d(x, y)
        matrix[x + 1, y + 1]
    end
    lock(plot_lock) do
        graph = heatmap(x, y, hmp2d, clims = clims, minorgrid = true, height = raw"{\gdfigureheight}}, width = {{\gdfigurewidth}", xlabel = xlabel, ylabel = ylabel, extra_kwargs = :subplot)
        savefig(graph, string(name, ".tikz"))
        savefig(graph, string(name, ".pdf"))
    end
end

function cube_flatten_z(cubes)
    len = length(cubes)
    res = myfill(myfill(0.0, (64, 64)), len)
    for k in range(1, 64)
        Threads.@threads for i in range(1, 64)
            for j in range(1, 64)
                for l in range(1, len)
                    res[l][i, j] += cubes[l][i, j, k]
                end
            end
        end
    end
    res
end

function slice_extract_x(cubes, slices)
    slice_length = length(slices)
    cube_length = length(cubes)
    res = myfill(myfill(myfill(0.0, (64, 64)), slice_length), cube_length)
    for i in range(1, 64)
        for j in range(1, 64)
            for (k, slice) in enumerate(slices)
                for l in range(1, cube_length)
                    res[l][k][i, j] = cubes[l][slice + 1, i, j]
                end
            end
        end
    end
    res
end

function graph_2(basename, csv, k, index_function)
    result = fill(-1.0, (3, 64, 64, 64))

    # Fill in the 3D cube, then create the various slices and flattenings.
    # Flattened cube with x = first addr, y = second addr: compute the sum of prefetches.
    # Grab a few first addresses and look at them with x = second addr, y = probe addr:
    # 0, 1, 62, 63, 14, 15, plus one other depending on what appears.

    for row in csv
        probe = row.ProbeAddr
        offset_0 = row.Offset_0
        offset_1 = row.Offset_1
        offset_2 = row.Offset_2

        index = index_function(k, offset_0, offset_1, offset_2)
        i = index[1] + 1
        j = index[2] + 1
        @assert result[:, i, j, probe + 1] == [-1.0, -1.0, -1.0]
        result[1, i, j, probe + 1] = row.Probe_SF_HR
        result[2, i, j, probe + 1] = row.Probe_SR_HR
        result[3, i, j, probe + 1] = row.Probe_FF_HR
    end

    allprobes = cube_flatten_z([result[1, :, :, :], result[2, :, :, :], result[3, :, :, :]])
    sf_probe_heatmap_allprobes = allprobes[1]
    sr_probe_heatmap_allprobes = allprobes[2]
    ff_probe_heatmap_allprobes = allprobes[3]

    all_slices = slice_extract_x([result[1, :, :, :], result[2, :, :, :], result[3, :, :, :]], slices_offset_0)
    sf_probe_slices_heatmaps = all_slices[1]
    sr_probe_slices_heatmaps = all_slices[2]
    ff_probe_slices_heatmaps = all_slices[3]

    graph2d(string(basename, "_SF_AllProbes"), sf_probe_heatmap_allprobes, "i", "j")
    graph2d(string(basename, "_SR_AllProbes"), sr_probe_heatmap_allprobes, "i", "j")
    graph2d(string(basename, "_FF_AllProbes"), ff_probe_heatmap_allprobes, "i", "j")

    for (i, offset_0) in enumerate(slices_offset_0)
        print(offset_0)
        graph2dclims(string(basename, "_SF_Slice_", offset_0), sf_probe_slices_heatmaps[i], (0, 1), "j", "probe")
        graph2dclims(string(basename, "_SR_Slice_", offset_0), sr_probe_slices_heatmaps[i], (0, 1), "j", "probe")
        graph2dclims(string(basename, "_FF_Slice_", offset_0), ff_probe_slices_heatmaps[i], (0, 1), "j", "probe")
    end

    result
end

cubes = myfill([], (length(eaps), length(types), length(functions_identifier)))

# need to push (k, cube)

Threads.@threads for experiment in files
    for (eap, type, f, k, file) in experiment[4]
        name = string(eaps[eap], "/julia_eap_", eaps[eap], "_", types[type], functions_identifier[f], k)
        print(string(name, "\n"))
        cube_3 = graph_2(name, file, k, functions_index[f])
        push!(cubes[eap, type, f], cube_3)
    end
end

print("Computing 14 union 13...")

function cube_max(cubes_1, cubes_2)
    @assert size(cubes_1) == size(cubes_2)
    sizes = size(cubes_1)
    @assert length(sizes) == 5
    res = fill(0.0, sizes)
    for i in range(1, sizes[1])
        for j in range(1, sizes[2])
            Threads.@threads for k in range(1, 64)
                for l in range(1, 64)
                    for m in range(1, 64)
                        res[i, j, k, l, m] = max(cubes_1[i, j, k, l, m], cubes_2[i, j, k, l, m])
                    end
                end
            end
        end
    end
    res
end

index_0 = findfirst(isequal(0), eaps)
index_12 = findfirst(isequal(12), eaps)
index_13 = findfirst(isequal(13), eaps)
index_14 = findfirst(isequal(14), eaps)

cube_max_13_14 = cube_max(cubes[index_13, :, :, :, :, :], cubes[index_14, :, :, :, :, :])

function do_cubes(name, cubes)
    cube_list = []
    index_list = []
    for type in range(1, length(types))
        for method in range(1, 3)
            push!(cube_list, cubes[type, method, :, :, :])
            push!(index_list, (type, method))
        end
    end
    allgraphs = cube_flatten_z(cube_list)
    for (i, (type, method)) in enumerate(index_list)
        graph2d(string(name, "_", types[type], "2_", methods[method], "_AllProbes"), allgraphs[i], "i", "j")
        for slice in diff_slices_offset_0
            graph2d(string(name, "_", types[type], "2_", methods[method], "_Slice_", slice), cubes[type, method, slice + 1, :, :], "j", "probe")
        end
    end
end

graph_13_14 = @task begin
    do_cubes("julia_max_13_14", cube_max_13_14)
end
schedule(graph_13_14)
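# The graphing passes (graph_13_14 above, graph_0_12 and graph_12_1314 below)
# run as concurrently scheduled tasks: `schedule` starts each one, the `wait`
# calls at the end of the script block until they finish, and `plot_lock`
# inside graph2d/graph2dclims serializes the actual Plots.jl calls.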

print(" OK\n")

print("Computing Any difference between 0 and 12...")

function cube_differences(cubes_1, cubes_2)
    @assert size(cubes_1) == size(cubes_2)
    sizes = size(cubes_1)
    @assert length(sizes) == 5
    res = fill(0.0, sizes)
    for i in range(1, sizes[1])
        for j in range(1, sizes[2])
            Threads.@threads for k in range(1, 64)
                for l in range(1, 64)
                    for m in range(1, 64)
                        res[i, j, k, l, m] = abs(cubes_1[i, j, k, l, m] - cubes_2[i, j, k, l, m])
                    end
                end
            end
        end
    end
    res
end

cube_diff_0_12 = cube_differences(cubes[index_0, :, :, :, :, :], cubes[index_12, :, :, :, :, :])

graph_0_12 = @task begin
    do_cubes("julia_diff_0_12", cube_diff_0_12)
end
schedule(graph_0_12)

print(" OK\n")

print("Computing Differences between 12 and (13 union 14)...")

cube_diff_12_1314 = cube_differences(cubes[index_12, :, :, :, :, :], cube_max_13_14)

graph_12_1314 = @task begin
    do_cubes("julia_diff_12_1314", cube_diff_12_1314)
end
schedule(graph_12_1314)

wait(graph_13_14)
wait(graph_0_12)
wait(graph_12_1314)
print("done\n")
419
analysis/makeplots.jl
Normal file
@ -0,0 +1,419 @@
using CSV
using Plots
pgfplotsx()

# General TODOs: fix the ticks, add legends.
#eaps = [0,12,13,14]
eaps = [0, 12, 13, 14, 15]
len_eaps = length(eaps)
types = ["S", "U"]
#types = ["S"]
levels = [0, 1, 2]

methods = ["SF", "SR", "FF"]

plot_lock = ReentrantLock()

slices_offset_0 = [0, 1, 2, 8, 14, 15, 30, 31, 32, 55, 56, 61, 62, 63]
#slices_offset_0 = []

diff_slices_offset_0 = [0, 1, 2, 61, 62, 63]

function make_name(eap, type, level)
    string("eap/eap-with-", eap, "-prefetcher.", type, level, ".csv")
end

all_file_names = fill((0, 0, 0, ""), length(eaps), length(types), length(levels))
Threads.@threads for x in 1:len_eaps
    for (y, type) in enumerate(types)
        for (z, level) in enumerate(levels)
            all_file_names[x, y, z] = (x, y, z, make_name(eaps[x], type, level))
        end
    end
end

#files = Matrix(CSV, length(eaps), length(types), length(levels))
files = Array{Union{Nothing, Tuple{Int64, Int64, Int64, CSV.File}}, 3}(nothing, length(eaps), length(types), length(levels))

Threads.@threads for f in all_file_names
    x = f[1]
    y = f[2]
    z = f[3]
    name = f[4]
    files[x, y, z] = (x, y, z, CSV.File(name))
end

function graph_0(name, csv)
    data = [csv.Probe_FF_HR, csv.Probe_SR_HR, csv.Probe_SF_HR]
    x = range(0, 63)
    y = range(0, 2)
    function f(x, y)
        data[y + 1][x + 1]
    end
    lock(plot_lock) do
        graph = heatmap(x, y, f, yticks = ([0, 1, 2], ["FF", "SR", "SF"]), clims = (0, 1), xlabel = "probe")
        savefig(graph, string("julia_", name, ".tikz"))
        savefig(graph, string("julia_", name, ".pdf"))
    end
end # TODO: double-check whether something better can be done wrt the y names.

# TODO:
#
# - Split this function into a "load data into square/cube structure" function and a plot function.
# - Refactor the code below to compute the various squares/cubes and then do the plots.
# - Refactor the slicing function too.
# - Create a custom diagonal slice function?

preamble_printed = false

push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\newcommand{\gdfigurewidth}{150mm}")
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\newcommand{\gdfigureheight}{100mm}")

function graph2d(name, matrix, xlabel, ylabel)
    x = range(0, 63)
    y = range(0, 63)
    function hmp2d(x, y)
        matrix[x + 1, y + 1]
    end
    lock(plot_lock) do
        graph = heatmap(x, y, hmp2d, minorgrid = true, height = raw"{\gdfigureheight}}, width = {{\gdfigurewidth}", xlabel = xlabel, ylabel = ylabel, c = :blues, extra_kwargs = :subplot)
        if !preamble_printed
            global preamble_printed = true
            print(Plots.pgfx_preamble(graph))
        end
        savefig(graph, string(name, ".tikz"))
        savefig(graph, string(name, ".pdf"))
    end
end

function graph2dclims(name, matrix, clims, xlabel, ylabel)
    x = range(0, 63)
    y = range(0, 63)
    function hmp2d(x, y)
        matrix[x + 1, y + 1]
    end
    lock(plot_lock) do
        graph = heatmap(x, y, hmp2d, clims = clims, minorgrid = true, height = raw"{\gdfigureheight}}, width = {{\gdfigurewidth}", xlabel = xlabel, ylabel = ylabel, extra_kwargs = :subplot)
        savefig(graph, string(name, ".tikz"))
        savefig(graph, string(name, ".pdf"))
    end
end

function graph_1(basename, csv)
    # Define the 2D arrays for the 3 heatmaps.
    sf_probe_heatmap = fill(-1.0, 64, 64)
    sr_probe_heatmap = fill(-1.0, 64, 64)
    ff_probe_heatmap = fill(-1.0, 64, 64)
    # Define 3 1D arrays to build the heatmap for the average time of the first access in FF/SR/SF modes.
    sf_offset_hit_time = fill(-1.0, 64)
    sr_offset_hit_time = fill(-1.0, 64)
    ff_offset_hit_time = fill(-1.0, 64)
    # Iterate on the rows and fill in the 2D arrays. Rows arrive with probe
    # running 0..63 for each offset, so the running sum becomes an average by
    # dividing by 64 on the last probe.
    for row in csv
        offset = row.Offset_0
        probe = row.ProbeAddr
        @assert sf_probe_heatmap[offset + 1, probe + 1] == -1.0
        sf_probe_heatmap[offset + 1, probe + 1] = row.Probe_SF_HR
        sr_probe_heatmap[offset + 1, probe + 1] = row.Probe_SR_HR
        ff_probe_heatmap[offset + 1, probe + 1] = row.Probe_FF_HR

        if probe == 0
            @assert sf_offset_hit_time[offset + 1] == -1.0
            sf_offset_hit_time[offset + 1] = 0.0
        end
        sf_offset_hit_time[offset + 1] += row.Offset_0_SF_HR
        sr_offset_hit_time[offset + 1] += row.Offset_0_SR_HR
        ff_offset_hit_time[offset + 1] += row.Offset_0_FF_HR
        if probe == 63
            sf_offset_hit_time[offset + 1] /= 64
            sr_offset_hit_time[offset + 1] /= 64
            ff_offset_hit_time[offset + 1] /= 64
        end
    end

    graph2dclims(string("julia_", basename, "_SF"), sf_probe_heatmap, (0, 1), "i", "probe")
    graph2dclims(string("julia_", basename, "_SR"), sr_probe_heatmap, (0, 1), "i", "probe")
    graph2dclims(string("julia_", basename, "_FF"), ff_probe_heatmap, (0, 1), "i", "probe")

    data = [ff_offset_hit_time, sr_offset_hit_time, sf_offset_hit_time]
    x = range(0, 63)
    y = range(0, 2)
    function f(x, y)
        data[y + 1][x + 1]
    end
    lock(plot_lock) do
        graph = heatmap(x, y, f)
        savefig(graph, string("julia_", basename, "_Offset_0_HT.tikz"))
        savefig(graph, string("julia_", basename, "_Offset_0_HT.pdf"))
    end
end

function myfill(element, dimensions)
    res = fill(element, dimensions)
    res = map(x -> deepcopy(x), res)
    res
end

function cube_flatten_z(cubes)
    len = length(cubes)
    res = myfill(myfill(0.0, (64, 64)), len)
    for k in range(1, 64)
        Threads.@threads for i in range(1, 64)
            for j in range(1, 64)
                for l in range(1, len)
                    res[l][i, j] += cubes[l][i, j, k]
                end
            end
        end
    end
    res
end

function slice_extract_x(cubes, slices)
    slice_length = length(slices)
    cube_length = length(cubes)
    res = myfill(myfill(myfill(0.0, (64, 64)), slice_length), cube_length)
    for i in range(1, 64)
        for j in range(1, 64)
            for (k, slice) in enumerate(slices)
                for l in range(1, cube_length)
                    res[l][k][i, j] = cubes[l][slice + 1, i, j]
                end
            end
        end
    end
    res
end

function graph_2(basename, csv)
    # First define a 3D cube for the resulting data.
    sf_probe_heatmap = myfill(-1.0, (64, 64, 64))
    sr_probe_heatmap = myfill(-1.0, (64, 64, 64))
    ff_probe_heatmap = myfill(-1.0, (64, 64, 64))
    # Fill in the 3D cube, then create the various slices and flattenings.
    # Flattened cube with x = first addr, y = second addr: compute the sum of prefetches.
    # Grab a few first addresses and look at them with x = second addr, y = probe addr:
    # 0, 1, 62, 63, 14, 15, plus one other depending on what appears.

    # Also define and fill in a 2D matrix of offset1-offset2 hit time.
    sf_offset_hit_time = myfill(-1.0, (64, 64))
    sr_offset_hit_time = myfill(-1.0, (64, 64))
    ff_offset_hit_time = myfill(-1.0, (64, 64))

    for row in csv
        probe = row.ProbeAddr
        offset_0 = row.Offset_0
        offset_1 = row.Offset_1
        @assert sf_probe_heatmap[offset_0 + 1, offset_1 + 1, probe + 1] == -1.0
        sf_probe_heatmap[offset_0 + 1, offset_1 + 1, probe + 1] = row.Probe_SF_HR
        sr_probe_heatmap[offset_0 + 1, offset_1 + 1, probe + 1] = row.Probe_SR_HR
        ff_probe_heatmap[offset_0 + 1, offset_1 + 1, probe + 1] = row.Probe_FF_HR

        if probe == 0
            @assert sf_offset_hit_time[offset_0 + 1, offset_1 + 1] == -1.0
            sf_offset_hit_time[offset_0 + 1, offset_1 + 1] = 0.0
        end
        sf_offset_hit_time[offset_0 + 1, offset_1 + 1] += row.Offset_1_SF_HR
        sr_offset_hit_time[offset_0 + 1, offset_1 + 1] += row.Offset_1_SR_HR
        ff_offset_hit_time[offset_0 + 1, offset_1 + 1] += row.Offset_1_FF_HR
        if probe == 63
            sf_offset_hit_time[offset_0 + 1, offset_1 + 1] /= 64
            sr_offset_hit_time[offset_0 + 1, offset_1 + 1] /= 64
            ff_offset_hit_time[offset_0 + 1, offset_1 + 1] /= 64
        end
    end
    allprobes = cube_flatten_z([sf_probe_heatmap, sr_probe_heatmap, ff_probe_heatmap])
    sf_probe_heatmap_allprobes = allprobes[1]
    sr_probe_heatmap_allprobes = allprobes[2]
    ff_probe_heatmap_allprobes = allprobes[3]

    all_slices = slice_extract_x([sf_probe_heatmap, sr_probe_heatmap, ff_probe_heatmap], slices_offset_0)
    sf_probe_slices_heatmaps = all_slices[1]
    sr_probe_slices_heatmaps = all_slices[2]
    ff_probe_slices_heatmaps = all_slices[3]

    graph2d(string("julia_", basename, "_SF_AllProbes"), sf_probe_heatmap_allprobes, "i", "j")
    graph2d(string("julia_", basename, "_SR_AllProbes"), sr_probe_heatmap_allprobes, "i", "j")
    graph2d(string("julia_", basename, "_FF_AllProbes"), ff_probe_heatmap_allprobes, "i", "j")

    for (i, offset_0) in enumerate(slices_offset_0)
        print(offset_0)
        graph2dclims(string("julia_", basename, "_SF_Slice_", offset_0), sf_probe_slices_heatmaps[i], (0, 1), "j", "probe")
        graph2dclims(string("julia_", basename, "_SR_Slice_", offset_0), sr_probe_slices_heatmaps[i], (0, 1), "j", "probe")
        graph2dclims(string("julia_", basename, "_FF_Slice_", offset_0), ff_probe_slices_heatmaps[i], (0, 1), "j", "probe")
    end
    [sf_probe_heatmap, sr_probe_heatmap, ff_probe_heatmap]
end

Threads.@threads for file in files[:, :, 1]
    name = string("eap_", eaps[file[1]], "_", types[file[2]], levels[file[3]])
    graph_0(name, file[4])
    print(string(name, "\n"))
end

Threads.@threads for file in files[:, :, 2]
    name = string("eap_", eaps[file[1]], "_", types[file[2]], levels[file[3]])
    graph_1(name, file[4])
    print(string(name, "\n"))
end

cubes = fill(0.0, length(eaps), length(types), 3, 64, 64, 64)

Threads.@threads for file in files[:, :, 3]
    name = string("eap_", eaps[file[1]], "_", types[file[2]], levels[file[3]])
    (sf, sr, ff) = graph_2(name, file[4])
    cubes[file[1], file[2], 1, :, :, :] = sf
    cubes[file[1], file[2], 2, :, :, :] = sr
    cubes[file[1], file[2], 3, :, :, :] = ff
    print(string(name, "\n"))
end

print("Computing 14 union 13...")

function cube_max(cubes_1, cubes_2)
    @assert size(cubes_1) == size(cubes_2)
    sizes = size(cubes_1)
    @assert length(sizes) == 5
    res = fill(0.0, sizes)
    for i in range(1, sizes[1])
        for j in range(1, sizes[2])
            Threads.@threads for k in range(1, 64)
                for l in range(1, 64)
                    for m in range(1, 64)
                        res[i, j, k, l, m] = max(cubes_1[i, j, k, l, m], cubes_2[i, j, k, l, m])
                    end
                end
            end
        end
    end
    res
end

index_0 = findfirst(isequal(0), eaps)
index_12 = findfirst(isequal(12), eaps)
index_13 = findfirst(isequal(13), eaps)
index_14 = findfirst(isequal(14), eaps)

cube_max_13_14 = cube_max(cubes[index_13, :, :, :, :, :], cubes[index_14, :, :, :, :, :])

function do_cubes(name, cubes)
    cube_list = []
    index_list = []
    for type in range(1, length(types))
        for method in range(1, 3)
            push!(cube_list, cubes[type, method, :, :, :])
            push!(index_list, (type, method))
        end
    end
    allgraphs = cube_flatten_z(cube_list)
    for (i, (type, method)) in enumerate(index_list)
        graph2d(string(name, "_", types[type], "2_", methods[method], "_AllProbes"), allgraphs[i], "i", "j")
        for slice in diff_slices_offset_0
            graph2d(string(name, "_", types[type], "2_", methods[method], "_Slice_", slice), cubes[type, method, slice + 1, :, :], "j", "probe")
        end
    end
end

graph_13_14 = @task begin
    do_cubes("julia_max_13_14", cube_max_13_14)
end
schedule(graph_13_14)

print(" OK\n")

print("Computing Any difference between 0 and 12...")

function cube_differences(cubes_1, cubes_2)
    @assert size(cubes_1) == size(cubes_2)
    sizes = size(cubes_1)
    @assert length(sizes) == 5
    res = fill(0.0, sizes)
    for i in range(1, sizes[1])
        for j in range(1, sizes[2])
            Threads.@threads for k in range(1, 64)
                for l in range(1, 64)
                    for m in range(1, 64)
                        res[i, j, k, l, m] = abs(cubes_1[i, j, k, l, m] - cubes_2[i, j, k, l, m])
                    end
                end
            end
        end
    end
    res
end

cube_diff_0_12 = cube_differences(cubes[index_0, :, :, :, :, :], cubes[index_12, :, :, :, :, :])

graph_0_12 = @task begin
    do_cubes("julia_diff_0_12", cube_diff_0_12)
end
schedule(graph_0_12)

print(" OK\n")

print("Computing Differences between 12 and (13 union 14)...")

cube_diff_12_1314 = cube_differences(cubes[index_12, :, :, :, :, :], cube_max_13_14)

graph_12_1314 = @task begin
    do_cubes("julia_diff_12_1314", cube_diff_12_1314)
end
schedule(graph_12_1314)

wait(graph_13_14)
wait(graph_0_12)
wait(graph_12_1314)
print("done\n")
74
analysis/sort.sh
Executable file
@ -0,0 +1,74 @@
#!/bin/bash
# Brace expansion below ({S,U}, etc.) requires bash rather than plain POSIX sh.

mkdir -p _/0
mkdir -p _/1
mkdir -p _/2

mv *{S,U}0* _/0/
mv *{S,U}1* _/1/
mv *{S,U}2* _/2/

mkdir -p A/1/
mkdir -p A/2/
mkdir -p A/3/
mkdir -p A/4/
mkdir -p A/8/
mkdir -p B/8/
mkdir -p B/4/
mkdir -p B/3/
mkdir -p B/2/
mkdir -p B/1/
mkdir -p C/1/
mkdir -p C/2/
mkdir -p C/3/
mkdir -p C/4/
mkdir -p C/8/
mkdir -p D/8/
mkdir -p D/1/
mkdir -p D/2/
mkdir -p D/3/
mkdir -p D/4/
mkdir -p E/4/
mkdir -p E/3/
mkdir -p E/2/
mkdir -p F/1/
mkdir -p F/-1/
mkdir -p F/2/
mkdir -p F/-2/
mkdir -p F/3/
mkdir -p F/-3/
mkdir -p F/4/
mkdir -p F/-4/

mv *A1_* A/1/
mv *A2_* A/2/
mv *A3_* A/3/
mv *A4_* A/4/
mv *A8_* A/8/
mv *B8_* B/8/
mv *B4_* B/4/
mv *B3_* B/3/
mv *B2_* B/2/
mv *B1_* B/1/
mv *C1_* C/1/
mv *C2_* C/2/
mv *C3_* C/3/
mv *C4_* C/4/
mv *C8_* C/8/
mv *D8_* D/8/
mv *D1_* D/1/
mv *D2_* D/2/
mv *D3_* D/3/
mv *D4_* D/4/
mv *E4_* E/4/
mv *E3_* E/3/
mv *E2_* E/2/
mv *F1_* F/1/
mv *F-1_* F/-1/
mv *F2_* F/2/
mv *F-2_* F/-2/
mv *F3_* F/3/
mv *F-3_* F/-3/
mv *F4_* F/4/
mv *F-4_* F/-4/
@ -1,7 +1,7 @@
[package]
name = "basic_timing_cache_channel"
version = "0.1.0"
authors = ["GuillaumeDIDIER <guillaume.didier95@hotmail.fr>"]
authors = ["Guillaume DIDIER <guillaume.didier.2014@polytechnique.org>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@ -9,4 +9,4 @@ edition = "2018"
[dependencies]
cache_utils = { path = "../cache_utils" }
cache_side_channel = { path = "../cache_side_channel" }
nix = "0.20.0"
nix = "0.28.0"
@ -13,7 +13,6 @@ use cache_side_channel::table_side_channel::{
    MultipleTableCacheSideChannel, SingleTableCacheSideChannel, TableAttackResult,
    TableCacheSideChannel,
};
use cache_side_channel::SideChannelError::AddressNotReady;
use cache_side_channel::{
    BitIterator, CacheStatus, ChannelFatalError, ChannelHandle, CoreSpec, CovertChannel,
    MultipleAddrCacheSideChannel, SideChannelError, SingleAddrCacheSideChannel,
@ -26,9 +25,9 @@ use cache_utils::calibration::{
    CFLUSH_BUCKET_NUMBER, CFLUSH_BUCKET_SIZE, CFLUSH_NUM_ITER, CLFLUSH_NUM_ITERATION_AV, PAGE_LEN,
    SP, VPN,
};
use cache_utils::complex_addressing::{CacheAttackSlicing, CacheSlicing};
use cache_utils::complex_addressing::{CacheAttackSlicing};
use cache_utils::mmap::MMappedMemory;
use cache_utils::{find_core_per_socket, flush, maccess, noop};
use cache_utils::{find_core_per_socket, flush, maccess};
use nix::sched::sched_getaffinity;
use nix::sched::CpuSet;
use nix::unistd::Pid;
@ -314,7 +313,7 @@ impl<T: TimingChannelPrimitives> TopologyAwareTimingChannel<T> {

        // Select the proper core

        for (av, (global_error_pred, thresholds)) in res.iter() {
        for (av, (global_error_pred, _thresholds)) in res.iter() {
            if global_error_pred.error_rate() < best_error_rate {
                best_av = *av;
                best_error_rate = global_error_pred.error_rate();
@ -625,7 +624,7 @@ impl<T: TimingChannelPrimitives> MultipleAddrCacheSideChannel for TopologyAwareT
            pages.into_iter().map(|(k, v)| v),
            self.calibration_strategy,
        ) {
            Err(e) => {
            Err(_e) => {
                return Err(ChannelFatalError::Oops);
            }
            Ok(r) => r,
@ -740,7 +739,7 @@ impl<T: TimingChannelPrimitives> CovertChannel for TopologyAwareTimingChannel<T>
    unsafe fn ready_page(&mut self, page: *const u8) -> Result<Self::CovertChannelHandle, ()> {
        let vpn: VPN = get_vpn(page);
        // Check if the page has already been readied. If so should error out ?
        if let Some(preferred) = self.preferred_address.get(&vpn) {
        if self.preferred_address.get(&vpn).is_some() {
            return Err(());
        }

@ -758,7 +757,7 @@ impl<T: TimingChannelPrimitives> CovertChannel for TopologyAwareTimingChannel<T>
            pages.into_iter(),
            self.calibration_strategy,
        ) {
            Err(e) => {
            Err(_e) => {
                return Err(());
            }
            Ok(r) => r,
@ -816,7 +815,7 @@ impl<T: TimingChannelPrimitives> CovertChannel for TopologyAwareTimingChannel<T>
                calibration_epoch: self.calibration_epoch,
            },
        };
        let r = unsafe { self.prepare_one_impl(&mut handle.0) }.unwrap();
        unsafe { self.prepare_one_impl(&mut handle.0) }.unwrap();

        return Ok(handle);
    }
@ -1,11 +1,11 @@
[package]
name = "cache_side_channel"
version = "0.1.0"
authors = ["GuillaumeDIDIER <guillaume.didier95@hotmail.fr>"]
authors = ["Guillaume DIDIER <guillaume.didier.2014@polytechnique.org>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
nix = "0.20.0"
bit_field = "0.10.1"
nix = "0.28.0"
bit_field = "0.10.2"
@ -41,7 +41,7 @@ pub fn restore_affinity(cpu_set: &CpuSet) {
    sched_setaffinity(Pid::from_raw(0), &cpu_set).unwrap();
}

#[must_use = "This result must be used to restore affinity"]
//#[must_use = "This result must be used to restore affinity"]
pub fn set_affinity(cpu_set: &CpuSet) -> Result<CpuSet, nix::Error> {
    let old = sched_getaffinity(Pid::from_raw(0))?;
    sched_setaffinity(Pid::from_raw(0), &cpu_set)?;
@ -161,7 +161,7 @@ impl<'a> BitIterator<'a> {
        }
    }

    pub fn atEnd(&self) -> bool {
    pub fn at_end(&self) -> bool {
        self.byte_index >= self.bytes.len()
    }
}
@ -118,11 +118,11 @@ impl<T: SingleAddrCacheSideChannel> SingleTableCacheSideChannel<T::Handle> for T
                SideChannelError::AddressNotCalibrated(_addr) => unimplemented!(),
            },
        }
        for iteration in 0..100 {
        for _iteration in 0..100 {
            self.victim_single(victim);
            let r = unsafe { self.test_single(addr, true) };
            match r {
                Ok(status) => {}
                Ok(_status) => {}
                Err(e) => match e {
                    SideChannelError::NeedRecalibration => panic!(),
                    SideChannelError::FatalError(e) => {
@ -193,7 +193,7 @@ impl<T: MultipleAddrCacheSideChannel> MultipleTableCacheSideChannel<T::Handle> f
        batch.push(&mut **addr);
        let mut hits: HashMap<*const u8, u32> = HashMap::new();
        let mut misses: HashMap<*const u8, u32> = HashMap::new();
        for i in 1..T::MAX_ADDR {
        for _i in 1..T::MAX_ADDR {
            if let Some(addr) = addr_iter.next() {
                batch.push(&mut **addr);
            } else {
@ -1,8 +1,9 @@
[package]
name = "cache_utils"
version = "0.1.0"
authors = ["guillaume didier <guillaume.didier@inria.fr>"]
authors = ["Guillaume DIDIER <guillaume.didier.2014@polytechnique.org>"]
edition = "2018"
license = "MIT OR Apache-2.0"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

@ -10,13 +11,13 @@ edition = "2018"
polling_serial = { path = "../polling_serial", optional = true }
vga_buffer = { path = "../vga_buffer", optional = true }
cpuid = { path = "../cpuid", default-features = false }
x86_64 = "0.14.1"
x86_64 = "0.15.1"
static_assertions = "1.1.0"
itertools = { version = "0.10.0", default-features = false }
atomic = "0.5.0"
itertools = { version = "0.12.1", default-features = false }
atomic = "0.6.0"

nix = { version = "0.20.0", optional = true }
libc = { version = "0.2.92", optional = true }
nix = { version = "0.28.0", optional = true, features = ["process", "mman", "sched"] }
libc = { version = "0.2.153", optional = true }
hashbrown = { version = "0.11.2", optional = true }
turn_lock = { path = "../turn_lock", optional = true}
lazy_static = "1.4.0"
3
cache_utils/Readme.md
Normal file
@ -0,0 +1,3 @@

Notes for Fedora 39:
- `kernel-tools` and `kernel-tools-devel` are needed
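- (On stock Fedora repositories the usual way to get them would be `sudo dnf install kernel-tools kernel-tools-devel`.)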
231
cache_utils/analyse_csv.py
Normal file
@ -0,0 +1,231 @@
# SPDX-FileCopyrightText: 2021 Guillaume DIDIER
#
# SPDX-License-Identifier: Apache-2.0
# SPDX-License-Identifier: MIT

import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tikzplotlib
from sys import exit
import wquantiles as wq
import numpy as np

from functools import partial

import sys

# For cyber cobay sanity check:
from gmpy2 import popcount
functions_i9_9900 = [
    0b1111111111010101110101010001000000,
    0b0110111110111010110001001000000000,
    0b1111111000011111110010110000000000]


def complex_hash(addr):
    r = 0
    for f in reversed(functions_i9_9900):
        r <<= 1
        r |= (popcount(f & addr) & 1)
    return r
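# complex_hash recombines the three per-bit parity functions into a 3-bit slice
# id: popcount(f & addr) & 1 is the XOR (parity) of the physical address bits
# selected by mask f, contributing one bit of the slice hash per function.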

def convert64(x):
    return np.int64(int(x, base=16))


def convert8(x):
    return np.int8(int(x, base=16))


df = pd.read_csv(sys.argv[1] + "-results_lite.csv.bz2",
                 dtype={
                     "main_core": np.int8,
                     "helper_core": np.int8,
                     # "address": int,
                     # "hash": np.int8,
                     "time": np.int16,
                     "clflush_remote_hit": np.int32,
                     "clflush_shared_hit": np.int32,
                     "clflush_miss_f": np.int32,
                     "clflush_local_hit_f": np.int32,
                     "clflush_miss_n": np.int32,
                     "clflush_local_hit_n": np.int32,
                     "reload_miss": np.int32,
                     "reload_remote_hit": np.int32,
                     "reload_shared_hit": np.int32,
                     "reload_local_hit": np.int32},
                 converters={'address': convert64, 'hash': convert8},
                 )

sample_columns = [
    "clflush_remote_hit",
    "clflush_shared_hit",
    "clflush_miss_f",
    "clflush_local_hit_f",
    "clflush_miss_n",
    "clflush_local_hit_n",
    "reload_miss",
    "reload_remote_hit",
    "reload_shared_hit",
    "reload_local_hit",
]

sample_flush_columns = [
    "clflush_remote_hit",
    "clflush_shared_hit",
    "clflush_miss_f",
    "clflush_local_hit_f",
    "clflush_miss_n",
    "clflush_local_hit_n",
]


slice_mapping = pd.read_csv(sys.argv[1] + ".slices.csv")
core_mapping = pd.read_csv(sys.argv[1] + ".cores.csv")


def remap_core(key):
    def remap(core):
        remapped = core_mapping.iloc[core]
        return remapped[key]

    return remap


df["main_socket"] = df["main_core"].apply(remap_core("socket"))
df["main_core_fixed"] = df["main_core"].apply(remap_core("core"))
df["main_ht"] = df["main_core"].apply(remap_core("hthread"))
df["helper_socket"] = df["helper_core"].apply(remap_core("socket"))
df["helper_core_fixed"] = df["helper_core"].apply(remap_core("core"))
df["helper_ht"] = df["helper_core"].apply(remap_core("hthread"))

# slice_mapping = {3: 0, 1: 1, 2: 2, 0: 3}

slice_remap = lambda h: slice_mapping["slice_group"].iloc[h]
df["slice_group"] = df["hash"].apply(slice_remap)


print(df.columns)
#df["Hash"] = df["Addr"].apply(lambda x: (x >> 15) & 0x3)

addresses = df["address"].unique()
print(addresses)
print(*[bin(a) for a in addresses], sep='\n')

print(df.head())

print(df["hash"].unique())

min_time = df["time"].min()
max_time = df["time"].max()

q10s = [wq.quantile(df["time"], df[col], 0.1) for col in sample_flush_columns]
q90s = [wq.quantile(df["time"], df[col], 0.9) for col in sample_flush_columns]

graph_upper = int(((max(q90s) + 19) // 10) * 10)
graph_lower = int(((min(q10s) - 10) // 10) * 10)
# graph_lower = (min_time // 10) * 10
# graph_upper = ((max_time + 9) // 10) * 10

print("graphing between {}, {}".format(graph_lower, graph_upper))

df_main_core_0 = df[df["main_core"] == 0]
#df_helper_core_0 = df[df["helper_core"] == 0]

colours = ["b", "r", "g", "y"]


def custom_hist(x, *y, **kwargs):
    for (i, yi) in enumerate(y):
        kwargs["color"] = colours[i]
        sns.distplot(x, range(graph_lower, graph_upper), hist_kws={"weights": yi, "histtype": "step"}, kde=False, **kwargs)


custom_hist(df["time"], df["clflush_miss_n"], df["clflush_remote_hit"])

tikzplotlib.save("fig-hist-all.tex")  # , axis_width=r'0.175\textwidth', axis_height=r'0.25\textwidth')
plt.show()

attacker = 2
victim = 7
slice = 14

df_ax_vx_sx = df[(df["hash"] == slice) & (df["main_core"] == attacker) & (df["helper_core"] == victim)]

custom_hist(df_ax_vx_sx["time"], df_ax_vx_sx["clflush_miss_n"], df_ax_vx_sx["clflush_remote_hit"])
tikzplotlib.save("fig-hist-good-A{}V{}S{}.tex".format(attacker, victim, slice))  # , axis_width=r'0.175\textwidth', axis_height=r'0.25\textwidth')
plt.show()

attacker = 9
victim = 4
slice = 8

df_ax_vx_sx = df[(df["hash"] == slice) & (df["main_core"] == attacker) & (df["helper_core"] == victim)]

custom_hist(df_ax_vx_sx["time"], df_ax_vx_sx["clflush_miss_n"], df_ax_vx_sx["clflush_remote_hit"])
tikzplotlib.save("fig-hist-bad-A{}V{}S{}.tex".format(attacker, victim, slice))  # , axis_width=r'0.175\textwidth', axis_height=r'0.25\textwidth')
plt.show()


g = sns.FacetGrid(df_main_core_0, col="helper_core", row="hash", legend_out=True)
g2 = sns.FacetGrid(df, col="main_core", row="hash", legend_out=True)


# Colour convention here:
# Blue = miss
# Red = remote hit
# Green = local hit
# Yellow = shared hit

g.map(custom_hist, "time", "clflush_miss_n", "clflush_remote_hit", "clflush_local_hit_n", "clflush_shared_hit")

g2.map(custom_hist, "time", "clflush_miss_n", "clflush_remote_hit", "clflush_local_hit_n", "clflush_shared_hit")

# g.map(sns.distplot, "time", hist_kws={"weights": df["clflush_hit"]}, kde=False)

#plt.show()
#plt.figure()

#df_mcf6 = df[df["main_core_fixed"] == 6]
#df_mcf6_slg7 = df_mcf6[df_mcf6["slice_group"] == 7]
#g3 = sns.FacetGrid(df_mcf6_slg7, row="helper_core_fixed", col="main_ht")
#g3.map(custom_hist, "time", "clflush_miss_n", "clflush_remote_hit", "clflush_local_hit_n", "clflush_shared_hit")

#g4 = sns.FacetGrid(df_mcf6_slg7, row="helper_core_fixed", col="helper_ht")
#g4.map(custom_hist, "time", "clflush_miss_n", "clflush_remote_hit", "clflush_local_hit_n", "clflush_shared_hit")


def stat(x, key):
    return wq.median(x["time"], x[key])


miss = df.groupby(["main_core", "helper_core", "hash"]).apply(stat, "clflush_miss_n")
hit_remote = df.groupby(["main_core", "helper_core", "hash"]).apply(stat, "clflush_remote_hit")
hit_local = df.groupby(["main_core", "helper_core", "hash"]).apply(stat, "clflush_local_hit_n")
hit_shared = df.groupby(["main_core", "helper_core", "hash"]).apply(stat, "clflush_shared_hit")

stats = miss.reset_index()
stats.columns = ["main_core", "helper_core", "hash", "clflush_miss_n"]
stats["clflush_remote_hit"] = hit_remote.values
stats["clflush_local_hit_n"] = hit_local.values
stats["clflush_shared_hit"] = hit_shared.values

stats.to_csv(sys.argv[1] + ".stats.csv", index=False)

#print(stats.to_string())

plt.show()
exit(0)
# The remainder is dead code kept for reference: the exit(0) above returns first.
g = sns.FacetGrid(stats, row="Core")

g.map(sns.distplot, 'Miss', bins=range(100, 480), color="r")
g.map(sns.distplot, 'Hit', bins=range(100, 480))
plt.show()

#stats["clflush_miss_med"] = stats[[0]].apply(lambda x: x["miss_med"])
#stats["clflush_hit_med"] = stats[[0]].apply(lambda x: x["hit_med"])
#del df[[0]]
#print(hit.to_string(), miss.to_string())

# test = pd.DataFrame({"value": [0, 5], "weight": [5, 1]})
# plt.figure()
# sns.distplot(test["value"], hist_kws={"weights": test["weight"]}, kde=False)

exit(0)
281
cache_utils/analyse_medians.py
Normal file
@ -0,0 +1,281 @@
# SPDX-FileCopyrightText: 2021 Guillaume DIDIER
#
# SPDX-License-Identifier: Apache-2.0
# SPDX-License-Identifier: MIT

import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sys import exit
import numpy as np
from scipy import optimize
import sys

# TODO
# sys.argv[1] should be the root,
# with root-result_lite.csv.bz2 the results
# and .stats.csv,
# root.slices a slice mapping - done
# root.cores a core + socket mapping - done -> move to analyse csv?
#
# Facet plot with the actual dot cloud + a plot of the linear regression:
# each row is a slice
# each row is an origin core
# each column a helper core if applicable


stats = pd.read_csv(sys.argv[1] + ".stats.csv",
                    dtype={
                        "main_core": np.int8,
                        "helper_core": np.int8,
                        # "address": int,
                        "hash": np.int8,
                        # "time": np.int16,
                        "clflush_remote_hit": np.float64,
                        "clflush_shared_hit": np.float64,
                        # "clflush_miss_f": np.int32,
                        # "clflush_local_hit_f": np.int32,
                        "clflush_miss_n": np.float64,
                        "clflush_local_hit_n": np.float64,
                        # "reload_miss": np.int32,
                        # "reload_remote_hit": np.int32,
                        # "reload_shared_hit": np.int32,
                        # "reload_local_hit": np.int32
                    }
                    )

slice_mapping = pd.read_csv(sys.argv[1] + ".slices.csv")
core_mapping = pd.read_csv(sys.argv[1] + ".cores.csv")

print(core_mapping.to_string())
print(slice_mapping.to_string())

print("core {} is mapped to '{}'".format(4, repr(core_mapping.iloc[4])))

min_time_miss = stats["clflush_miss_n"].min()
max_time_miss = stats["clflush_miss_n"].max()


def remap_core(key):
    def remap(core):
        remapped = core_mapping.iloc[core]
        return remapped[key]

    return remap


stats["main_socket"] = stats["main_core"].apply(remap_core("socket"))
stats["main_core_fixed"] = stats["main_core"].apply(remap_core("core"))
stats["main_ht"] = stats["main_core"].apply(remap_core("hthread"))
stats["helper_socket"] = stats["helper_core"].apply(remap_core("socket"))
stats["helper_core_fixed"] = stats["helper_core"].apply(remap_core("core"))
stats["helper_ht"] = stats["helper_core"].apply(remap_core("hthread"))

# slice_mapping = {3: 0, 1: 1, 2: 2, 0: 3}

stats["slice_group"] = stats["hash"].apply(lambda h: slice_mapping["slice_group"].iloc[h])

graph_lower_miss = int((min_time_miss // 10) * 10)
graph_upper_miss = int(((max_time_miss + 9) // 10) * 10)

print("Graphing from {} to {}".format(graph_lower_miss, graph_upper_miss))

g_ = sns.FacetGrid(stats, col="main_core_fixed", row="slice_group")

g_.map(sns.distplot, 'clflush_miss_n', bins=range(graph_lower_miss, graph_upper_miss), color="b")
#g.map(sns.scatterplot, 'slice_group', 'clflush_local_hit_n', color="g")
plt.show()


# The model below also explains remote hits; shared hits need some more
# thinking, as there is something weird happening there.
#
# M 0 1 2 3 4 5 6 7
#

print(stats.head())

num_core = len(stats["main_core_fixed"].unique())
print("Found {} cores".format(num_core))


def miss_topology(main_core_fixed, slice_group, C, h):
    return C + h * abs(main_core_fixed - slice_group) + h * abs(slice_group + 1)
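# A possible reading of this model: C is a fixed base cost, h * |main_core -
# slice_group| counts ring hops from the requesting core to the slice, and
# |slice_group + 1| then looks like hops from the slice to memory sitting at
# ring position -1 (cf. memory = -1 below).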
|
||||
def miss_topology_df(x, C, h):
|
||||
return x.apply(lambda x, C, h: miss_topology(x["main_core_fixed"], x["slice_group"], C, h), args=(C, h), axis=1)
|
||||
|
||||
|
||||
|
||||
res_miss = optimize.curve_fit(miss_topology_df, stats[["main_core_fixed", "slice_group"]], stats["clflush_miss_n"])
|
||||
print("Miss topology:")
|
||||
print(res_miss)
|
||||
|
||||
|
||||
memory = -1
|
||||
gpu_if_any = num_core
|
||||
|
||||
|
||||
def exclusive_hit_topology_gpu(main_core, slice_group, helper_core, C, h1, h2):
    round_trip = gpu_if_any - memory

    if slice_group <= num_core / 2:
        # send message towards higher cores first
        if helper_core < slice_group:
            r = C + h1 * abs(main_core - slice_group) + h2 * abs(round_trip - (helper_core - memory))
        else:
            r = C + h1 * abs(main_core - slice_group) + h2 * abs(helper_core - slice_group)
    else:
        # send message towards lower cores first
        if helper_core > slice_group:
            r = C + h1 * abs(main_core - slice_group) + h2 * abs(helper_core - memory)
        else:
            r = C + h1 * abs(main_core - slice_group) + h2 * abs(helper_core - slice_group)
    return r


def exclusive_hit_topology_gpu_df(x, C, h1, h2):
    return x.apply(lambda row, C, h1, h2: exclusive_hit_topology_gpu(row["main_core_fixed"], row["slice_group"], row["helper_core_fixed"], C, h1, h2), args=(C, h1, h2), axis=1)


def exclusive_hit_topology_gpu2(main_core, slice_group, helper_core, C, h1, h2):
    round_trip = gpu_if_any + 1 - memory

    if slice_group <= num_core / 2:
        # send message towards higher cores first
        if helper_core < slice_group:
            r = C + h1 * abs(main_core - slice_group) + h2 * abs(round_trip - (helper_core - memory))
        else:
            r = C + h1 * abs(main_core - slice_group) + h2 * abs(helper_core - slice_group)
    else:
        # send message towards lower cores first
        if helper_core > slice_group:
            r = C + h1 * abs(main_core - slice_group) + h2 * abs(helper_core - memory)
        else:
            r = C + h1 * abs(main_core - slice_group) + h2 * abs(helper_core - slice_group)
    return r


def exclusive_hit_topology_gpu2_df(x, C, h1, h2):
    return x.apply(lambda row, C, h1, h2: exclusive_hit_topology_gpu2(row["main_core_fixed"], row["slice_group"], row["helper_core_fixed"], C, h1, h2), args=(C, h1, h2), axis=1)


# unlikely
def exclusive_hit_topology_nogpu(main_core, slice_group, helper_core, C, h1, h2):
    round_trip = (num_core - 1) - memory

    if slice_group <= num_core / 2:
        # send message towards higher cores first
        if helper_core < slice_group:
            r = C + h1 * abs(main_core - slice_group) + h2 * abs(round_trip - (helper_core - memory))
        else:
            r = C + h1 * abs(main_core - slice_group) + h2 * abs(helper_core - slice_group)
    else:
        # send message towards lower cores first
        if helper_core > slice_group:
            r = C + h1 * abs(main_core - slice_group) + h2 * abs(helper_core - memory)
        else:
            r = C + h1 * abs(main_core - slice_group) + h2 * abs(helper_core - slice_group)
    return r


def exclusive_hit_topology_nogpu_df(x, C, h1, h2):
    return x.apply(lambda row, C, h1, h2: exclusive_hit_topology_nogpu(row["main_core_fixed"], row["slice_group"], row["helper_core_fixed"], C, h1, h2), args=(C, h1, h2), axis=1)
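
# The three variants above differ only in the assumed ring round-trip length
# (gpu_if_any - memory, gpu_if_any + 1 - memory, or (num_core - 1) - memory).
# A single parameterised form would look like this (a sketch, not used by the
# fits below):
def exclusive_hit_topology(main_core, slice_group, helper_core, round_trip, C, h1, h2):
    base = C + h1 * abs(main_core - slice_group)
    if slice_group <= num_core / 2:
        # message travels towards higher cores first
        if helper_core < slice_group:
            return base + h2 * abs(round_trip - (helper_core - memory))
        return base + h2 * abs(helper_core - slice_group)
    # message travels towards lower cores first
    if helper_core > slice_group:
        return base + h2 * abs(helper_core - memory)
    return base + h2 * abs(helper_core - slice_group)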

#res_no_gpu = optimize.curve_fit(exclusive_hit_topology_nogpu_df, stats[["main_core_fixed", "slice_group", "helper_core_fixed"]], stats["clflush_remote_hit"])
#print("Exclusive hit topology (No GPU):")
#print(res_no_gpu)

res_gpu = optimize.curve_fit(exclusive_hit_topology_gpu_df, stats[["main_core_fixed", "slice_group", "helper_core_fixed"]], stats["clflush_remote_hit"])
print("Exclusive hit topology (GPU):")
print(res_gpu)

#res_gpu2 = optimize.curve_fit(exclusive_hit_topology_gpu2_df, stats[["main_core_fixed", "slice_group", "helper_core_fixed"]], stats["clflush_remote_hit"])
#print("Exclusive hit topology (GPU2):")
#print(res_gpu2)

def remote_hit_topology_2(x, C, h):
    main_core = x["main_core_fixed"]
    slice_group = x["slice_group"]
    helper_core = x["helper_core_fixed"]
    return C + h * abs(main_core - slice_group) + h * abs(slice_group - helper_core) + h * abs(helper_core - main_core)


def shared_hit_topology_1(x, C, h):
    main_core = x["main_core_fixed"]
    slice_group = x["slice_group"]
    helper_core = x["helper_core_fixed"]
    return C + h * abs(main_core - slice_group) + h * max(abs(slice_group - main_core), abs(slice_group - helper_core))
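
# Worked example for the shared-hit model (hypothetical values): with
# main_core=1, helper_core=6 and slice_group=3 the cost is
# C + h*|1-3| + h*max(|3-1|, |3-6|) = C + 2h + 3h; the reply must reach
# whichever of the two sharing cores is farther from the slice.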

def plot_func(function, *params):
    def plot_it(x, **kwargs):
        # plot_x = []
        # plot_y = []
        # for x in set(x):
        #     plot_y.append(function(x, *params))
        # plot_x = x
        print(x)
        plot_y = function(x, *params)
        sns.lineplot(x, plot_y, **kwargs)
    return plot_it

stats["predicted_miss"] = miss_topology_df(stats, *(res_miss[0]))

figure_median_I = sns.FacetGrid(stats, col="main_core_fixed")
figure_median_I.map(sns.scatterplot, 'slice_group', 'clflush_miss_n', color="b")
figure_median_I.map(sns.lineplot, 'slice_group', 'predicted_miss', color="b")
figure_median_I.set_titles(col_template="$A$ = {col_name}")
figure_median_I.tight_layout()

import tikzplotlib

tikzplotlib.save("fig-median-I.tex", axis_width=r'0.175\textwidth', axis_height=r'0.25\textwidth')
plt.show()

#stats["predicted_remote_hit_no_gpu"] = exclusive_hit_topology_nogpu_df(stats, *(res_no_gpu[0]))
stats["predicted_remote_hit_gpu"] = exclusive_hit_topology_gpu_df(stats, *(res_gpu[0]))
#stats["predicted_remote_hit_gpu2"] = exclusive_hit_topology_gpu2_df(stats, *(res_gpu2[0]))

stats_A0 = stats[stats["main_core_fixed"] == 0]
figure_median_E_A0 = sns.FacetGrid(stats_A0, col="slice_group")
figure_median_E_A0.map(sns.scatterplot, 'helper_core_fixed', 'clflush_remote_hit', color="r")
figure_median_E_A0.map(sns.lineplot, 'helper_core_fixed', 'predicted_remote_hit_gpu', color="r")
figure_median_E_A0.set_titles(col_template="$S$ = {col_name}")

tikzplotlib.save("fig-median-E-A0.tex", axis_width=r'0.175\textwidth', axis_height=r'0.25\textwidth')
plt.show()

g = sns.FacetGrid(stats, row="main_core_fixed")

g.map(sns.scatterplot, 'slice_group', 'clflush_miss_n', color="b")
g.map(sns.scatterplot, 'slice_group', 'clflush_local_hit_n', color="g")

g0 = sns.FacetGrid(stats, row="slice_group")

g0.map(sns.scatterplot, 'main_core_fixed', 'clflush_miss_n', color="b")
g0.map(sns.scatterplot, 'main_core_fixed', 'clflush_local_hit_n', color="g")  # this gives away the trick, I think!
# One possibility: broadcast a general "please discard this line" message around
# one direction of the ring and wait for the ACK - which direction depends on the core.


g2 = sns.FacetGrid(stats, row="main_core_fixed", col="slice_group")
g2.map(sns.scatterplot, 'helper_core_fixed', 'clflush_remote_hit', color="r")
g2.map(sns.lineplot, 'helper_core_fixed', 'predicted_remote_hit_gpu', color="r")
#g2.map(sns.lineplot, 'helper_core_fixed', 'predicted_remote_hit_gpu2', color="g")
#g2.map(sns.lineplot, 'helper_core_fixed', 'predicted_remote_hit_no_gpu', color="g")
#g2.map(plot_func(exclusive_hit_topology_nogpu_df, *(res_no_gpu[0])), 'helper_core_fixed', color="g")

g3 = sns.FacetGrid(stats, row="main_core_fixed", col="slice_group")
g3.map(sns.scatterplot, 'helper_core_fixed', 'clflush_shared_hit', color="y")

# more ideas needed

plt.show()
15
cache_utils/run-2-thread-cal.sh
Executable file
@ -0,0 +1,15 @@
>&2 echo "# Running the following commands with sudo to set-up"
>&2 echo 'sudo sh -c "echo 1 > /sys/devices/system/cpu/intel_pstate/no_turbo"'
>&2 echo sudo cpupower frequency-set -g performance

# performance cpu frequency governor
sudo cpupower frequency-set -g performance

# No Turbo Boost
sudo sh -c "echo 1 > /sys/devices/system/cpu/intel_pstate/no_turbo"

cargo run --release --bin two_thread_cal "$@"

>&2 echo "# Please run the following commands to restore configuration"
>&2 echo 'sudo sh -c "echo 0 > /sys/devices/system/cpu/intel_pstate/no_turbo"'
>&2 echo sudo cpupower frequency-set -g powersave
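
For reference, the script forwards any arguments straight through to `two_thread_cal`; a typical session would be something like `cd cache_utils && ./run-2-thread-cal.sh | tee two-thread-cal.log` (the log name here is arbitrary), followed by the restore commands the script prints on stderr.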
@ -1,5 +1,10 @@
#![deny(unsafe_op_in_unsafe_fn)]

// SPDX-FileCopyrightText: 2021 Guillaume DIDIER
//
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT

use cache_utils::calibration::{
    accumulate, calibrate_fixed_freq_2_thread, calibration_result_to_ASVP, flush_and_reload,
    get_cache_attack_slicing, load_and_flush, map_values, only_flush, only_reload, reduce,
@ -14,12 +19,13 @@ use nix::unistd::Pid;

use core::arch::x86_64 as arch_x86;

use core::cmp::min;
use std::cmp::Ordering;
use std::cmp::min;
use std::collections::HashMap;
use std::process::Command;
use std::str::from_utf8;

unsafe fn multiple_access(p: *const u8) {
    unsafe {
        maccess::<u8>(p);
@ -34,7 +40,9 @@ unsafe fn multiple_access(p: *const u8) {
    }
}

const SIZE: usize = 2 << 20;
//const SIZE: usize = 2 << 20;
const SIZE: usize = 4 << 10;

const MAX_SEQUENCE: usize = 2048 * 64;

#[derive(Clone, Copy, Hash, Eq, PartialEq, Debug)]
@ -111,7 +119,8 @@ fn main() {

    println!("Number of cores per socket: {}", core_per_socket);

    let m = MMappedMemory::new(SIZE, true, false, |i: usize| i as u8);
    // TODO Enable Hugepage here if needed.
    let m = MMappedMemory::new(SIZE, false, false, |i: usize| i as u8);
    let array = m.slice();

    let cache_line_size = 64;
@ -349,7 +349,7 @@ fn calibrate_fixed_freq_2_thread_impl<I: Iterator<Item = (usize, usize)>, T>(
            main_turn_handle.next();
            params = main_turn_handle.wait();
            // join thread.
            helper_thread.unwrap().join();
            helper_thread.unwrap().join().expect("Failed to join thread");
            // FIXME error handling
        }
    }
@ -4,6 +4,7 @@ use crate::complex_addressing::{cache_slicing, CacheAttackSlicing, CacheSlicing}
use crate::{flush, maccess, rdtsc_fence};

use cpuid::MicroArchitecture;
use core::cmp::min;

use core::arch::x86_64 as arch_x86;
#[cfg(feature = "no_std")]
@ -16,7 +17,6 @@ extern crate alloc;
use crate::calibration::Verbosity::*;
use alloc::vec;
use alloc::vec::Vec;
use core::cmp::min;
use itertools::Itertools;
use core::hash::Hash;
use core::ops::{Add, AddAssign};
@ -12,6 +12,8 @@ use hashbrown::HashMap;
#[cfg(feature = "no_std")]
use hashbrown::HashSet;

#[cfg(feature = "use_std")]
use std::vec::Vec;
#[cfg(feature = "use_std")]
use std::collections::HashMap;
#[cfg(feature = "use_std")]
@ -78,7 +80,7 @@ pub fn cache_slicing(
    match vendor {
        CPUVendor::Intel => {
            match uarch {
                MicroArchitecture::KabyLake | MicroArchitecture::Skylake => ComplexAddressing(
                MicroArchitecture::KabyLake | MicroArchitecture::Skylake | MicroArchitecture::WhiskeyLake => ComplexAddressing(
                    &SANDYBRIDGE_TO_SKYLAKE_FUNCTIONS[0..((trailing_zeros + 1) as usize)],
                ),
                MicroArchitecture::CoffeeLake => {
@ -7,6 +7,7 @@ use std::collections::LinkedList;
use std::ptr::copy_nonoverlapping;
use std::sync::Mutex;
use std::vec::Vec;
use core::arch::global_asm;

struct WXRange {
    start: usize,
@ -1,7 +1,5 @@
#![cfg_attr(feature = "no_std", no_std)]
#![feature(ptr_internals)]
#![feature(linked_list_cursors)]
#![feature(global_asm)]
#![allow(clippy::missing_safety_doc)]
#![deny(unsafe_op_in_unsafe_fn)]

@ -19,7 +17,6 @@ assert_cfg!(
);

pub mod cache_info;
mod calibrate_2t;
pub mod calibration;
pub mod complex_addressing;
#[cfg(feature = "use_std")]
@ -1,15 +1,15 @@
#![cfg(feature = "use_std")]

use core::borrow::{Borrow, BorrowMut};
use core::ffi::c_void;
use core::mem::{size_of, MaybeUninit};
use core::mem::size_of;
use core::num::NonZeroUsize;
use core::ops::{Deref, DerefMut};
use core::ptr;
use core::ptr::null_mut;
use core::ptr::Unique;
use core::ptr::NonNull;
use core::slice::{from_raw_parts, from_raw_parts_mut};
use nix::errno::Errno::EINVAL;
use std::convert::TryFrom;
use nix::sys::mman;
use core::ptr;

/* from linux kernel headers.
#define HUGETLB_FLAG_ENCODE_SHIFT 26
@ -22,7 +22,7 @@ use nix::sys::mman;
*/
/** Safety issue: if T is not trivially constructible and destructible, this is dangerous */
pub struct MMappedMemory<T> {
    pointer: Unique<T>,
    pointer: NonNull<T>,
    size: usize,
}
@ -34,37 +34,30 @@ impl<T> MMappedMemory<T> {
        initializer: impl Fn(usize) -> T,
    ) -> Result<MMappedMemory<T>, nix::Error> {
        assert_ne!(size_of::<T>(), 0);
        if let Some(p) = unsafe {
            let p = mman::mmap(
                null_mut(),
                size * size_of::<T>(),
                mman::ProtFlags::PROT_READ
                    | mman::ProtFlags::PROT_WRITE
                    | if executable {
                        mman::ProtFlags::PROT_EXEC
                    } else {
                        mman::ProtFlags::PROT_READ
                    },
        match unsafe {
            mman::mmap_anonymous(
                None,
                NonZeroUsize::try_from(size * size_of::<T>()).unwrap(),
                mman::ProtFlags::PROT_READ | mman::ProtFlags::PROT_WRITE,
                mman::MapFlags::MAP_PRIVATE
                    | mman::MapFlags::MAP_ANONYMOUS
                    | if huge {
                        mman::MapFlags::MAP_HUGETLB
                    } else {
                        mman::MapFlags::MAP_ANONYMOUS
                    },
                -1,
                0,
            )?;
            let pointer_T = p as *mut T;
            Unique::new(pointer_T)
        } {
            let mut s = MMappedMemory { pointer: p, size };
            for i in 0..s.size {
                unsafe { ptr::write(s.pointer.as_ptr().add(i), initializer(i)) };
                        mman::MapFlags::MAP_HUGETLB
                    } else {
                        mman::MapFlags::MAP_ANONYMOUS
                    },
            )
        } {
            Ok(p) => {
                let mut s: MMappedMemory<T> = MMappedMemory { pointer: p.cast(), size };
                for i in 0..s.size {
                    unsafe { ptr::write(s.pointer.as_ptr().add(i), initializer(i)) };
                }
                Ok(s)
            }
            Err(e) => {
                Err(e)
            }
            Ok(s)
        } else {
            Err(nix::Error::Sys(EINVAL))
        }
    }
/*
@ -118,11 +111,8 @@ impl<T> MMappedMemory<T> {

impl<T> Drop for MMappedMemory<T> {
    fn drop(&mut self) {
        for i in 0..self.size {
            unsafe { ptr::drop_in_place(self.pointer.as_ptr().add(i)) };
        }
        unsafe {
            mman::munmap(self.pointer.as_ptr() as *mut c_void, self.size).unwrap();
            mman::munmap(self.pointer.cast(), self.size).unwrap();
        }
    }
}
@ -164,3 +154,7 @@ impl<T> BorrowMut<[T]> for MMappedMemory<T> {
        self.slice_mut()
    }
}

// It owns the memory, so it should be safe to send.
unsafe impl<T> Send for MMappedMemory<T> {}
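For orientation, a minimal usage sketch of the rewritten mapper (assumptions: the fallible constructor whose body is shown above is named `try_new` and keeps the `(size, huge, executable, initializer)` parameter order of its callers; the size is arbitrary):

```
let mem: MMappedMemory<u8> =
    MMappedMemory::try_new(4096, false, false, |i| i as u8).expect("mmap_anonymous failed");
assert_eq!(mem.slice().len(), 4096);
// On drop, every element is dropped in place and the mapping is munmap'ed.
```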
@ -1,7 +1,7 @@
[package]
name = "covert_channels_benchmark"
version = "0.1.0"
authors = ["Guillume DIDIER <guillaume.didier@inria.fr>"]
authors = ["Guillaume DIDIER <guillaume.didier.2014@polytechnique.org>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@ -1,15 +1,15 @@
[package]
name = "covert_channels_evaluation"
version = "0.1.0"
authors = ["GuillaumeDIDIER <guillaume.didier95@hotmail.fr>"]
authors = ["Guillaume DIDIER <guillaume.didier.2014@polytechnique.org>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
rand = "0.8.3"
bit_field = "0.10.1"
rand = "0.8.5"
bit_field = "0.10.2"
turn_lock = { path = "../turn_lock" }
cache_utils = { path = "../cache_utils" }
nix = "0.20.0"
nix = "0.28.0"
cache_side_channel = { path = "../cache_side_channel" }
@ -95,13 +95,13 @@ fn transmit_thread<T: CovertChannel>(
    let mut bit_iter = BitIterator::new(&result);
    let start_time = std::time::Instant::now();
    let start = unsafe { rdtsc_fence() };
    while !bit_iter.atEnd() {
    while !bit_iter.at_end() {
        for page in params.handles.iter_mut() {
            let mut handle = page.wait();
            unsafe { params.covert_channel.transmit(&mut *handle, &mut bit_iter) };
            bit_sent += T::BIT_PER_PAGE;
            page.next();
            if bit_iter.atEnd() {
            if bit_iter.at_end() {
                break;
            }
        }
@ -7,7 +7,7 @@ edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
itertools = { version = "0.10.0", default-features = false }
itertools = { version = "0.13.0", default-features = false }

[features]
use_std = ["itertools/use_std"]
@ -12,12 +12,8 @@ use core::arch::x86_64;
//use cstr_core::{CStr, CString};

use crate::CPUVendor::{Intel, Unknown};
use crate::MicroArchitecture::{
    Airmont, Bonnell, Broadwell, CannonLake, CascadeLake, CoffeeLake, CooperLake, Core, Goldmont,
    GoldmontPlus, Haswell, HaswellE, IceLake, IvyBridge, IvyBridgeE, KabyLake, KnightsLanding,
    KnightsMill, Nehalem, NetBurst, Penryn, PentiumM, Saltwell, SandyBridge, Silvermont, Skylake,
    SkylakeServer, Tremont, Westmere, Yonah, P5, P6,
};
use crate::MicroArchitecture::{Airmont, Bonnell, Broadwell, CannonLake, CascadeLake, CoffeeLake, CooperLake, Core, Goldmont, GoldmontPlus, Haswell, HaswellE, IceLake, IvyBridge, IvyBridgeE, KabyLake, KnightsLanding, KnightsMill, Nehalem, NetBurst, Penryn, PentiumM, Saltwell, SandyBridge, Silvermont, Skylake, SkylakeServer, Tremont, Westmere, Yonah, P5, P6, WhiskeyLake};

//#[cfg(feature = "std")]
//use std::ffi::{CStr, CString};

@ -152,6 +148,7 @@ pub enum MicroArchitecture {
    // supports Intel 64 architecture.
    CascadeLake,
    CannonLake, // Only in volume 4 ??
    WhiskeyLake,
    // The 2nd generation Intel® Xeon® Processor Scalable Family is based on the Cascade Lake product and supports
    // Intel 64 architecture.
    IceLake,
@ -184,17 +181,20 @@ impl MicroArchitecture {
            // Intel® Core™ processors based on Coffee Lake microarchitecture, Intel® Xeon® E processors based on
            // Coffee Lake microarchitecture
            0x06_8E => {
                if stepping <= 9 {
                    KabyLake
                } else {
                    CoffeeLake
                match stepping {
                    9 => KabyLake,
                    10 => CoffeeLake,
                    11 | 12 => WhiskeyLake,
                    _ => { return None; }
                }
            }
            0x06_9E => {
                if stepping <= 9 {
                    KabyLake
                } else {
                } else if stepping <= 13 {
                    CoffeeLake
                } else {
                    return None;
                }
            }
            // Future Intel® Xeon® processors based on Ice Lake microarchitecture
@ -289,11 +289,11 @@ impl MicroArchitecture {
        }
    }
    pub fn get_family_model_stepping() -> Option<(CPUVendor, u32, u32)> {
        let vendor = CPUVendor::get_cpu_vendor();
        // Warning this might not support AMD
        if true {
        if vendor == Intel {
            // TODO refactor some of this into a separate function.
            // has cpuid
            let vendor = CPUVendor::get_cpu_vendor();
            let eax = unsafe { x86_64::__cpuid(1) }.eax;
            let stepping = eax & 0xf;
            let mut model = (eax >> 4) & 0xf;
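For reference, the standard decoding of CPUID leaf 1's EAX into family/model/stepping, which the function above begins (a sketch following the generic Intel SDM rule; not necessarily the exact continuation of the code shown):

```
fn decode_family_model_stepping(eax: u32) -> (u32, u32, u32) {
    let stepping = eax & 0xf;
    let mut model = (eax >> 4) & 0xf;
    let mut family = (eax >> 8) & 0xf;
    // Families 6 and 15 extend the model with bits 16-19; family 15 also
    // extends the family with bits 20-27.
    if family == 6 || family == 0xf {
        model += ((eax >> 16) & 0xf) << 4;
    }
    if family == 0xf {
        family += (eax >> 20) & 0xff;
    }
    (family, model, stepping)
}
```

This yields, e.g., the family/model pair 0x06_8E for the Kaby Lake / Coffee Lake / Whiskey Lake mobile parts distinguished by stepping in the match arms above.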
44
dendrobates-t-azureus/Cargo.toml
Normal file
@ -0,0 +1,44 @@
[package]
name = "dendrobates_tinctoreus_azureus"
version = "0.1.0"
authors = ["Guillaume DIDIER <guillaume.didier.2014@polytechnique.org>"]
edition = "2018"

[package.metadata.bootimage]
#run-command = ["./scripts/bochs.sh", "{}"]
run-command = ["./scripts/run.sh", "{}"]
test-args = ["qemu"]
run-args = ["bochs"]
#run-command = ["qemu-system-x86_64", "-drive", "format=raw,file={}"]
#test-args = ["-device", "isa-debug-exit,iobase=0xf4,iosize=0x04"]
test-success-exit-code = 33 # (0x10 << 1) | 1

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
x86_64 = "0.15.1"
vga_buffer = { path = "../vga_buffer" }
polling_serial = { path = "../polling_serial" }
volatile = "0.4.4"
linked_list_allocator = "0.9.0"
cache_utils = { path = "../cache_utils", features = ["no_std"], default-features = false }
arrayref = "0.3.6"

[dependencies.lazy_static]
version = "1.4.0"
features = ["spin_no_std"]

[dependencies.bootloader]
version = "0.9.16"
features = ["sse", "map_physical_memory"]

#[patch.crates-io]
#bootloader = { path = "../bootloader" }

[[test]]
name = "panic_test"
harness = false

[[test]]
name = "stack_overflow"
harness = false
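The success exit code above follows from how QEMU's isa-debug-exit device works (see the commented-out test-args): when the kernel writes a value v to the configured port, QEMU exits with status (v << 1) | 1, so writing 0x10 yields 33. A sketch of the reporting side (hypothetical function name; port 0xf4 comes from the commented configuration):

```
use x86_64::instructions::port::Port;

/// Report success to QEMU through isa-debug-exit (iobase=0xf4, iosize=0x04).
pub fn exit_qemu_success() -> ! {
    unsafe {
        let mut port = Port::new(0xf4);
        port.write(0x10u32); // QEMU exits with (0x10 << 1) | 1 == 33
    }
    loop {} // not reached once QEMU has exited
}
```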
@ -20,3 +20,5 @@ Design decision :
- [ ] Get serial console
- [ ] Deal with cpuid / floating point niceties
- [ ] Deal with the user mode switch

Known good rust version : 1.57.0-nightly (9a28ac83c 2021-09-18)
@ -1,7 +1,7 @@
[package]
name = "flush_flush"
version = "0.1.0"
authors = ["GuillaumeDIDIER <guillaume.didier95@hotmail.fr>"]
authors = ["Guillaume DIDIER <guillaume.didier.2014@polytechnique.org>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@ -9,5 +9,5 @@ edition = "2018"
[dependencies]
cache_utils = { path = "../cache_utils" }
cache_side_channel = { path = "../cache_side_channel" }
nix = "0.20.0"
nix = "0.28.0"
basic_timing_cache_channel = { path = "../basic_timing_cache_channel" }
@ -1,7 +1,7 @@
[package]
name = "flush_reload"
version = "0.1.0"
authors = ["GuillaumeDIDIER <guillaume.didier95@hotmail.fr>"]
authors = ["Guillaume DIDIER <guillaume.didier.2014@polytechnique.org>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@ -9,5 +9,5 @@ edition = "2018"
[dependencies]
cache_utils = { path = "../cache_utils" }
cache_side_channel = { path = "../cache_side_channel" }
nix = "0.20.0"
nix = "0.28.0"
basic_timing_cache_channel = { path = "../basic_timing_cache_channel" }
@ -7,7 +7,7 @@ edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
x86_64 = "0.14.1"
x86_64 = "0.15.1"
spin = "0.9.0"

[dependencies.lazy_static]
@ -1,7 +1,7 @@
[package]
name = "turn_lock"
version = "0.1.0"
authors = ["GuillaumeDIDIER <guillaume.didier95@hotmail.fr>"]
authors = ["Guillaume DIDIER <guillaume.didier.2014@polytechnique.org>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html