Major refactor

This commit is contained in:
Guillaume DIDIER 2024-05-27 11:51:13 +02:00
parent 30d9527ceb
commit 8edd90b6a3
52 changed files with 875 additions and 276 deletions

View File

@ -2,7 +2,7 @@
<module type="CPP_MODULE" version="4">
<component name="FacetManager">
<facet type="Python" name="Python facet">
<configuration sdkName="Python 3.7 (dendrobates-t-azureus)" />
<configuration sdkName="" />
</facet>
</component>
<component name="NewModuleRootManager">
@ -41,6 +41,8 @@
<sourceFolder url="file://$MODULE_DIR$/covert_channels_benchmark/src" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/prefetcher_reverse/src" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/CacheObserver/src" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/dendrobates-t-azureus/src" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/dendrobates-t-azureus/tests" isTestSource="true" />
<excludeFolder url="file://$MODULE_DIR$/cache_info/target" />
<excludeFolder url="file://$MODULE_DIR$/cache_utils/target" />
<excludeFolder url="file://$MODULE_DIR$/kernel/target" />
@ -51,6 +53,5 @@
</content>
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" name="Python 3.7 (dendrobates-t-azureus) interpreter library" level="application" />
</component>
</module>

227
Cargo.lock generated
View File

@ -12,23 +12,9 @@ dependencies = [
"cache_utils",
"flush_flush",
"flush_reload",
"itertools",
"itertools 0.10.1",
"lazy_static",
"nix",
"rand",
]
[[package]]
name = "aes-t-tables"
version = "0.1.0"
dependencies = [
"cache_side_channel",
"cache_utils",
"flush_flush",
"flush_reload",
"memmap2",
"nix",
"openssl",
"nix 0.20.0",
"rand",
]
@ -45,15 +31,24 @@ dependencies = [
[[package]]
name = "arrayref"
version = "0.3.6"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544"
checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545"
[[package]]
name = "atomic"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d818003e740b63afc82337e3160717f4f63078720a810b7b903e70a5d1d2994"
dependencies = [
"bytemuck",
]
[[package]]
name = "autocfg"
version = "1.0.1"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
[[package]]
name = "basic_timing_cache_channel"
@ -61,20 +56,14 @@ version = "0.1.0"
dependencies = [
"cache_side_channel",
"cache_utils",
"nix",
"nix 0.28.0",
]
[[package]]
name = "bit_field"
version = "0.9.0"
version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed8765909f9009617974ab6b7d332625b320b33c326b1e9321382ef1999b5d56"
[[package]]
name = "bit_field"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcb6dd1c2376d2e096796e234a70e17e94cc2d5d54ff8ce42b28cef1d0d359a4"
checksum = "dc827186963e592360843fb5ba4b973e145841266c1357f7180c43526f2e5b61"
[[package]]
name = "bitflags"
@ -82,6 +71,12 @@ version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
[[package]]
name = "bitflags"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1"
[[package]]
name = "bitvec"
version = "0.22.3"
@ -96,32 +91,39 @@ dependencies = [
[[package]]
name = "bootloader"
version = "0.9.18"
version = "0.9.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a3c1ceed1cd9e61c7998100cc18c13d413aa40d018992b871ab8e7435ce6372"
checksum = "365861702868e2a37b4247aaecc7bd8f4389baec8d025497ad8ba7ff37ee9440"
dependencies = [
"bit_field 0.10.1",
"bit_field",
]
[[package]]
name = "bytemuck"
version = "1.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78834c15cb5d5efe3452d58b1e8ba890dd62d21907f867f383358198e56ebca5"
[[package]]
name = "cache_side_channel"
version = "0.1.0"
dependencies = [
"bit_field 0.10.1",
"nix",
"bit_field",
"nix 0.28.0",
]
[[package]]
name = "cache_utils"
version = "0.1.0"
dependencies = [
"atomic",
"bitvec",
"cpuid",
"hashbrown",
"itertools",
"itertools 0.12.1",
"lazy_static",
"libc",
"nix",
"nix 0.28.0",
"polling_serial",
"static_assertions",
"turn_lock",
@ -141,6 +143,12 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "cfg_aliases"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e"
[[package]]
name = "covert_channels_benchmark"
version = "0.1.0"
@ -150,17 +158,17 @@ dependencies = [
"covert_channels_evaluation",
"flush_flush",
"flush_reload",
"nix",
"nix 0.20.0",
]
[[package]]
name = "covert_channels_evaluation"
version = "0.1.0"
dependencies = [
"bit_field 0.10.1",
"bit_field",
"cache_side_channel",
"cache_utils",
"nix",
"nix 0.28.0",
"rand",
"turn_lock",
]
@ -169,7 +177,7 @@ dependencies = [
name = "cpuid"
version = "0.1.0"
dependencies = [
"itertools",
"itertools 0.13.0",
]
[[package]]
@ -200,7 +208,7 @@ dependencies = [
"basic_timing_cache_channel",
"cache_side_channel",
"cache_utils",
"nix",
"nix 0.28.0",
]
[[package]]
@ -210,24 +218,9 @@ dependencies = [
"basic_timing_cache_channel",
"cache_side_channel",
"cache_utils",
"nix",
"nix 0.28.0",
]
[[package]]
name = "foreign-types"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
dependencies = [
"foreign-types-shared",
]
[[package]]
name = "foreign-types-shared"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
[[package]]
name = "funty"
version = "1.2.0"
@ -263,6 +256,24 @@ dependencies = [
"either",
]
[[package]]
name = "itertools"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569"
dependencies = [
"either",
]
[[package]]
name = "itertools"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186"
dependencies = [
"either",
]
[[package]]
name = "lazy_static"
version = "1.4.0"
@ -274,88 +285,59 @@ dependencies = [
[[package]]
name = "libc"
version = "0.2.96"
version = "0.2.155"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5600b4e6efc5421841a2138a6b082e07fe12f9aaa12783d50e5d13325b26b4fc"
checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c"
[[package]]
name = "linked_list_allocator"
version = "0.9.0"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0b725207570aa16096962d0b20c79f8a543df2280bd3c903022b9b0b4d7ea68"
checksum = "549ce1740e46b291953c4340adcd74c59bcf4308f4cac050fd33ba91b7168f4a"
dependencies = [
"spinning_top",
]
[[package]]
name = "lock_api"
version = "0.4.4"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0382880606dff6d15c9476c416d18690b72742aa7b605bb6dd6ec9030fbf07eb"
checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "memmap2"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "723e3ebdcdc5c023db1df315364573789f8857c11b631a2fdfad7c00f5c046b4"
dependencies = [
"libc",
]
[[package]]
name = "nix"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa9b4819da1bc61c0ea48b63b7bc8604064dd43013e7cc325df098d49cd7c18a"
dependencies = [
"bitflags",
"bitflags 1.2.1",
"cc",
"cfg-if",
"libc",
]
[[package]]
name = "nix"
version = "0.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4"
dependencies = [
"bitflags 2.5.0",
"cfg-if",
"cfg_aliases",
"libc",
]
[[package]]
name = "once_cell"
version = "1.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3"
[[package]]
name = "openssl"
version = "0.10.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d7830286ad6a3973c0f1d9b73738f69c76b739301d0229c4b96501695cbe4c8"
dependencies = [
"bitflags",
"cfg-if",
"foreign-types",
"libc",
"once_cell",
"openssl-sys",
]
[[package]]
name = "openssl-sys"
version = "0.9.63"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6b0d6fb7d80f877617dfcb014e605e2b5ab2fb0afdf27935219bb6bd984cb98"
dependencies = [
"autocfg",
"cc",
"libc",
"pkg-config",
"vcpkg",
]
[[package]]
name = "pkg-config"
version = "0.3.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
[[package]]
name = "polling_serial"
version = "0.1.0"
@ -379,14 +361,13 @@ checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb"
[[package]]
name = "rand"
version = "0.8.3"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
"libc",
"rand_chacha",
"rand_core",
"rand_hc",
]
[[package]]
@ -409,13 +390,10 @@ dependencies = [
]
[[package]]
name = "rand_hc"
version = "0.3.0"
name = "rustversion"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73"
dependencies = [
"rand_core",
]
checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47"
[[package]]
name = "scopeguard"
@ -440,9 +418,9 @@ dependencies = [
[[package]]
name = "spinning_top"
version = "0.2.4"
version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75adad84ee84b521fb2cca2d4fd0f1dab1d8d026bda3c5bea4ca63b5f9f9293c"
checksum = "5b9eb1a2f4c41445a3a0ff9abc5221c5fcd28e1f13cd7c0397706f9ac938ddb0"
dependencies = [
"lock_api",
]
@ -463,12 +441,6 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
name = "turn_lock"
version = "0.1.0"
[[package]]
name = "vcpkg"
version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "025ce40a007e1907e58d5bc1a594def78e5573bb0b1160bc389634e8f12e4faa"
[[package]]
name = "version_check"
version = "0.9.3"
@ -513,11 +485,12 @@ dependencies = [
[[package]]
name = "x86_64"
version = "0.14.3"
version = "0.15.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b7c54a17492391c594753ce2a180142bec7ad2876543565c2a08aa11cddef251"
checksum = "4bc79523af8abf92fb1a970c3e086c5a343f6bcc1a0eb890f575cbb3b45743df"
dependencies = [
"bit_field 0.9.0",
"bitflags",
"bit_field",
"bitflags 2.5.0",
"rustversion",
"volatile 0.4.4",
]

View File

@ -5,7 +5,7 @@ members = [
"polling_serial",
"cache_utils",
"cpuid",
"aes-t-tables",
#"aes-t-tables",
"covert_channels_benchmark",
"covert_channels_evaluation",
"cache_side_channel",
@ -14,58 +14,5 @@ members = [
"basic_timing_cache_channel",
"turn_lock",
"CacheObserver",
]
[package]
name = "dendrobates_tinctoreus_azureus"
version = "0.1.0"
authors = ["Guillaume DIDIER <guillaume.didier.2014@polytechnique.org>"]
edition = "2018"
[package.metadata.bootimage]
#run-command = ["./scripts/bochs.sh", "{}"]
run-command = ["./scripts/run.sh", "{}"]
test-args = ["qemu"]
run-args = ["bochs"]
#run-command = ["qemu-system-x86_64", "-drive", "format=raw,file={}"]
#test-args = ["-device", "isa-debug-exit,iobase=0xf4,iosize=0x04"]
test-success-exit-code = 33 # (0x10 << 1) | 1
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
x86_64 = "0.14.3"
vga_buffer = { path = "vga_buffer" }
polling_serial = { path = "polling_serial" }
volatile = "0.4.4"
linked_list_allocator = "0.9.0"
cache_utils = { path = "cache_utils", features = ["no_std"], default-features = false }
arrayref = "0.3.6"
[dependencies.lazy_static]
version = "1.4.0"
features = ["spin_no_std"]
[dependencies.bootloader]
version = "0.9.16"
features = ["sse", "map_physical_memory"]
#[patch.crates-io]
#bootloader = { path = "../bootloader" }
[profile.dev]
opt-level = 1
debug = 2
[profile.test]
opt-level = 1
debug = 2
[[test]]
name = "panic_test"
harness = false
[[test]]
name = "stack_overflow"
harness = false
"dendrobates-t-azureus"
]

88
README-Calibration.md Normal file
View File

@ -0,0 +1,88 @@
# calibration-done-right
Public code for the "calibration done right" paper
The code base is written entirely in rust.
There are three crates with binaries providing results :
- **aes-t-tables** runs the T-table attack using the 3 side channels
- **cache_utils** `two_thread_cal` runs a full calibration on all core pairs for Flush+Flush
and analyses the results to provide error rate predictions in various attacker models
- **covert_channel_benchmark** is the crate that runs covert channel benchmarks on the various covert channels
The code presented runs under Fedora 30, and can also be made to run on Ubuntu 18.04 LTS with minor tweaks
(Notably lib cpupower may also be called lib cpufreq)
On recent fedora the package is `kernel-tools-libs-devel`, on debian `libcpupower-dev`
You will also need utility `cpupower` (from `kernel-tools` / `linux-cpupower`)
# Usage
## General set-up
Requires rust nightly features. Install rust nightly using rustup
One should disable turbo boost and other source of idle frequency scaling
On Intel platforms, the following should work.
```shell
# performance cpu frequency governor
cpupower frequency-set -g performance
# No Turbo Boost
echo 1 > /sys/devices/system/cpu/intel_pstate/no_turbo
```
Depending on the experiment you may be interested in disabling prefetchers.
## Two thread calibration set-up and usage
In addition to the general set-up you need to enable 2MB hugepage and ensure at least one is available.
Then you can run `cargo run --release --bin two_thread_cal > result.log`
Various scripts are also included that have been used to parse the log.
`analyse.sh` -> `analyse_csv.py` -> `analyse_median.py` Is used to analyse the timing histograms
`extract_analysis_csv.sh` Is used to extract the attacker model results.
The python scripts requires an environment (such as a virtual env) with the packages in `cache_utils/requirements.txt`
## AES T-table set-up and usage
One needs an OpenSSL built with the no-asm and the no-hw flags installed in ~/openssl (the path is in aes-t-tables/cargo.sh and can be changed).
You then need to extract the T-table addresses, this can be done using `nm libcrypto.so | grep "Te[0-4]"`, and update those in aes-t-tables/src/main.rs
You'll also want to update the thresholds in main.rs using the results from the calibration.
You can then run `./cargo.sh run --release > result.log`
## Covert Channel benchmark
Do the general set-up, update the thresholds for Naive channels in main.rs and then run `cargo run --release | tee results.log`
# Crate documentation
- `cpuid` is a small crate that handles CPU microarchitecture identification and provides info about what is known about it
- `cache_utils` contains utilities related to cache attacks
- `cache_side_channel` defines the interface cache side channels have to implement
- `basic_timing_cache_channel` contains generic implementations of Naive and Optimised cache side channels, that just require providing the actual operation used
- `flush_flush` and `flush_reload` are tiny crates that use `basic_timing_cache_channel` to export Flush+Flush and Flush+Reload primitives
- `turn_lock` is the synchronisation primitive used by `cache_utils` and the `covert_channel_evaluation`.
- `covert_channel_evaluation` is a generic implementation of a `covert_channel` benchmark
- `covert_channel_benchmark` calls the previous implementation over the 3 channels.
# rust version
Known good nightly :
- rustc 1.54.0-nightly (eab201df7 2021-06-09)
- rustc 1.55.0-nightly (885399992 2021-07-06)
# License
Apache-2.0 or MIT

3
README.md Normal file
View File

@ -0,0 +1,3 @@
See README-Calibration.md
Merging the READMEs of the various papers' code is in progress.

View File

@ -3,15 +3,16 @@ name = "aes-t-tables"
version = "0.1.0"
authors = ["GuillaumeDIDIER <guillaume.didier95@hotmail.fr>"]
edition = "2018"
license = "MIT OR Apache-2.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
openssl = "0.10.33"
cache_utils = { path = "../cache_utils" }
memmap2 = "0.2.1"
rand = "0.8.3"
nix = "0.20.0"
memmap2 = "0.9.4"
rand = "0.8.5"
nix = "0.28.0"
cache_side_channel = { path = "../cache_side_channel" }
flush_flush = { path = "../flush_flush" }
flush_reload = { path = "../flush_reload" }

View File

@ -1,4 +1,11 @@
# SPDX-FileCopyrightText: 2021 Guillaume DIDIER
#
# SPDX-License-Identifier: Apache-2.0
# SPDX-License-Identifier: MIT
#/bin/bash
# Change this to the correct openssl installation path
export OPENSSL_DIR=$(readlink -f ~/openssl)
export X86_64_UNKNOWN_LINUX_GNU_OPENSSL_DIR=$OPENSSL_DIR
export PKG_CONFIG_PATH=$OPENSSL_DIR

View File

@ -9,4 +9,4 @@ edition = "2018"
[dependencies]
cache_utils = { path = "../cache_utils" }
cache_side_channel = { path = "../cache_side_channel" }
nix = "0.20.0"
nix = "0.28.0"

View File

@ -13,7 +13,6 @@ use cache_side_channel::table_side_channel::{
MultipleTableCacheSideChannel, SingleTableCacheSideChannel, TableAttackResult,
TableCacheSideChannel,
};
use cache_side_channel::SideChannelError::AddressNotReady;
use cache_side_channel::{
BitIterator, CacheStatus, ChannelFatalError, ChannelHandle, CoreSpec, CovertChannel,
MultipleAddrCacheSideChannel, SideChannelError, SingleAddrCacheSideChannel,
@ -26,9 +25,9 @@ use cache_utils::calibration::{
CFLUSH_BUCKET_NUMBER, CFLUSH_BUCKET_SIZE, CFLUSH_NUM_ITER, CLFLUSH_NUM_ITERATION_AV, PAGE_LEN,
SP, VPN,
};
use cache_utils::complex_addressing::{CacheAttackSlicing, CacheSlicing};
use cache_utils::complex_addressing::{CacheAttackSlicing};
use cache_utils::mmap::MMappedMemory;
use cache_utils::{find_core_per_socket, flush, maccess, noop};
use cache_utils::{find_core_per_socket, flush, maccess};
use nix::sched::sched_getaffinity;
use nix::sched::CpuSet;
use nix::unistd::Pid;
@ -314,7 +313,7 @@ impl<T: TimingChannelPrimitives> TopologyAwareTimingChannel<T> {
// Select the proper core
for (av, (global_error_pred, thresholds)) in res.iter() {
for (av, (global_error_pred, _thresholds)) in res.iter() {
if global_error_pred.error_rate() < best_error_rate {
best_av = *av;
best_error_rate = global_error_pred.error_rate();
@ -625,7 +624,7 @@ impl<T: TimingChannelPrimitives> MultipleAddrCacheSideChannel for TopologyAwareT
pages.into_iter().map(|(k, v)| v),
self.calibration_strategy,
) {
Err(e) => {
Err(_e) => {
return Err(ChannelFatalError::Oops);
}
Ok(r) => r,
@ -740,7 +739,7 @@ impl<T: TimingChannelPrimitives> CovertChannel for TopologyAwareTimingChannel<T>
unsafe fn ready_page(&mut self, page: *const u8) -> Result<Self::CovertChannelHandle, ()> {
let vpn: VPN = get_vpn(page);
// Check if the page has already been readied. If so should error out ?
if let Some(preferred) = self.preferred_address.get(&vpn) {
if self.preferred_address.get(&vpn).is_some() {
return Err(());
}
@ -758,7 +757,7 @@ impl<T: TimingChannelPrimitives> CovertChannel for TopologyAwareTimingChannel<T>
pages.into_iter(),
self.calibration_strategy,
) {
Err(e) => {
Err(_e) => {
return Err(());
}
Ok(r) => r,
@ -816,7 +815,7 @@ impl<T: TimingChannelPrimitives> CovertChannel for TopologyAwareTimingChannel<T>
calibration_epoch: self.calibration_epoch,
},
};
let r = unsafe { self.prepare_one_impl(&mut handle.0) }.unwrap();
unsafe { self.prepare_one_impl(&mut handle.0) }.unwrap();
return Ok(handle);
}

View File

@ -7,5 +7,5 @@ edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
nix = "0.20.0"
bit_field = "0.10.1"
nix = "0.28.0"
bit_field = "0.10.2"

View File

@ -41,7 +41,7 @@ pub fn restore_affinity(cpu_set: &CpuSet) {
sched_setaffinity(Pid::from_raw(0), &cpu_set).unwrap();
}
#[must_use = "This result must be used to restore affinity"]
//#[must_use = "This result must be used to restore affinity"]
pub fn set_affinity(cpu_set: &CpuSet) -> Result<CpuSet, nix::Error> {
let old = sched_getaffinity(Pid::from_raw(0))?;
sched_setaffinity(Pid::from_raw(0), &cpu_set)?;
@ -161,7 +161,7 @@ impl<'a> BitIterator<'a> {
}
}
pub fn atEnd(&self) -> bool {
pub fn at_end(&self) -> bool {
self.byte_index >= self.bytes.len()
}
}

View File

@ -118,11 +118,11 @@ impl<T: SingleAddrCacheSideChannel> SingleTableCacheSideChannel<T::Handle> for T
SideChannelError::AddressNotCalibrated(_addr) => unimplemented!(),
},
}
for iteration in 0..100 {
for _iteration in 0..100 {
self.victim_single(victim);
let r = unsafe { self.test_single(addr, true) };
match r {
Ok(status) => {}
Ok(_status) => {}
Err(e) => match e {
SideChannelError::NeedRecalibration => panic!(),
SideChannelError::FatalError(e) => {
@ -193,7 +193,7 @@ impl<T: MultipleAddrCacheSideChannel> MultipleTableCacheSideChannel<T::Handle> f
batch.push(&mut **addr);
let mut hits: HashMap<*const u8, u32> = HashMap::new();
let mut misses: HashMap<*const u8, u32> = HashMap::new();
for i in 1..T::MAX_ADDR {
for _i in 1..T::MAX_ADDR {
if let Some(addr) = addr_iter.next() {
batch.push(&mut **addr);
} else {

View File

@ -3,6 +3,7 @@ name = "cache_utils"
version = "0.1.0"
authors = ["Guillaume DIDIER <guillaume.didier.2014@polytechnique.org>"]
edition = "2018"
license = "MIT OR Apache-2.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@ -10,12 +11,13 @@ edition = "2018"
polling_serial = { path = "../polling_serial", optional = true }
vga_buffer = { path = "../vga_buffer", optional = true }
cpuid = { path = "../cpuid", default-features = false }
x86_64 = "0.14.1"
x86_64 = "0.15.1"
static_assertions = "1.1.0"
itertools = { version = "0.10.0", default-features = false }
itertools = { version = "0.12.1", default-features = false }
atomic = "0.6.0"
nix = { version = "0.20.0", optional = true }
libc = { version = "0.2.92", optional = true }
nix = { version = "0.28.0", optional = true, features = ["process", "mman", "sched"] }
libc = { version = "0.2.153", optional = true }
hashbrown = { version = "0.11.2", optional = true }
turn_lock = { path = "../turn_lock", optional = true}
lazy_static = "1.4.0"

3
cache_utils/Readme.md Normal file
View File

@ -0,0 +1,3 @@
Notes for Fedora 39
- kernel-tools + kernel-tools-devel needed

231
cache_utils/analyse_csv.py Normal file
View File

@ -0,0 +1,231 @@
# SPDX-FileCopyrightText: 2021 Guillaume DIDIER
#
# SPDX-License-Identifier: Apache-2.0
# SPDX-License-Identifier: MIT
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tikzplotlib
from sys import exit
import wquantiles as wq
import numpy as np
from functools import partial
import sys
# For cyber cobay sanity check :
from gmpy2 import popcount
functions_i9_9900 = [
0b1111111111010101110101010001000000,
0b0110111110111010110001001000000000,
0b1111111000011111110010110000000000]
def complex_hash(addr):
r = 0
for f in reversed(functions_i9_9900):
r <<= 1
r |= (popcount(f & addr) & 1)
return r
def convert64(x):
return np.int64(int(x, base=16))
def convert8(x):
return np.int8(int(x, base=16))
df = pd.read_csv(sys.argv[1] + "-results_lite.csv.bz2",
dtype={
"main_core": np.int8,
"helper_core": np.int8,
# "address": int,
# "hash": np.int8,
"time": np.int16,
"clflush_remote_hit": np.int32,
"clflush_shared_hit": np.int32,
"clflush_miss_f": np.int32,
"clflush_local_hit_f": np.int32,
"clflush_miss_n": np.int32,
"clflush_local_hit_n": np.int32,
"reload_miss": np.int32,
"reload_remote_hit": np.int32,
"reload_shared_hit": np.int32,
"reload_local_hit": np.int32},
converters={'address': convert64, 'hash': convert8},
)
sample_columns = [
"clflush_remote_hit",
"clflush_shared_hit",
"clflush_miss_f",
"clflush_local_hit_f",
"clflush_miss_n",
"clflush_local_hit_n",
"reload_miss",
"reload_remote_hit",
"reload_shared_hit",
"reload_local_hit",
]
sample_flush_columns = [
"clflush_remote_hit",
"clflush_shared_hit",
"clflush_miss_f",
"clflush_local_hit_f",
"clflush_miss_n",
"clflush_local_hit_n",
]
slice_mapping = pd.read_csv(sys.argv[1] + ".slices.csv")
core_mapping = pd.read_csv(sys.argv[1] + ".cores.csv")
def remap_core(key):
def remap(core):
remapped = core_mapping.iloc[core]
return remapped[key]
return remap
df["main_socket"] = df["main_core"].apply(remap_core("socket"))
df["main_core_fixed"] = df["main_core"].apply(remap_core("core"))
df["main_ht"] = df["main_core"].apply(remap_core("hthread"))
df["helper_socket"] = df["helper_core"].apply(remap_core("socket"))
df["helper_core_fixed"] = df["helper_core"].apply(remap_core("core"))
df["helper_ht"] = df["helper_core"].apply(remap_core("hthread"))
# slice_mapping = {3: 0, 1: 1, 2: 2, 0: 3}
slice_remap = lambda h: slice_mapping["slice_group"].iloc[h]
df["slice_group"] = df["hash"].apply(slice_remap)
print(df.columns)
#df["Hash"] = df["Addr"].apply(lambda x: (x >> 15)&0x3)
addresses = df["address"].unique()
print(addresses)
print(*[bin(a) for a in addresses], sep='\n')
print(df.head())
print(df["hash"].unique())
min_time = df["time"].min()
max_time = df["time"].max()
q10s = [wq.quantile(df["time"], df[col], 0.1) for col in sample_flush_columns]
q90s = [wq.quantile(df["time"], df[col], 0.9) for col in sample_flush_columns]
graph_upper = int(((max(q90s) + 19) // 10) * 10)
graph_lower = int(((min(q10s) - 10) // 10) * 10)
# graph_lower = (min_time // 10) * 10
# graph_upper = ((max_time + 9) // 10) * 10
print("graphing between {}, {}".format(graph_lower, graph_upper))
df_main_core_0 = df[df["main_core"] == 0]
#df_helper_core_0 = df[df["helper_core"] == 0]
colours = ["b", "r", "g", "y"]
def custom_hist(x, *y, **kwargs):
for (i, yi) in enumerate(y):
kwargs["color"] = colours[i]
sns.distplot(x, range(graph_lower, graph_upper), hist_kws={"weights": yi, "histtype":"step"}, kde=False, **kwargs)
custom_hist(df["time"], df["clflush_miss_n"], df["clflush_remote_hit"])
tikzplotlib.save("fig-hist-all.tex")#, axis_width=r'0.175\textwidth', axis_height=r'0.25\textwidth')
plt.show()
attacker = 2
victim = 7
slice = 14
df_ax_vx_sx = df[(df["hash"] == slice) & (df["main_core"] == attacker) & (df["helper_core"] == victim)]
custom_hist(df_ax_vx_sx["time"], df_ax_vx_sx["clflush_miss_n"], df_ax_vx_sx["clflush_remote_hit"])
tikzplotlib.save("fig-hist-good-A{}V{}S{}.tex".format(attacker,victim,slice))#, axis_width=r'0.175\textwidth', axis_height=r'0.25\textwidth')
plt.show()
attacker = 9
victim = 4
slice = 8
df_ax_vx_sx = df[(df["hash"] == slice) & (df["main_core"] == attacker) & (df["helper_core"] == victim)]
custom_hist(df_ax_vx_sx["time"], df_ax_vx_sx["clflush_miss_n"], df_ax_vx_sx["clflush_remote_hit"])
tikzplotlib.save("fig-hist-bad-A{}V{}S{}.tex".format(attacker,victim,slice))#, axis_width=r'0.175\textwidth', axis_height=r'0.25\textwidth')
plt.show()
g = sns.FacetGrid(df_main_core_0, col="helper_core", row="hash", legend_out=True)
g2 = sns.FacetGrid(df, col="main_core", row="hash", legend_out=True)
# Color convention here :
# Blue = miss
# Red = Remote Hit
# Green = Local Hit
# Yellow = Shared Hit
g.map(custom_hist, "time", "clflush_miss_n", "clflush_remote_hit", "clflush_local_hit_n", "clflush_shared_hit")
g2.map(custom_hist, "time", "clflush_miss_n", "clflush_remote_hit", "clflush_local_hit_n", "clflush_shared_hit")
# g.map(sns.distplot, "time", hist_kws={"weights": df["clflush_hit"]}, kde=False)
#plt.show()
#plt.figure()
#df_mcf6 = df[df["main_core_fixed"] == 6]
#df_mcf6_slg7 = df_mcf6[df_mcf6["slice_group"] == 7]
#g3 = sns.FacetGrid(df_mcf6_slg7, row="helper_core_fixed", col="main_ht")
#g3.map(custom_hist, "time", "clflush_miss_n", "clflush_remote_hit", "clflush_local_hit_n", "clflush_shared_hit")
#g4 = sns.FacetGrid(df_mcf6_slg7, row="helper_core_fixed", col="helper_ht")
#g4.map(custom_hist, "time", "clflush_miss_n", "clflush_remote_hit", "clflush_local_hit_n", "clflush_shared_hit")
def stat(x, key):
return wq.median(x["time"], x[key])
miss = df.groupby(["main_core", "helper_core", "hash"]).apply(stat, "clflush_miss_n")
hit_remote = df.groupby(["main_core", "helper_core", "hash"]).apply(stat, "clflush_remote_hit")
hit_local = df.groupby(["main_core", "helper_core", "hash"]).apply(stat, "clflush_local_hit_n")
hit_shared = df.groupby(["main_core", "helper_core", "hash"]).apply(stat, "clflush_shared_hit")
stats = miss.reset_index()
stats.columns = ["main_core", "helper_core", "hash", "clflush_miss_n"]
stats["clflush_remote_hit"] = hit_remote.values
stats["clflush_local_hit_n"] = hit_local.values
stats["clflush_shared_hit"] = hit_shared.values
stats.to_csv(sys.argv[1] + ".stats.csv", index=False)
#print(stats.to_string())
plt.show()
exit(0)
g = sns.FacetGrid(stats, row="Core")
g.map(sns.distplot, 'Miss', bins=range(100, 480), color="r")
g.map(sns.distplot, 'Hit', bins=range(100, 480))
plt.show()
#stats["clflush_miss_med"] = stats[[0]].apply(lambda x: x["miss_med"])
#stats["clflush_hit_med"] = stats[[0]].apply(lambda x: x["hit_med"])
#del df[[0]]
#print(hit.to_string(), miss.to_string())
# test = pd.DataFrame({"value" : [0, 5], "weight": [5, 1]})
# plt.figure()
# sns.distplot(test["value"], hist_kws={"weights": test["weight"]}, kde=False)
exit(0)

View File

@ -0,0 +1,281 @@
# SPDX-FileCopyrightText: 2021 Guillaume DIDIER
#
# SPDX-License-Identifier: Apache-2.0
# SPDX-License-Identifier: MIT
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sys import exit
import numpy as np
from scipy import optimize
import sys
# TODO
# sys.argv[1] should be the root
# with root-result_lite.csv.bz2 the result
# and .stats.csv
# root.slices a slice mapping - done
# root.cores a core + socket mapping - done -> move to analyse csv ?
#
# Facet plot with actual dot cloud + plot the linear regression
# each row is a slice
# each row is an origin core
# each column a helper core if applicable
stats = pd.read_csv(sys.argv[1] + ".stats.csv",
dtype={
"main_core": np.int8,
"helper_core": np.int8,
# "address": int,
"hash": np.int8,
# "time": np.int16,
"clflush_remote_hit": np.float64,
"clflush_shared_hit": np.float64,
# "clflush_miss_f": np.int32,
# "clflush_local_hit_f": np.int32,
"clflush_miss_n": np.float64,
"clflush_local_hit_n": np.float64,
# "reload_miss": np.int32,
# "reload_remote_hit": np.int32,
# "reload_shared_hit": np.int32,
# "reload_local_hit": np.int32
}
)
slice_mapping = pd.read_csv(sys.argv[1] + ".slices.csv")
core_mapping = pd.read_csv(sys.argv[1] + ".cores.csv")
print(core_mapping.to_string())
print(slice_mapping.to_string())
print("core {} is mapped to '{}'".format(4, repr(core_mapping.iloc[4])))
min_time_miss = stats["clflush_miss_n"].min()
max_time_miss = stats["clflush_miss_n"].max()
def remap_core(key):
def remap(core):
remapped = core_mapping.iloc[core]
return remapped[key]
return remap
# Annotate each measurement with the topology attributes of both the
# origin ("main") and helper core.
stats["main_socket"] = stats["main_core"].apply(remap_core("socket"))
stats["main_core_fixed"] = stats["main_core"].apply(remap_core("core"))
stats["main_ht"] = stats["main_core"].apply(remap_core("hthread"))
stats["helper_socket"] = stats["helper_core"].apply(remap_core("socket"))
stats["helper_core_fixed"] = stats["helper_core"].apply(remap_core("core"))
stats["helper_ht"] = stats["helper_core"].apply(remap_core("hthread"))

# slice_mapping = {3: 0, 1: 1, 2: 2, 0: 3}
# Translate the raw slice hash into the renumbered slice group.
stats["slice_group"] = stats["hash"].apply(lambda h: slice_mapping["slice_group"].iloc[h])

# Round the miss-latency range outwards to multiples of 10 cycles for the bins.
graph_lower_miss = int((min_time_miss // 10) * 10)
graph_upper_miss = int(((max_time_miss + 9) // 10) * 10)

print("Graphing from {} to {}".format(graph_lower_miss, graph_upper_miss))

# One clflush-miss latency histogram per (origin core, slice group) pair.
g_ = sns.FacetGrid(stats, col="main_core_fixed", row="slice_group")

g_.map(sns.distplot, 'clflush_miss_n', bins=range(graph_lower_miss, graph_upper_miss), color="b")

#g.map(sns.scatterplot, 'slice_group', 'clflush_local_hit_n', color="g")

plt.show()
# also explains remote
# shared needs some thinking as there is something weird happening there.
#
# M 0 1 2 3 4 5 6 7
#
print(stats.head())

# Number of distinct physical cores seen in the data set.
num_core = len(stats["main_core_fixed"].unique())
print("Found {}".format(num_core))
def miss_topology(main_core_fixed, slice_group, C, h):
    """Linear ring model of the clflush-miss latency.

    A constant cost C, plus h per hop from the requesting core to the
    slice, plus h per hop from the slice down past position -1
    (presumably the memory controller -- TODO confirm)."""
    core_to_slice = abs(main_core_fixed - slice_group)
    slice_to_memory = abs(slice_group + 1)
    return C + h * core_to_slice + h * slice_to_memory
def miss_topology_df(x, C, h):
    """Vectorised wrapper: apply miss_topology row-wise to DataFrame x
    (needs columns main_core_fixed and slice_group)."""
    def row_predict(row):
        return miss_topology(row["main_core_fixed"], row["slice_group"], C, h)
    return x.apply(row_predict, axis=1)
# Fit (C, h) of the miss model against the measured miss latencies.
res_miss = optimize.curve_fit(miss_topology_df, stats[["main_core_fixed", "slice_group"]], stats["clflush_miss_n"])
print("Miss topology:")
print(res_miss)

# Assumed ring positions: the memory controller sits just below core 0,
# the GPU (if any) just above the last core. TODO confirm on the target CPU.
memory = -1
gpu_if_any = num_core
def exclusive_hit_topology_gpu(main_core, slice_group, helper_core, C, h1, h2):
    """Remote (exclusive) hit latency model, assuming the ring includes a
    GPU stop at position gpu_if_any.

    The slice forwards the request toward the half of the ring it sits
    in; if the helper lies the other way, the message wraps around via
    the memory/GPU ends of the ring."""
    round_trip = gpu_if_any - memory
    main_leg = abs(main_core - slice_group)
    lower_half = slice_group <= num_core / 2
    if lower_half and helper_core < slice_group:
        # Sent towards higher cores first: wraps past the GPU end.
        helper_leg = abs(round_trip - (helper_core - memory))
    elif (not lower_half) and helper_core > slice_group:
        # Sent towards lower cores first: wraps past the memory end.
        helper_leg = abs(helper_core - memory)
    else:
        # Helper is on the same side as the forwarding direction.
        helper_leg = abs(helper_core - slice_group)
    return C + h1 * main_leg + h2 * helper_leg
def exclusive_hit_topology_gpu_df(x, C, h1, h2):
    """Row-wise application of exclusive_hit_topology_gpu over DataFrame x."""
    def row_predict(row):
        return exclusive_hit_topology_gpu(
            row["main_core_fixed"], row["slice_group"], row["helper_core_fixed"], C, h1, h2)
    return x.apply(row_predict, axis=1)
def exclusive_hit_topology_gpu2(main_core, slice_group, helper_core, C, h1, h2):
    """Variant of exclusive_hit_topology_gpu with the GPU one stop
    further out (round trip is gpu_if_any + 1 - memory)."""
    round_trip = gpu_if_any + 1 - memory
    main_leg = abs(main_core - slice_group)
    lower_half = slice_group <= num_core / 2
    if lower_half and helper_core < slice_group:
        # Sent towards higher cores first: wraps past the GPU end.
        helper_leg = abs(round_trip - (helper_core - memory))
    elif (not lower_half) and helper_core > slice_group:
        # Sent towards lower cores first: wraps past the memory end.
        helper_leg = abs(helper_core - memory)
    else:
        helper_leg = abs(helper_core - slice_group)
    return C + h1 * main_leg + h2 * helper_leg
def exclusive_hit_topology_gpu2_df(x, C, h1, h2):
    """Row-wise application of exclusive_hit_topology_gpu2 over DataFrame x."""
    def row_predict(row):
        return exclusive_hit_topology_gpu2(
            row["main_core_fixed"], row["slice_group"], row["helper_core_fixed"], C, h1, h2)
    return x.apply(row_predict, axis=1)
# unlikely
def exclusive_hit_topology_nogpu(main_core, slice_group, helper_core, C, h1, h2):
    """Remote-hit latency model for a ring without a GPU stop:
    the far end of the ring is the last core (num_core - 1)."""
    round_trip = (num_core - 1) - memory
    main_leg = abs(main_core - slice_group)
    lower_half = slice_group <= num_core / 2
    if lower_half and helper_core < slice_group:
        # Sent towards higher cores first: wraps around the far end.
        helper_leg = abs(round_trip - (helper_core - memory))
    elif (not lower_half) and helper_core > slice_group:
        # Sent towards lower cores first: wraps past the memory end.
        helper_leg = abs(helper_core - memory)
    else:
        helper_leg = abs(helper_core - slice_group)
    return C + h1 * main_leg + h2 * helper_leg
def exclusive_hit_topology_nogpu_df(x, C, h1, h2):
    """Row-wise application of exclusive_hit_topology_nogpu over DataFrame x."""
    def row_predict(row):
        return exclusive_hit_topology_nogpu(
            row["main_core_fixed"], row["slice_group"], row["helper_core_fixed"], C, h1, h2)
    return x.apply(row_predict, axis=1)
#res_no_gpu = optimize.curve_fit(exclusive_hit_topology_nogpu_df, stats[["main_core_fixed", "slice_group", "helper_core_fixed"]], stats["clflush_remote_hit"])
#print("Exclusive hit topology (No GPU):")
#print(res_no_gpu)

# Fit (C, h1, h2) of the GPU ring model against the remote-hit latencies.
res_gpu = optimize.curve_fit(exclusive_hit_topology_gpu_df, stats[["main_core_fixed", "slice_group", "helper_core_fixed"]], stats["clflush_remote_hit"])
print("Exclusive hit topology (GPU):")
print(res_gpu)

#res_gpu2 = optimize.curve_fit(exclusive_hit_topology_gpu2_df, stats[["main_core_fixed", "slice_group", "helper_core_fixed"]], stats["clflush_remote_hit"])
#print("Exclusive hit topology (GPU2):")
#print(res_gpu2)
def remote_hit_topology_2(x, C, h):
    """Triangle model for the remote-hit latency: constant C plus h per
    hop along each of the three pairwise distances between origin core,
    slice and helper core. x is a row/mapping with the *_fixed columns."""
    origin = x["main_core_fixed"]
    slice_pos = x["slice_group"]
    helper = x["helper_core_fixed"]
    perimeter = abs(origin - slice_pos) + abs(slice_pos - helper) + abs(helper - origin)
    return C + h * perimeter
def shared_hit_topology_1(x, C, h):
    """Shared-hit latency model: origin-to-slice distance plus the
    longer of the slice's distances to origin and helper.
    x is a row/mapping with the *_fixed columns."""
    origin = x["main_core_fixed"]
    slice_pos = x["slice_group"]
    helper = x["helper_core_fixed"]
    farthest = max(abs(slice_pos - origin), abs(slice_pos - helper))
    return C + h * abs(origin - slice_pos) + h * farthest
def plot_func(function, *params):
    """Bind `function` to the fitted `params` and wrap it as a FacetGrid
    mapper: given the x values of a facet, draw the model's predictions
    as a seaborn line plot. Also echoes x for debugging."""
    def plot_it(x, **kwargs):
        print(x)
        predicted = function(x, *params)
        sns.lineplot(x, predicted, **kwargs)
    return plot_it
# Overlay the fitted miss model on the measured data, per origin core.
stats["predicted_miss"] = miss_topology_df(stats, *(res_miss[0]))

figure_median_I = sns.FacetGrid(stats, col="main_core_fixed")
figure_median_I.map(sns.scatterplot, 'slice_group', 'clflush_miss_n', color="b")
figure_median_I.map(sns.lineplot, 'slice_group', 'predicted_miss', color="b")
figure_median_I.set_titles(col_template="$A$ = {col_name}")
figure_median_I.tight_layout()

# Export the figure as TikZ (for inclusion in a LaTeX document).
import tikzplotlib

tikzplotlib.save("fig-median-I.tex", axis_width=r'0.175\textwidth', axis_height=r'0.25\textwidth')

plt.show()

#stats["predicted_remote_hit_no_gpu"] = exclusive_hit_topology_nogpu_df(stats, *(res_no_gpu[0]))
stats["predicted_remote_hit_gpu"] = exclusive_hit_topology_gpu_df(stats, *(res_gpu[0]))
#stats["predicted_remote_hit_gpu2"] = exclusive_hit_topology_gpu_df(stats, *(res_gpu2[0]))

# Remote-hit measurements and predictions for origin core 0 only.
stats_A0 = stats[stats["main_core_fixed"] == 0]

figure_median_E_A0 = sns.FacetGrid(stats_A0, col="slice_group")
figure_median_E_A0.map(sns.scatterplot, 'helper_core_fixed', 'clflush_remote_hit', color="r")
figure_median_E_A0.map(sns.lineplot, 'helper_core_fixed', 'predicted_remote_hit_gpu', color="r")
figure_median_E_A0.set_titles(col_template="$S$ = {col_name}")

tikzplotlib.save("fig-median-E-A0.tex", axis_width=r'0.175\textwidth', axis_height=r'0.25\textwidth')

plt.show()

# Exploratory grids: miss / local-hit latencies versus slice and core.
g = sns.FacetGrid(stats, row="main_core_fixed")
g.map(sns.scatterplot, 'slice_group', 'clflush_miss_n', color="b")
g.map(sns.scatterplot, 'slice_group', 'clflush_local_hit_n', color="g")

g0 = sns.FacetGrid(stats, row="slice_group")
g0.map(sns.scatterplot, 'main_core_fixed', 'clflush_miss_n', color="b")
g0.map(sns.scatterplot, 'main_core_fixed', 'clflush_local_hit_n', color="g") # this gives away the trick I think !
# possibility of sending a general please discard this everyone around one of the ring + wait for ACK - direction depends on the core.

# Remote-hit measurements against the fitted GPU model, per (core, slice).
g2 = sns.FacetGrid(stats, row="main_core_fixed", col="slice_group")
g2.map(sns.scatterplot, 'helper_core_fixed', 'clflush_remote_hit', color="r")
g2.map(sns.lineplot, 'helper_core_fixed', 'predicted_remote_hit_gpu', color="r")
#g2.map(sns.lineplot, 'helper_core_fixed', 'predicted_remote_hit_gpu2', color="g")
#g2.map(sns.lineplot, 'helper_core_fixed', 'predicted_remote_hit_no_gpu', color="g")
#g2.map(plot_func(exclusive_hit_topology_nogpu_df, *(res_no_gpu[0])), 'helper_core_fixed', color="g")

# Shared-hit latencies (no model fitted yet).
g3 = sns.FacetGrid(stats, row="main_core_fixed", col="slice_group")
g3.map(sns.scatterplot, 'helper_core_fixed', 'clflush_shared_hit', color="y")

# more ideas needed

plt.show()

15
cache_utils/run-2-thread-cal.sh Executable file
View File

@ -0,0 +1,15 @@
# Run the two-thread calibration benchmark under a stable CPU
# configuration: performance governor, Intel Turbo Boost disabled.
# The sudo commands are echoed to stderr first so the user can audit
# (and later undo) them.
>&2 echo "# Running the following commands with sudo to set-up"
>&2 echo 'sudo sh -c "echo 1 > /sys/devices/system/cpu/intel_pstate/no_turbo"'
>&2 echo sudo cpupower frequency-set -g performance
# performance cpu frequency governor
sudo cpupower frequency-set -g performance
# No Turbo Boost
sudo sh -c "echo 1 > /sys/devices/system/cpu/intel_pstate/no_turbo"
# Forward any extra arguments to the benchmark binary.
cargo run --release --bin two_thread_cal "$@"
>&2 echo "# Please run the following commands to restore configuration"
>&2 echo 'sudo sh -c "echo 0 > /sys/devices/system/cpu/intel_pstate/no_turbo"'
>&2 echo sudo cpupower frequency-set -g powersave

View File

@ -1,5 +1,10 @@
#![deny(unsafe_op_in_unsafe_fn)]
// SPDX-FileCopyrightText: 2021 Guillaume DIDIER
//
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
use cache_utils::calibration::{
accumulate, calibrate_fixed_freq_2_thread, calibration_result_to_ASVP, flush_and_reload,
get_cache_attack_slicing, load_and_flush, map_values, only_flush, only_reload, reduce,
@ -14,12 +19,13 @@ use nix::unistd::Pid;
use core::arch::x86_64 as arch_x86;
use core::cmp::min;
use std::cmp::Ordering;
use std::cmp::min;
use std::collections::HashMap;
use std::process::Command;
use std::str::from_utf8;
unsafe fn multiple_access(p: *const u8) {
unsafe {
maccess::<u8>(p);
@ -34,7 +40,9 @@ unsafe fn multiple_access(p: *const u8) {
}
}
const SIZE: usize = 2 << 20;
//const SIZE: usize = 2 << 20;
const SIZE: usize = 4 << 10;
const MAX_SEQUENCE: usize = 2048 * 64;
#[derive(Clone, Copy, Hash, Eq, PartialEq, Debug)]
@ -111,7 +119,8 @@ fn main() {
println!("Number of cores per socket: {}", core_per_socket);
let m = MMappedMemory::new(SIZE, true, false, |i: usize| i as u8);
// TODO Enable Hugepage here if needed.
let m = MMappedMemory::new(SIZE, false, false, |i: usize| i as u8);
let array = m.slice();
let cache_line_size = 64;

View File

@ -349,7 +349,7 @@ fn calibrate_fixed_freq_2_thread_impl<I: Iterator<Item = (usize, usize)>, T>(
main_turn_handle.next();
params = main_turn_handle.wait();
// join thread.
helper_thread.unwrap().join();
helper_thread.unwrap().join().expect("Failed to join thread");
// FIXME error handling
}
}

View File

@ -4,6 +4,7 @@ use crate::complex_addressing::{cache_slicing, CacheAttackSlicing, CacheSlicing}
use crate::{flush, maccess, rdtsc_fence};
use cpuid::MicroArchitecture;
use core::cmp::min;
use core::arch::x86_64 as arch_x86;
#[cfg(feature = "no_std")]
@ -16,7 +17,6 @@ extern crate alloc;
use crate::calibration::Verbosity::*;
use alloc::vec;
use alloc::vec::Vec;
use core::cmp::min;
use itertools::Itertools;
use core::hash::Hash;
use core::ops::{Add, AddAssign};

View File

@ -12,6 +12,8 @@ use hashbrown::HashMap;
#[cfg(feature = "no_std")]
use hashbrown::HashSet;
#[cfg(feature = "use_std")]
use std::vec::Vec;
#[cfg(feature = "use_std")]
use std::collections::HashMap;
#[cfg(feature = "use_std")]
@ -78,7 +80,7 @@ pub fn cache_slicing(
match vendor {
CPUVendor::Intel => {
match uarch {
MicroArchitecture::KabyLake | MicroArchitecture::Skylake => ComplexAddressing(
MicroArchitecture::KabyLake | MicroArchitecture::Skylake | MicroArchitecture::WhiskeyLake => ComplexAddressing(
&SANDYBRIDGE_TO_SKYLAKE_FUNCTIONS[0..((trailing_zeros + 1) as usize)],
),
MicroArchitecture::CoffeeLake => {

View File

@ -7,6 +7,7 @@ use std::collections::LinkedList;
use std::ptr::copy_nonoverlapping;
use std::sync::Mutex;
use std::vec::Vec;
use core::arch::global_asm;
struct WXRange {
start: usize,

View File

@ -1,7 +1,5 @@
#![cfg_attr(feature = "no_std", no_std)]
#![feature(ptr_internals)]
#![feature(linked_list_cursors)]
#![feature(global_asm)]
#![allow(clippy::missing_safety_doc)]
#![deny(unsafe_op_in_unsafe_fn)]

View File

@ -1,15 +1,15 @@
#![cfg(feature = "use_std")]
use core::borrow::{Borrow, BorrowMut};
use core::ffi::c_void;
use core::mem::{size_of, MaybeUninit};
use core::mem::size_of;
use core::num::NonZeroUsize;
use core::ops::{Deref, DerefMut};
use core::ptr;
use core::ptr::null_mut;
use core::ptr::Unique;
use core::ptr::NonNull;
use core::slice::{from_raw_parts, from_raw_parts_mut};
use nix::errno::Errno::EINVAL;
use std::convert::TryFrom;
use nix::sys::mman;
use core::ptr;
/* from linux kernel headers.
#define HUGETLB_FLAG_ENCODE_SHIFT 26
@ -22,7 +22,7 @@ use nix::sys::mman;
*/
/** Safety issue : if T is non triviably constructable and destructable this is dangerous */
pub struct MMappedMemory<T> {
pointer: Unique<T>,
pointer: NonNull<T>,
size: usize,
}
@ -34,37 +34,30 @@ impl<T> MMappedMemory<T> {
initializer: impl Fn(usize) -> T,
) -> Result<MMappedMemory<T>, nix::Error> {
assert_ne!(size_of::<T>(), 0);
if let Some(p) = unsafe {
let p = mman::mmap(
null_mut(),
size * size_of::<T>(),
mman::ProtFlags::PROT_READ
| mman::ProtFlags::PROT_WRITE
| if executable {
mman::ProtFlags::PROT_EXEC
} else {
mman::ProtFlags::PROT_READ
},
match unsafe {
mman::mmap_anonymous(
None,
NonZeroUsize::try_from(size * size_of::<T>()).unwrap(),
mman::ProtFlags::PROT_READ | mman::ProtFlags::PROT_WRITE,
mman::MapFlags::MAP_PRIVATE
| mman::MapFlags::MAP_ANONYMOUS
| if huge {
mman::MapFlags::MAP_HUGETLB
} else {
mman::MapFlags::MAP_ANONYMOUS
},
-1,
0,
)?;
let pointer_T = p as *mut T;
Unique::new(pointer_T)
} {
let mut s = MMappedMemory { pointer: p, size };
for i in 0..s.size {
unsafe { ptr::write(s.pointer.as_ptr().add(i), initializer(i)) };
mman::MapFlags::MAP_HUGETLB
} else {
mman::MapFlags::MAP_ANONYMOUS
},
)
}
{
Ok(p) => {
let mut s : MMappedMemory<T> = MMappedMemory { pointer: p.cast(), size };
for i in 0..s.size {
unsafe { ptr::write(s.pointer.as_ptr().add(i), initializer(i)) };
}
Ok(s)
} Err(e) => {
Err(e)
}
Ok(s)
} else {
Err(nix::Error::Sys(EINVAL))
}
}
/*
@ -118,11 +111,8 @@ impl<T> MMappedMemory<T> {
impl<T> Drop for MMappedMemory<T> {
fn drop(&mut self) {
for i in 0..self.size {
unsafe { ptr::drop_in_place(self.pointer.as_ptr().add(i)) };
}
unsafe {
mman::munmap(self.pointer.as_ptr() as *mut c_void, self.size).unwrap();
mman::munmap(self.pointer.cast(), self.size).unwrap();
}
}
}
@ -164,3 +154,7 @@ impl<T> BorrowMut<[T]> for MMappedMemory<T> {
self.slice_mut()
}
}
// It owns the memory, so it should be safe to send.
unsafe impl<T> Send for MMappedMemory<T> {}

View File

@ -7,9 +7,9 @@ edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
rand = "0.8.3"
bit_field = "0.10.1"
rand = "0.8.5"
bit_field = "0.10.2"
turn_lock = { path = "../turn_lock" }
cache_utils = { path = "../cache_utils" }
nix = "0.20.0"
nix = "0.28.0"
cache_side_channel = { path = "../cache_side_channel" }

View File

@ -95,13 +95,13 @@ fn transmit_thread<T: CovertChannel>(
let mut bit_iter = BitIterator::new(&result);
let start_time = std::time::Instant::now();
let start = unsafe { rdtsc_fence() };
while !bit_iter.atEnd() {
while !bit_iter.at_end() {
for page in params.handles.iter_mut() {
let mut handle = page.wait();
unsafe { params.covert_channel.transmit(&mut *handle, &mut bit_iter) };
bit_sent += T::BIT_PER_PAGE;
page.next();
if bit_iter.atEnd() {
if bit_iter.at_end() {
break;
}
}

View File

@ -7,7 +7,7 @@ edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
itertools = { version = "0.10.0", default-features = false }
itertools = { version = "0.13.0", default-features = false }
[features]
use_std = ["itertools/use_std"]

View File

@ -12,12 +12,8 @@ use core::arch::x86_64;
//use cstr_core::{CStr, CString};
use crate::CPUVendor::{Intel, Unknown};
use crate::MicroArchitecture::{
Airmont, Bonnell, Broadwell, CannonLake, CascadeLake, CoffeeLake, CooperLake, Core, Goldmont,
GoldmontPlus, Haswell, HaswellE, IceLake, IvyBridge, IvyBridgeE, KabyLake, KnightsLanding,
KnightsMill, Nehalem, NetBurst, Penryn, PentiumM, Saltwell, SandyBridge, Silvermont, Skylake,
SkylakeServer, Tremont, Westmere, Yonah, P5, P6,
};
use crate::MicroArchitecture::{Airmont, Bonnell, Broadwell, CannonLake, CascadeLake, CoffeeLake, CooperLake, Core, Goldmont, GoldmontPlus, Haswell, HaswellE, IceLake, IvyBridge, IvyBridgeE, KabyLake, KnightsLanding, KnightsMill, Nehalem, NetBurst, Penryn, PentiumM, Saltwell, SandyBridge, Silvermont, Skylake, SkylakeServer, Tremont, Westmere, Yonah, P5, P6, WhiskeyLake};
//#[cfg(feature = "std")]
//use std::ffi::{CStr, CString};
@ -152,6 +148,7 @@ pub enum MicroArchitecture {
// supports Intel 64 architecture.
CascadeLake,
CannonLake, // Only in volume 4 ??
WhiskeyLake,
// The 2nd generation Intel® Xeon® Processor Scalable Family is based on the Cascade Lake product and supports
// Intel 64 architecture.
IceLake,
@ -184,17 +181,20 @@ impl MicroArchitecture {
// Intel® CoreTM processors based on Coffee Lake microarchitecture, Intel® Xeon® E processors based on
// Coffee Lake microarchitecture
0x06_8E => {
if stepping <= 9 {
KabyLake
} else {
CoffeeLake
match stepping {
9 => KabyLake,
10 => CoffeeLake,
11 | 12 => WhiskeyLake,
_ => {return None;}
}
}
0x06_9E => {
if stepping <= 9 {
KabyLake
} else {
} else if stepping <= 13{
CoffeeLake
} else {
return None;
}
}
// Future Intel® Xeon® processors based on Ice Lake microarchitecture
@ -289,11 +289,11 @@ impl MicroArchitecture {
}
}
pub fn get_family_model_stepping() -> Option<(CPUVendor, u32, u32)> {
let vendor = CPUVendor::get_cpu_vendor();
// Warning this might not support AMD
if true {
if vendor == Intel {
// TODO refactor some of this into a separate function.
// has cpuid
let vendor = CPUVendor::get_cpu_vendor();
let eax = unsafe { x86_64::__cpuid(1) }.eax;
let stepping = eax & 0xf;
let mut model = (eax >> 4) & 0xf;

View File

@ -0,0 +1,44 @@
[package]
name = "dendrobates_tinctoreus_azureus"
version = "0.1.0"
authors = ["Guillaume DIDIER <guillaume.didier.2014@polytechnique.org>"]
edition = "2018"
[package.metadata.bootimage]
#run-command = ["./scripts/bochs.sh", "{}"]
run-command = ["./scripts/run.sh", "{}"]
test-args = ["qemu"]
run-args = ["bochs"]
#run-command = ["qemu-system-x86_64", "-drive", "format=raw,file={}"]
#test-args = ["-device", "isa-debug-exit,iobase=0xf4,iosize=0x04"]
test-success-exit-code = 33 # (0x10 << 1) | 1
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
x86_64 = "0.15.1"
vga_buffer = { path = "../vga_buffer" }
polling_serial = { path = "../polling_serial" }
volatile = "0.4.4"
linked_list_allocator = "0.9.0"
cache_utils = { path = "../cache_utils", features = ["no_std"], default-features = false }
arrayref = "0.3.6"
[dependencies.lazy_static]
version = "1.4.0"
features = ["spin_no_std"]
[dependencies.bootloader]
version = "0.9.16"
features = ["sse", "map_physical_memory"]
#[patch.crates-io]
#bootloader = { path = "../bootloader" }
[[test]]
name = "panic_test"
harness = false
[[test]]
name = "stack_overflow"
harness = false

View File

@ -9,5 +9,5 @@ edition = "2018"
[dependencies]
cache_utils = { path = "../cache_utils" }
cache_side_channel = { path = "../cache_side_channel" }
nix = "0.20.0"
nix = "0.28.0"
basic_timing_cache_channel = { path = "../basic_timing_cache_channel" }

View File

@ -9,5 +9,5 @@ edition = "2018"
[dependencies]
cache_utils = { path = "../cache_utils" }
cache_side_channel = { path = "../cache_side_channel" }
nix = "0.20.0"
nix = "0.28.0"
basic_timing_cache_channel = { path = "../basic_timing_cache_channel" }

View File

@ -7,7 +7,7 @@ edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
x86_64 = "0.14.1"
x86_64 = "0.15.1"
spin = "0.9.0"
[dependencies.lazy_static]