Cache utils updates

commit 654b9b2cdb
parent 33df427053
Author: root
Date: 2020-09-22 14:27:52 +02:00

15 changed files with 787 additions and 56 deletions

@@ -0,0 +1,15 @@
#!/bin/sh
NAME=`basename "$1" .txt.bz2`
echo $NAME
#bzcat $1 | awk '/^Iteration [:digit:]*[.]*/ ' > "${NAME}-iterations.txt"
#rm "${NAME}-results.csv.bz2"
#TODO forward NAME to awk script
#awk -v logname="${NAME}" -f `dirname $0`/analyse_iterations.awk < "${NAME}-iterations.txt" | bzip2 -c > "${NAME}-results.csv.bz2" # This uses system to split off awk scripts doing the analysis
bzgrep "RESULT:" "$1" | cut -b 8- | bzip2 -c > "${NAME}-results.csv.bz2"
# remove lines with no data points
bzgrep -v -e "0,0,0,0,0,0,0,0,0,0$" "${NAME}-results.csv.bz2" | bzip2 -c > "${NAME}-results_lite.csv.bz2"
#paste -d"," *.csv > combined.csv
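The compressed CSV this script emits feeds straight into the analysis scripts below; pandas infers bz2 compression from the extension. A minimal sketch (file name assumed):

import pandas as pd
df = pd.read_csv("machine-results_lite.csv.bz2")  # compression inferred from .bz2
print(df.head())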

@@ -0,0 +1,139 @@
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sys import exit
import wquantiles as wq
import numpy as np
from functools import partial
import sys
def convert64(x):
    return np.int64(int(x, base=16))

def convert8(x):
    return np.int8(int(x, base=16))
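# note: "address" and "hash" are stored as hex strings in the CSV, so read_csv
# below gets these converters instead of plain dtype entries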
df = pd.read_csv(sys.argv[1],
dtype={
"main_core": np.int8,
"helper_core": np.int8,
# "address": int,
# "hash": np.int8,
"time": np.int16,
"clflush_remote_hit": np.int32,
"clflush_shared_hit": np.int32,
"clflush_miss_f": np.int32,
"clflush_local_hit_f": np.int32,
"clflush_miss_n": np.int32,
"clflush_local_hit_n": np.int32,
"reload_miss": np.int32,
"reload_remote_hit": np.int32,
"reload_shared_hit": np.int32,
"reload_local_hit": np.int32},
converters={'address': convert64, 'hash': convert8},
)
sample_columns = [
"clflush_remote_hit",
"clflush_shared_hit",
"clflush_miss_f",
"clflush_local_hit_f",
"clflush_miss_n",
"clflush_local_hit_n",
"reload_miss",
"reload_remote_hit",
"reload_shared_hit",
"reload_local_hit",
]
sample_flush_columns = [
"clflush_remote_hit",
"clflush_shared_hit",
"clflush_miss_f",
"clflush_local_hit_f",
"clflush_miss_n",
"clflush_local_hit_n",
]
print(df.columns)
#df["Hash"] = df["Addr"].apply(lambda x: (x >> 15)&0x3)
print(df.head())
print(df["hash"].unique())
min_time = df["time"].min()
max_time = df["time"].max()
q10s = [wq.quantile(df["time"], df[col], 0.1) for col in sample_flush_columns]
q90s = [wq.quantile(df["time"], df[col], 0.9) for col in sample_flush_columns]
graph_upper = int(((max(q90s) + 19) // 10) * 10)
graph_lower = int(((min(q10s) - 10) // 10) * 10)
# graph_lower = (min_time // 10) * 10
# graph_upper = ((max_time + 9) // 10) * 10
print("graphing between {}, {}".format(graph_lower, graph_upper))
df_main_core_0 = df[df["main_core"] == 0]
#df_helper_core_0 = df[df["helper_core"] == 0]
g = sns.FacetGrid(df_main_core_0, col="helper_core", row="hash", legend_out=True)
g2 = sns.FacetGrid(df, col="main_core", row="hash", legend_out=True)
colours = ["b", "r", "g", "y"]
def custom_hist(x, *y, **kwargs):
    for (i, yi) in enumerate(y):
        kwargs["color"] = colours[i]
        sns.distplot(x, range(graph_lower, graph_upper), hist_kws={"weights": yi, "histtype": "step"}, kde=False, **kwargs)
# Color convention here :
# Blue = miss
# Red = Remote Hit
# Green = Local Hit
# Yellow = Shared Hit
g.map(custom_hist, "time", "clflush_miss_n", "clflush_remote_hit", "clflush_local_hit_n", "clflush_shared_hit")
g2.map(custom_hist, "time", "clflush_miss_n", "clflush_remote_hit", "clflush_local_hit_n", "clflush_shared_hit")
# g.map(sns.distplot, "time", hist_kws={"weights": df["clflush_hit"]}, kde=False)
plt.show()
#plt.figure()
exit(0)
def stat(x, key):
    return wq.median(x["Time"], x[key])
miss = df.groupby(["Core", "Hash"]).apply(stat, "ClflushMiss")
stats = miss.reset_index()
stats.columns = ["Core", "Hash", "Miss"]
hit = df.groupby(["Core", "Hash"]).apply(stat, "ClflushHit")
stats["Hit"] = hit.values
print(stats.to_string())
g = sns.FacetGrid(stats, row="Core")
g.map(sns.distplot, 'Miss', bins=range(100, 480), color="r")
g.map(sns.distplot, 'Hit', bins=range(100, 480))
plt.show()
#stats["clflush_miss_med"] = stats[[0]].apply(lambda x: x["miss_med"])
#stats["clflush_hit_med"] = stats[[0]].apply(lambda x: x["hit_med"])
#del df[[0]]
#print(hit.to_string(), miss.to_string())
# test = pd.DataFrame({"value" : [0, 5], "weight": [5, 1]})
# plt.figure()
# sns.distplot(test["value"], hist_kws={"weights": test["weight"]}, kde=False)
exit(0)

@@ -0,0 +1,15 @@
#!/bin/sh
NAME=`basename "$1" .txt.bz2`
echo $NAME
#bzcat $1 | awk '/^Iteration [:digit:]*[.]*/ ' > "${NAME}-iterations.txt"
#rm "${NAME}-results.csv.bz2"
#TODO forward NAME to awk script
#awk -v logname="${NAME}" -f `dirname $0`/analyse_iterations.awk < "${NAME}-iterations.txt" | bzip2 -c > "${NAME}-results.csv.bz2" # This uses system to split off awk scripts doing the analysis
bzgrep "RESULT:" "$1" | cut -b 8- | bzip2 -c > "${NAME}-results.csv.bz2"
# remove lines with no data points
bzgrep -v -e "0,0,0,0,0,0,0,0,0,0$" "${NAME}-results.csv.bz2" | bzip2 -c > "${NAME}-results_lite.csv.bz2"
#paste -d"," *.csv > combined.csv

@@ -0,0 +1,139 @@
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sys import exit
import wquantiles as wq
import numpy as np
from functools import partial
import sys
def convert64(x):
    return np.int64(int(x, base=16))

def convert8(x):
    return np.int8(int(x, base=16))
df = pd.read_csv(sys.argv[1],
dtype={
"main_core": np.int8,
"helper_core": np.int8,
# "address": int,
# "hash": np.int8,
"time": np.int16,
"clflush_remote_hit": np.int32,
"clflush_shared_hit": np.int32,
"clflush_miss_f": np.int32,
"clflush_local_hit_f": np.int32,
"clflush_miss_n": np.int32,
"clflush_local_hit_n": np.int32,
"reload_miss": np.int32,
"reload_remote_hit": np.int32,
"reload_shared_hit": np.int32,
"reload_local_hit": np.int32},
converters={'address': convert64, 'hash': convert8},
)
sample_columns = [
"clflush_remote_hit",
"clflush_shared_hit",
"clflush_miss_f",
"clflush_local_hit_f",
"clflush_miss_n",
"clflush_local_hit_n",
"reload_miss",
"reload_remote_hit",
"reload_shared_hit",
"reload_local_hit",
]
sample_flush_columns = [
"clflush_remote_hit",
"clflush_shared_hit",
"clflush_miss_f",
"clflush_local_hit_f",
"clflush_miss_n",
"clflush_local_hit_n",
]
print(df.columns)
#df["Hash"] = df["Addr"].apply(lambda x: (x >> 15)&0x3)
print(df.head())
print(df["hash"].unique())
min_time = df["time"].min()
max_time = df["time"].max()
q10s = [wq.quantile(df["time"], df[col], 0.1) for col in sample_flush_columns]
q90s = [wq.quantile(df["time"], df[col], 0.9) for col in sample_flush_columns]
graph_upper = int(((max(q90s) + 19) // 10) * 10)
graph_lower = int(((min(q10s) - 10) // 10) * 10)
# graph_lower = (min_time // 10) * 10
# graph_upper = ((max_time + 9) // 10) * 10
print("graphing between {}, {}".format(graph_lower, graph_upper))
df_main_core_0 = df[df["main_core"] == 0]
#df_helper_core_0 = df[df["helper_core"] == 0]
g = sns.FacetGrid(df_main_core_0, col="helper_core", row="hash", legend_out=True)
g2 = sns.FacetGrid(df, col="main_core", row="hash", legend_out=True)
colours = ["b", "r", "g", "y"]
def custom_hist(x, *y, **kwargs):
    for (i, yi) in enumerate(y):
        kwargs["color"] = colours[i]
        sns.distplot(x, range(graph_lower, graph_upper), hist_kws={"weights": yi, "histtype": "step"}, kde=False, **kwargs)
# Color convention here :
# Blue = miss
# Red = Remote Hit
# Green = Local Hit
# Yellow = Shared Hit
g.map(custom_hist, "time", "clflush_miss_n", "clflush_remote_hit", "clflush_local_hit_n", "clflush_shared_hit")
g2.map(custom_hist, "time", "clflush_miss_n", "clflush_remote_hit", "clflush_local_hit_n", "clflush_shared_hit")
# g.map(sns.distplot, "time", hist_kws={"weights": df["clflush_hit"]}, kde=False)
plt.show()
#plt.figure()
exit(0)
def stat(x, key):
    return wq.median(x["Time"], x[key])
miss = df.groupby(["Core", "Hash"]).apply(stat, "ClflushMiss")
stats = miss.reset_index()
stats.columns = ["Core", "Hash", "Miss"]
hit = df.groupby(["Core", "Hash"]).apply(stat, "ClflushHit")
stats["Hit"] = hit.values
print(stats.to_string())
g = sns.FacetGrid(stats, row="Core")
g.map(sns.distplot, 'Miss', bins=range(100, 480), color="r")
g.map(sns.distplot, 'Hit', bins=range(100, 480))
plt.show()
#stats["clflush_miss_med"] = stats[[0]].apply(lambda x: x["miss_med"])
#stats["clflush_hit_med"] = stats[[0]].apply(lambda x: x["hit_med"])
#del df[[0]]
#print(hit.to_string(), miss.to_string())
# test = pd.DataFrame({"value" : [0, 5], "weight": [5, 1]})
# plt.figure()
# sns.distplot(test["value"], hist_kws={"weights": test["weight"]}, kde=False)
exit(0)

@@ -72,6 +72,31 @@ sample_flush_columns = [
     "clflush_miss_n",
     "clflush_local_hit_n",
 ]
+slice_mapping = pd.read_csv(sys.argv[1] + ".slices.csv")
+core_mapping = pd.read_csv(sys.argv[1] + ".cores.csv")
+def remap_core(key):
+    def remap(core):
+        remapped = core_mapping.iloc[core]
+        return remapped[key]
+    return remap
+df["main_socket"] = df["main_core"].apply(remap_core("socket"))
+df["main_core_fixed"] = df["main_core"].apply(remap_core("core"))
+df["main_ht"] = df["main_core"].apply(remap_core("hthread"))
+df["helper_socket"] = df["helper_core"].apply(remap_core("socket"))
+df["helper_core_fixed"] = df["helper_core"].apply(remap_core("core"))
+df["helper_ht"] = df["helper_core"].apply(remap_core("hthread"))
+# slice_mapping = {3: 0, 1: 1, 2: 2, 0: 3}
+df["slice_group"] = df["hash"].apply(lambda h: slice_mapping["slice_group"].iloc[h])
 print(df.columns)
 #df["Hash"] = df["Addr"].apply(lambda x: (x >> 15)&0x3)
@@ -125,7 +150,13 @@ g2.map(custom_hist, "time", "clflush_miss_n", "clflush_remote_hit", "clflush_local_hit_n", "clflush_shared_hit")
 #plt.show()
 #plt.figure()
+df_mcf6 = df[df["main_core_fixed"] == 6]
+df_mcf6_slg7 = df_mcf6[df_mcf6["slice_group"] == 7]
+g3 = sns.FacetGrid(df_mcf6_slg7, row="helper_core_fixed", col="main_ht")
+g3.map(custom_hist, "time", "clflush_miss_n", "clflush_remote_hit", "clflush_local_hit_n", "clflush_shared_hit")
+g4 = sns.FacetGrid(df_mcf6_slg7, row="helper_core_fixed", col="helper_ht")
+g4.map(custom_hist, "time", "clflush_miss_n", "clflush_remote_hit", "clflush_local_hit_n", "clflush_shared_hit")
 def stat(x, key):
     return wq.median(x["time"], x[key])

@@ -68,7 +68,7 @@ stats["helper_ht"] = stats["helper_core"].apply(remap_core("hthread"))
 # slice_mapping = {3: 0, 1: 1, 2: 2, 0: 3}
-stats["slice_group"] = stats["hash"].apply(lambda h: slice_mapping.iloc[h])
+stats["slice_group"] = stats["hash"].apply(lambda h: slice_mapping["slice_group"].iloc[h])
 graph_lower_miss = int((min_time_miss // 10) * 10)
 graph_upper_miss = int(((max_time_miss + 9) // 10) * 10)
@@ -86,14 +86,28 @@ g = sns.FacetGrid(stats, row="main_core_fixed")
 g.map(sns.scatterplot, 'slice_group', 'clflush_miss_n', color="b")
 g.map(sns.scatterplot, 'slice_group', 'clflush_local_hit_n', color="g")
-g2 = sns.FacetGrid(stats, row="main_core_fixed", col="slice_group")
-g2.map(sns.scatterplot, 'helper_core_fixed', 'clflush_remote_hit', color="r")
+g0 = sns.FacetGrid(stats, row="slice_group")
+g0.map(sns.scatterplot, 'main_core_fixed', 'clflush_miss_n', color="b")
+g0.map(sns.scatterplot, 'main_core_fixed', 'clflush_local_hit_n', color="g")  # this gives away the trick, I think!
+# possibility of sending a general "please discard this" to everyone around the ring + wait for ACK - direction depends on the core.
+#
+# M 0 1 2 3 4 5 6 7
+#
+# also explains remote
+# shared needs some thinking as there is something weird happening there.
+g3 = sns.FacetGrid(stats, row="main_core_fixed", col="slice_group")
+g3.map(sns.scatterplot, 'helper_core_fixed', 'clflush_shared_hit', color="y")
 print(stats.head())
+num_core = len(stats["main_core_fixed"].unique())
+print("Found {}".format(num_core))
 def miss_topology(x, C, h):
     main_core = x["main_core_fixed"]
@@ -102,21 +116,93 @@ def miss_topology(x, C, h):
 res = optimize.curve_fit(miss_topology, stats[["main_core_fixed", "slice_group"]], stats["clflush_miss_n"])
+print("Miss topology:")
 print(res)
-def local_hit_topology(x, C, h):
-    main_core = x["main_core_fixed"]
-    slice_group = x["slice_group"]
-    return C + h * abs(main_core - slice_group)
+memory = -1
+gpu_if_any = num_core
+def exclusive_hit_topology_gpu(main_core, slice_group, helper_core, C, h1, h2):
+    round_trip = gpu_if_any - memory
+    if slice_group <= num_core/2:
+        # send message towards higher cores first
+        if helper_core < slice_group:
+            r = C + h1 * abs(main_core - slice_group) + h2 * abs(round_trip - (helper_core - memory))
+        else:
+            r = C + h1 * abs(main_core - slice_group) + h2 * abs(helper_core - slice_group)
+    else:
+        # send message toward lower cores first
+        if helper_core > slice_group:
+            r = C + h1 * abs(main_core - slice_group) + h2 * abs(helper_core - memory)
+        else:
+            r = C + h1 * abs(main_core - slice_group) + h2 * abs(helper_core - slice_group)
+    return r
+def exclusive_hit_topology_gpu_df(x, C, h1, h2):
+    return x.apply(lambda x, C, h1, h2: exclusive_hit_topology_gpu(x["main_core_fixed"], x["slice_group"], x["helper_core_fixed"], C, h1, h2), args=(C, h1, h2), axis=1)
+def exclusive_hit_topology_gpu2(main_core, slice_group, helper_core, C, h1, h2):
+    round_trip = gpu_if_any + 1 - memory
+    if slice_group <= num_core/2:
+        # send message towards higher cores first
+        if helper_core < slice_group:
+            r = C + h1 * abs(main_core - slice_group) + h2 * abs(round_trip - (helper_core - memory))
+        else:
+            r = C + h1 * abs(main_core - slice_group) + h2 * abs(helper_core - slice_group)
+    else:
+        # send message toward lower cores first
+        if helper_core > slice_group:
+            r = C + h1 * abs(main_core - slice_group) + h2 * abs(helper_core - memory)
+        else:
+            r = C + h1 * abs(main_core - slice_group) + h2 * abs(helper_core - slice_group)
+    return r
+def exclusive_hit_topology_gpu2_df(x, C, h1, h2):
+    return x.apply(lambda x, C, h1, h2: exclusive_hit_topology_gpu2(x["main_core_fixed"], x["slice_group"], x["helper_core_fixed"], C, h1, h2), args=(C, h1, h2), axis=1)
+# unlikely
+def exclusive_hit_topology_nogpu(main_core, slice_group, helper_core, C, h1, h2):
+    round_trip = (num_core - 1) - memory
+    if slice_group <= num_core/2:
+        # send message towards higher cores first
+        if helper_core < slice_group:
+            r = C + h1 * abs(main_core - slice_group) + h2 * abs(round_trip - (helper_core - memory))
+        else:
+            r = C + h1 * abs(main_core - slice_group) + h2 * abs(helper_core - slice_group)
+    else:
+        # send message toward lower cores first
+        if helper_core > slice_group:
+            r = C + h1 * abs(main_core - slice_group) + h2 * abs(helper_core - memory)
+        else:
+            r = C + h1 * abs(main_core - slice_group) + h2 * abs(helper_core - slice_group)
+    return r
+def exclusive_hit_topology_nogpu_df(x, C, h1, h2):
+    return x.apply(lambda x, C, h1, h2: exclusive_hit_topology_nogpu(x["main_core_fixed"], x["slice_group"], x["helper_core_fixed"], C, h1, h2), args=(C, h1, h2), axis=1)
+#res_no_gpu = optimize.curve_fit(exclusive_hit_topology_nogpu_df, stats[["main_core_fixed", "slice_group", "helper_core_fixed"]], stats["clflush_remote_hit"])
+#print("Exclusive hit topology (No GPU):")
+#print(res_no_gpu)
+res_gpu = optimize.curve_fit(exclusive_hit_topology_gpu_df, stats[["main_core_fixed", "slice_group", "helper_core_fixed"]], stats["clflush_remote_hit"])
+print("Exclusive hit topology (GPU):")
+print(res_gpu)
+res_gpu2 = optimize.curve_fit(exclusive_hit_topology_gpu2_df, stats[["main_core_fixed", "slice_group", "helper_core_fixed"]], stats["clflush_remote_hit"])
+print("Exclusive hit topology (GPU2):")
+print(res_gpu2)
+def remote_hit_topology_1(x, C, h):
+    main_core = x["main_core_fixed"]
+    slice_group = x["slice_group"]
+    helper_core = x["helper_core_fixed"]
+    return C + h * abs(main_core - slice_group) + h * abs(slice_group - helper_core)
 def remote_hit_topology_2(x, C, h):
@@ -133,6 +219,33 @@ def shared_hit_topology_1(x, C, h):
     return C + h * abs(main_core - slice_group) + h * max(abs(slice_group - main_core), abs(slice_group - helper_core))
+def plot_func(function, *params):
+    def plot_it(x, **kwargs):
+        # plot_x = []
+        # plot_y = []
+        # for x in set(x):
+        #     plot_y.append(function(x, *params))
+        # plot_x = x
+        print(x)
+        plot_y = function(x, *params)
+        sns.lineplot(x, plot_y, **kwargs)
+    return plot_it
+#stats["predicted_remote_hit_no_gpu"] = exclusive_hit_topology_nogpu_df(stats, *(res_no_gpu[0]))
+stats["predicted_remote_hit_gpu"] = exclusive_hit_topology_gpu_df(stats, *(res_gpu[0]))
+stats["predicted_remote_hit_gpu2"] = exclusive_hit_topology_gpu2_df(stats, *(res_gpu2[0]))
+g2 = sns.FacetGrid(stats, row="main_core_fixed", col="slice_group")
+g2.map(sns.scatterplot, 'helper_core_fixed', 'clflush_remote_hit', color="r")
+g2.map(sns.lineplot, 'helper_core_fixed', 'predicted_remote_hit_gpu', color="r")
+g2.map(sns.lineplot, 'helper_core_fixed', 'predicted_remote_hit_gpu2', color="g")
+#g2.map(sns.lineplot, 'helper_core_fixed', 'predicted_remote_hit_no_gpu', color="g")
+#g2.map(plot_func(exclusive_hit_topology_nogpu_df, *(res_no_gpu[0])), 'helper_core_fixed', color="g")
+g3 = sns.FacetGrid(stats, row="main_core_fixed", col="slice_group")
+g3.map(sns.scatterplot, 'helper_core_fixed', 'clflush_shared_hit', color="y")
 # more ideas needed
 plt.show()
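The fitted model encodes the ring hypothesis from the comments above: a request travels from the main core to the slice, and the invalidation travels around the ring (memory at one end, GPU at the other) in a direction that depends on the slice position. A worked example with made-up constants (C, h1, h2 stand in for whatever curve_fit returns; 8 cores assumed):

num_core, memory, gpu_if_any = 8, -1, 8
def ring_hit(main_core, slice_group, helper_core, C, h1, h2):
    # same shape as exclusive_hit_topology_gpu above
    round_trip = gpu_if_any - memory
    if slice_group <= num_core / 2:
        if helper_core < slice_group:  # message goes up the ring first
            return C + h1 * abs(main_core - slice_group) + h2 * abs(round_trip - (helper_core - memory))
        return C + h1 * abs(main_core - slice_group) + h2 * abs(helper_core - slice_group)
    if helper_core > slice_group:      # message goes down the ring first
        return C + h1 * abs(main_core - slice_group) + h2 * abs(helper_core - memory)
    return C + h1 * abs(main_core - slice_group) + h2 * abs(helper_core - slice_group)
print(ring_hit(0, 2, 1, 400.0, 2.0, 3.0))  # 400 + 2*|0-2| + 3*|9-2| = 425.0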

cache_utils/2T/analyse.sh (new executable file, 15 lines)
@@ -0,0 +1,15 @@
#!/bin/sh
NAME=`basename "$1" .txt.bz2`
echo $NAME
#bzcat $1 | awk '/^Iteration [:digit:]*[.]*/ ' > "${NAME}-iterations.txt"
#rm "${NAME}-results.csv.bz2"
#TODO forward NAME to awk script
#awk -v logname="${NAME}" -f `dirname $0`/analyse_iterations.awk < "${NAME}-iterations.txt" | bzip2 -c > "${NAME}-results.csv.bz2" # This uses system to split off awk scripts doing the analysis
bzgrep "RESULT:" "$1" | cut -b 8- | bzip2 -c > "${NAME}-results.csv.bz2"
# remove lines with no data points
bzgrep -v -e "0,0,0,0,0,0,0,0,0,0$" "${NAME}-results.csv.bz2" | bzip2 -c > "${NAME}-results_lite.csv.bz2"
#paste -d"," *.csv > combined.csv

@@ -0,0 +1,139 @@
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sys import exit
import wquantiles as wq
import numpy as np
from functools import partial
import sys
def convert64(x):
    return np.int64(int(x, base=16))

def convert8(x):
    return np.int8(int(x, base=16))
df = pd.read_csv(sys.argv[1],
dtype={
"main_core": np.int8,
"helper_core": np.int8,
# "address": int,
# "hash": np.int8,
"time": np.int16,
"clflush_remote_hit": np.int32,
"clflush_shared_hit": np.int32,
"clflush_miss_f": np.int32,
"clflush_local_hit_f": np.int32,
"clflush_miss_n": np.int32,
"clflush_local_hit_n": np.int32,
"reload_miss": np.int32,
"reload_remote_hit": np.int32,
"reload_shared_hit": np.int32,
"reload_local_hit": np.int32},
converters={'address': convert64, 'hash': convert8},
)
sample_columns = [
"clflush_remote_hit",
"clflush_shared_hit",
"clflush_miss_f",
"clflush_local_hit_f",
"clflush_miss_n",
"clflush_local_hit_n",
"reload_miss",
"reload_remote_hit",
"reload_shared_hit",
"reload_local_hit",
]
sample_flush_columns = [
"clflush_remote_hit",
"clflush_shared_hit",
"clflush_miss_f",
"clflush_local_hit_f",
"clflush_miss_n",
"clflush_local_hit_n",
]
print(df.columns)
#df["Hash"] = df["Addr"].apply(lambda x: (x >> 15)&0x3)
print(df.head())
print(df["hash"].unique())
min_time = df["time"].min()
max_time = df["time"].max()
q10s = [wq.quantile(df["time"], df[col], 0.05) for col in sample_flush_columns]
q90s = [wq.quantile(df["time"], df[col], 0.95) for col in sample_flush_columns]
graph_upper = int(((max(q90s) + 29) // 10) * 10)
graph_lower = int(((min(q10s) - 20) // 10) * 10)
# graph_lower = (min_time // 10) * 10
# graph_upper = ((max_time + 9) // 10) * 10
print("graphing between {}, {}".format(graph_lower, graph_upper))
df_main_core_0 = df[df["main_core"] == 0]
#df_helper_core_0 = df[df["helper_core"] == 0]
g = sns.FacetGrid(df_main_core_0, col="helper_core", row="hash", legend_out=True)
g2 = sns.FacetGrid(df, col="main_core", row="hash", legend_out=True)
colours = ["b", "r", "g", "y"]
def custom_hist(x, *y, **kwargs):
    for (i, yi) in enumerate(y):
        kwargs["color"] = colours[i]
        sns.distplot(x, range(graph_lower, graph_upper), hist_kws={"weights": yi, "histtype": "step"}, kde=False, **kwargs)
# Color convention here :
# Blue = miss
# Red = Remote Hit
# Green = Local Hit
# Yellow = Shared Hit
g.map(custom_hist, "time", "clflush_miss_n", "clflush_remote_hit", "clflush_local_hit_n", "clflush_shared_hit")
g2.map(custom_hist, "time", "clflush_miss_n", "clflush_remote_hit", "clflush_local_hit_n", "clflush_shared_hit")
# g.map(sns.distplot, "time", hist_kws={"weights": df["clflush_hit"]}, kde=False)
plt.show()
#plt.figure()
exit(0)
def stat(x, key):
    return wq.median(x["Time"], x[key])
miss = df.groupby(["Core", "Hash"]).apply(stat, "ClflushMiss")
stats = miss.reset_index()
stats.columns = ["Core", "Hash", "Miss"]
hit = df.groupby(["Core", "Hash"]).apply(stat, "ClflushHit")
stats["Hit"] = hit.values
print(stats.to_string())
g = sns.FacetGrid(stats, row="Core")
g.map(sns.distplot, 'Miss', bins=range(100, 480), color="r")
g.map(sns.distplot, 'Hit', bins=range(100, 480))
plt.show()
#stats["clflush_miss_med"] = stats[[0]].apply(lambda x: x["miss_med"])
#stats["clflush_hit_med"] = stats[[0]].apply(lambda x: x["hit_med"])
#del df[[0]]
#print(hit.to_string(), miss.to_string())
# test = pd.DataFrame({"value" : [0, 5], "weight": [5, 1]})
# plt.figure()
# sns.distplot(test["value"], hist_kws={"weights": test["weight"]}, kde=False)
exit(0)

@@ -0,0 +1,11 @@
#!/bin/sh
NAME=`basename "$1" .txt.bz2`
echo $NAME
bzcat "$1" | awk '/^Iteration [[:digit:]]*[.]*/' > "${NAME}-iterations.txt"
rm -f "${NAME}-results.csv.bz2"
#TODO forward NAME to awk script
awk -v logname="${NAME}" -f `dirname $0`/analyse_iterations.awk < "${NAME}-iterations.txt" | bzip2 -c > "${NAME}-results.csv.bz2" # This uses system to split off awk scripts doing the analysis
bzgrep -v -e "0,0$" "${NAME}-results.csv.bz2" | bzip2 -c > "${NAME}-results_lite.csv.bz2"
#paste -d"," *.csv > combined.csv

@@ -0,0 +1,68 @@
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sys import exit
import wquantiles as wq
import numpy as np
from functools import partial
import sys
df = pd.read_csv(sys.argv[1], header=1, names=["Core", "Addr", "Hash", "Time", "ClflushHit", "ClflushMiss"], dtype={"Core": int, "Time": int, "ClflushHit": int, "ClflushMiss": int},
converters={'Addr': partial(int, base=16), 'Hash': partial(int, base=16)},
usecols=["Core", "Addr", "Hash", "Time", "ClflushHit", "ClflushMiss"]
)
print(df.columns)
#df["Hash"] = df["Addr"].apply(lambda x: (x >> 15)&0x3)
print(df.head())
print(df["Hash"].unique())
g = sns.FacetGrid(df, col="Core", row="Hash", legend_out=True)
def custom_hist(x, y1, y2, **kwargs):
    sns.distplot(x, range(100, 400), hist_kws={"weights": y1, "histtype": "step"}, kde=False, **kwargs)
    kwargs["color"] = "r"
    sns.distplot(x, range(100, 400), hist_kws={"weights": y2, "histtype": "step"}, kde=False, **kwargs)
g.map(custom_hist, "Time", "ClflushHit", "ClflushMiss")
# g.map(sns.distplot, "time", hist_kws={"weights": df["clflush_hit"]}, kde=False)
#plt.figure()
plt.show()
exit(0)
def stat(x, key):
    return wq.median(x["Time"], x[key])
miss = df.groupby(["Core", "Hash"]).apply(stat, "ClflushMiss")
stats = miss.reset_index()
stats.columns = ["Core", "Hash", "Miss"]
hit = df.groupby(["Core", "Hash"]).apply(stat, "ClflushHit")
stats["Hit"] = hit.values
print(stats.to_string())
g = sns.FacetGrid(stats, row="Core")
g.map(sns.distplot, 'Miss', bins=range(100, 480), color="r")
g.map(sns.distplot, 'Hit', bins=range(100, 480))
plt.show()
#stats["clflush_miss_med"] = stats[[0]].apply(lambda x: x["miss_med"])
#stats["clflush_hit_med"] = stats[[0]].apply(lambda x: x["hit_med"])
#del df[[0]]
#print(hit.to_string(), miss.to_string())
# test = pd.DataFrame({"value" : [0, 5], "weight": [5, 1]})
# plt.figure()
# sns.distplot(test["value"], hist_kws={"weights": test["weight"]}, kde=False)
exit(0)

@@ -0,0 +1,17 @@
BEGIN {
    i = 0
    print(logname)
}
{
    start = $0
    getline
    end = $0
    if (i == 0) {
        # generate header
        system("bzcat < "logname".txt.bz2 | awk '$0 == \""start"\",$0 == \""end"\"' | grep \"RESULT:\" | head -n 1 | cut -b 8- | awk '{print \"core,\" $0}'")
    }
    cut = "cut -b 8- | tail -n +2"
    system("bzcat < "logname".txt.bz2 | awk '$0 == \""start"\",$0 == \""end"\"' | grep \"RESULT:\" | " cut " | awk '{print \""i",\" $0}'")
    i = i + 1
}
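For reference, the same extraction expressed as a Python sketch (assuming the iteration marker lines are unique in the log):

import bz2
import sys
logname = sys.argv[1]                    # e.g. "machine" for machine.txt.bz2
with open(logname + "-iterations.txt") as f:
    marks = [line.rstrip("\n") for line in f]
with bz2.open(logname + ".txt.bz2", "rt") as log:
    lines = [line.rstrip("\n") for line in log]
for i, (start, end) in enumerate(zip(marks[0::2], marks[1::2])):
    lo, hi = lines.index(start), lines.index(end)
    results = [l[7:] for l in lines[lo:hi + 1] if l.startswith("RESULT:")]
    if i == 0:
        print("core," + results[0])      # emit the header row once
    for row in results[1:]:
        print("{},{}".format(i, row))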

@@ -176,8 +176,8 @@ fn main() {
     let r = calibrate_fixed_freq_2_thread(
         pointer,
-        64,
+        64, // FIXME : MAGIC
-        array.len() as isize >> 3,
+        array.len() as isize >> 3, // MAGIC
         &mut core_pairs.into_iter(),
         &operations,
         CalibrationOptions {
@@ -227,8 +227,8 @@ fn main() {
         Ok(results) => {
             for r in results {
                 let offset = r.offset;
-                let miss_hist = r.histogram[miss_index].clone();
+                let miss_hist = &r.histogram[miss_index];
-                let hit_hist = r.histogram[hit_index].clone();
+                let hit_hist = &r.histogram[hit_index];
                 if miss_hist.len() != hit_hist.len() {
                     panic!("Malformed results");
@@ -272,10 +272,10 @@ fn main() {
                         victim: result.helper_core as u8,
                     },
                     ResultAnalysis {
-                        miss: miss_hist,
+                        miss: miss_hist.clone(),
                         miss_cum_sum,
                         miss_total,
-                        hit: hit_hist,
+                        hit: hit_hist.clone(),
                         hit_cum_sum,
                         hit_total,
                         error_miss_less_than_hit,

@@ -3,7 +3,7 @@
 use crate::complex_addressing::{cache_slicing, CacheSlicing};
 use crate::{flush, maccess, rdtsc_fence};
-use cpuid::{CPUVendor, MicroArchitecture};
+use cpuid::MicroArchitecture;
 use core::arch::x86_64 as arch_x86;
 #[cfg(feature = "no_std")]

@@ -5,8 +5,6 @@ use cpuid::{CPUVendor, MicroArchitecture};
 extern crate alloc;
-#[cfg(feature = "no_std")]
-use alloc::collections::VecDeque;
 #[cfg(feature = "no_std")]
 use alloc::vec::Vec;
 #[cfg(feature = "no_std")]
@@ -18,8 +16,6 @@ use hashbrown::HashSet;
 use std::collections::HashMap;
 #[cfg(feature = "use_std")]
 use std::collections::HashSet;
-#[cfg(feature = "use_std")]
-use std::collections::VecDeque;
 #[derive(Debug, Copy, Clone)]
 pub struct SimpleAddressingParams {
@@ -72,20 +68,25 @@ pub fn cache_slicing(
     physical_cores: u8,
     vendor: CPUVendor,
     family_model_display: u32,
-    stepping: u32,
+    _stepping: u32,
 ) -> CacheSlicing {
     let trailing_zeros = physical_cores.trailing_zeros();
     if physical_cores != (1 << trailing_zeros) {
         return Unsupported;
     }
+    match vendor {
+        CPUVendor::Intel => {
     match uarch {
-        MicroArchitecture::KabyLake | MicroArchitecture::Skylake => {
-            ComplexAddressing(&SANDYBRIDGE_TO_SKYLAKE_FUNCTIONS[0..((trailing_zeros + 1) as usize)])
-        }
+        MicroArchitecture::KabyLake | MicroArchitecture::Skylake => ComplexAddressing(
+            &SANDYBRIDGE_TO_SKYLAKE_FUNCTIONS[0..((trailing_zeros + 1) as usize)],
+        ),
         MicroArchitecture::CoffeeLake => {
             if family_model_display == 0x6_9E {
-                ComplexAddressing(&COFFEELAKE_R_i9_FUNCTIONS[0..((trailing_zeros + 1) as usize)])
+                // TODO stepping should probably be involved here
+                ComplexAddressing(
+                    &COFFEELAKE_R_i9_FUNCTIONS[0..((trailing_zeros + 1) as usize)],
+                )
             } else {
                 ComplexAddressing(
                     &SANDYBRIDGE_TO_SKYLAKE_FUNCTIONS[0..((trailing_zeros + 1) as usize)],
@@ -96,14 +97,16 @@ pub fn cache_slicing(
         | MicroArchitecture::HaswellE
         | MicroArchitecture::Broadwell
         | MicroArchitecture::IvyBridge
-        | MicroArchitecture::IvyBridgeE => {
-            ComplexAddressing(&SANDYBRIDGE_TO_SKYLAKE_FUNCTIONS[0..((trailing_zeros) as usize)])
-        }
+        | MicroArchitecture::IvyBridgeE => ComplexAddressing(
+            &SANDYBRIDGE_TO_SKYLAKE_FUNCTIONS[0..((trailing_zeros) as usize)],
+        ),
         MicroArchitecture::Haswell => {
             if family_model_display == 0x06_46 {
                 ComplexAddressing(&CRYSTAL_WELL_FUNCTIONS[0..((trailing_zeros) as usize)])
             } else {
-                ComplexAddressing(&SANDYBRIDGE_TO_SKYLAKE_FUNCTIONS[0..((trailing_zeros) as usize)])
+                ComplexAddressing(
+                    &SANDYBRIDGE_TO_SKYLAKE_FUNCTIONS[0..((trailing_zeros) as usize)],
+                )
             }
         }
         MicroArchitecture::Nehalem | MicroArchitecture::Westmere => {
@@ -112,6 +115,10 @@ pub fn cache_slicing(
         _ => Unsupported,
     }
 }
+        CPUVendor::AMD => Unsupported,
+        _ => Unsupported,
+    }
+}
 fn hash(addr: usize, mask: usize) -> u8 {
     ((addr & mask).count_ones() & 1) as u8
@@ -126,7 +133,7 @@ impl CacheSlicing {
     }
     pub fn hash(&self, addr: usize) -> Option<u8> {
         match self {
-            SimpleAddressing(mask) => None, //Some(addr & *mask),
+            SimpleAddressing(mask) => Some(((addr >> mask.shift) & ((1 << mask.bits) - 1)) as u8),
             ComplexAddressing(masks) => {
                 let mut res = 0;
                 for mask in *masks {
@@ -143,7 +150,7 @@ impl CacheSlicing {
     // May work in the future for simple.
     fn pivot(&self, mask: isize) -> Vec<(u8, isize)> {
         match self {
-            ComplexAddressing(functions) => {
+            ComplexAddressing(_functions) => {
                 let mut matrix = Vec::new();
                 let mut i = 1;
@@ -248,7 +255,7 @@ impl CacheSlicing {
         result.insert(0, 0);
         for (slice_u, addr_u) in matrix {
-            if (slice_u != 0) {
+            if slice_u != 0 {
                 let mut tmp = HashMap::new();
                 for (slice_v, addr_v) in &result {
                     tmp.insert(slice_v ^ slice_u, addr_v ^ addr_u);
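Each entry of the slice-function tables is a bit mask over the physical address; the slice bit is the parity (XOR) of the selected address bits, exactly what fn hash computes above. The same computation in Python, for illustration:

def slice_bit(addr, mask):
    return bin(addr & mask).count("1") & 1  # parity of the masked address bits
print(slice_bit(0x12345678, 0x0F0F0F0F))  # 5 bits set in the masked value -> 1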

@@ -47,6 +47,28 @@ pub unsafe fn flush(p: *const u8) {
 pub fn noop<T>(_: *const T) {}
+#[cfg(feature = "use_std")]
+pub fn find_core_per_socket() -> u8 {
+    use std::process::Command;
+    use std::str::from_utf8;
+    let core_per_socket_out = Command::new("sh")
+        .arg("-c")
+        .arg("lscpu | grep socket | cut -b 22-")
+        .output()
+        .expect("Failed to detect cpu count");
+    //println!("{:#?}", core_per_socket_str);
+    let core_per_socket_str = from_utf8(&core_per_socket_out.stdout).unwrap();
+    //println!("Number of cores per socket: {}", cps_str);
+    let core_per_socket: u8 = core_per_socket_str[0..(core_per_socket_str.len() - 1)]
+        .parse()
+        .unwrap_or(0);
+    core_per_socket
+}
 // future enhancements
 // prefetch
 // long nop (64 nops)
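find_core_per_socket above shells out to lscpu and slices a fixed byte offset (cut -b 22-), which depends on the exact lscpu output format and locale. A label-based parse, sketched in Python for comparison:

import subprocess
out = subprocess.run(["lscpu"], capture_output=True, text=True).stdout
cores_per_socket = next(
    int(line.split(":")[1]) for line in out.splitlines()
    if line.startswith("Core(s) per socket")
)
print(cores_per_socket)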