-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathLeakageIdentification.py
More file actions
111 lines (87 loc) · 3.05 KB
/
LeakageIdentification.py
File metadata and controls
111 lines (87 loc) · 3.05 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
import keras.utils.np_utils
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, BatchNormalization
from tensorflow.keras.optimizers import Adam
import numpy
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from keras.utils.np_utils import to_categorical
import h5py
import os
os.environ["CUDA_VISIBLE_DEVICES"] = ""  # hide all GPUs: force CPU-only TF execution

# --- Configuration ---------------------------------------------------------
# TODO: Add path to ASCAD traces
TracePath = "ATMega8515_raw_traces.h5"  # HDF5 file holding raw power traces
# Sample range (within each trace) to analyze
start_sample = 30000
end_sample = 40000
# Network parameters
nrepochs = 20
batchsize = 1000
target_byte = 7  # index of the plaintext byte whose leakage is analyzed
# Additional parameters
train_traces = 20000  # number of traces used for training
gradCalc = 10000      # number of traces used for the per-epoch gradient pass

# --- Read traces and metadata ----------------------------------------------
f = h5py.File(TracePath, "r")
traces = f["traces"]
plains = f["metadata"]["plaintext"]
numTraces = traces.shape[0]
samplesPerFile = traces.shape[1]

# Sanity checks: raise instead of assert so they survive `python -O`
if train_traces > numTraces:
    raise ValueError(
        f"train_traces ({train_traces}) exceeds available traces ({numTraces})")
if end_sample > samplesPerFile:
    raise ValueError(
        f"end_sample ({end_sample}) exceeds samples per trace ({samplesPerFile})")
if start_sample >= end_sample:
    raise ValueError("start_sample must be smaller than end_sample")

# Select relevant data (slicing an h5py dataset materializes numpy arrays)
train_x = traces[:, start_sample:end_sample]
train_y = plains[:, target_byte]
x = numpy.arange(start_sample, end_sample, 1)  # sample indices, used for plotting

# Normalize input data: zero mean / unit variance per sample point
scaler = StandardScaler()
train_x = scaler.fit_transform(train_x)
# Callback to perform sensitivity analysis after each epoch
# Callback to perform sensitivity analysis after each epoch
class SACallback(tf.keras.callbacks.Callback):
    """Accumulate an input-gradient sensitivity score during training.

    After every epoch, the gradient of the MSE loss w.r.t. the (normalized)
    input traces is computed; ``|gradient * input|`` summed over the first
    ``gradCalc`` traces is added to a running per-sample accumulator.  When
    training ends, the accumulated scores are written to disk, the peak
    sample position is printed, and the curve is plotted.
    """

    def __init__(self, globSum):
        super().__init__()
        # Running sensitivity accumulator (caller passes 0; becomes a numpy
        # array of length end_sample - start_sample after the first epoch).
        self.globSum = globSum
        # First `gradCalc` normalized traces as a watchable TF variable.
        self.inp = tf.Variable(train_x[:gradCalc], dtype=tf.float32)
        self.trues = to_categorical(train_y[:gradCalc])
        self.trues_tf = tf.Variable(self.trues, dtype=tf.float32)

    def on_epoch_end(self, epoch, logs=None):
        # Gradient of the loss w.r.t. the input samples (not the weights).
        with tf.GradientTape() as tape:
            tape.watch(self.inp)
            preds = self.model(self.inp)
            loss = tf.keras.losses.mean_squared_error(self.trues_tf, preds)
        grads = tape.gradient(loss, self.inp)
        # |gradient * input| summed over traces -> per-sample sensitivity.
        self.globSum = self.globSum + numpy.sum(numpy.abs((grads * train_x[:gradCalc])), axis=0)

    def on_train_end(self, logs=None):
        # `with` guarantees the result file is closed even if write() raises
        # (the original open/close pair leaked the handle on error).
        with open("weights_{}_{}_{}.dat".format(target_byte, start_sample, end_sample), "wb") as resfile:
            resfile.write(self.globSum)
        print(f'Peak Position: {self.globSum.argmax()+start_sample}')
        fig, ax = plt.subplots(1)
        ax.set_title("Sensitivity Analysis Byte {}".format(target_byte))
        ax.plot(x, self.globSum, linewidth=0.6)
        ax.set_xlim([start_sample, end_sample])
        fig.show()
        plt.close()
# Running sensitivity accumulator, updated by the callback after each epoch.
grads_sum = 0
sa_callback = SACallback(grads_sum)

# Define model: plain MLP ending in a 256-way softmax (one class per byte
# value).  Use tf.keras.Input — not standalone keras.Input — so the input
# layer comes from the same Keras implementation as the tf.keras layers
# used everywhere else (mixing the two can break under TF2).
model = Sequential(
    [tf.keras.Input(shape=(len(train_x[0]),)),
     Dense(50, activation='relu'),
     Dense(1600, activation='relu'),
     Dense(1600, activation='relu'),
     Dense(100, activation='relu'),
     Dense(256, activation='softmax')]
)
# Use the explicitly imported Adam optimizer (it was imported but unused);
# Adam() with no arguments matches the defaults of the string form 'Adam'.
model.compile(loss='mean_squared_error', optimizer=Adam(), metrics=['accuracy'])

# Create one-hot encoded labels (256 classes, one per possible byte value).
trues = to_categorical(train_y)

# Start training; the callback performs the sensitivity analysis.
model.fit(train_x, trues,
          epochs=nrepochs,
          batch_size=batchsize,
          verbose=1,
          callbacks=[sa_callback])