Commit: update readme
ebursztein committed Oct 28, 2021
1 parent 0768ec2 commit f896e85
Showing 6 changed files with 290 additions and 89 deletions.
2 changes: 1 addition & 1 deletion .gitignore
@@ -10,7 +10,7 @@ scald/datasets/
 scald/archive/
 scald/deprecated/
 scald/tmp/
-
+*.zip
 .vscode/
 # Byte-compiled / optimized / DLL files
 __pycache__/
14 changes: 7 additions & 7 deletions scaaml/intro/generator.py
@@ -68,13 +68,13 @@ def create_dataset(filepattern,
     cprint('|-x:%s' % str(x.shape), 'green')

     # make it a tf dataset
-    cprint("building tf dataset", 'magenta')
-    dataset = tf.data.Dataset.from_tensor_slices((x, y))
-    dataset.cache()
-    if is_training:
-        dataset = dataset.shuffle(shuffle_size, reshuffle_each_iteration=True)
-    dataset = dataset.batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
-    return dataset
+    # cprint("building tf dataset", 'magenta')

@rubidijum commented on Aug 22, 2022:

Hi @ebursztein, why was this commented out? The article mentions that shuffling is required for model convergence.

@kralka (Collaborator) commented on Sep 2, 2022:

Sorry for the late reply. Keras `fit` should do the shuffling for us, since we are not passing an iterator or a `tf.data.Dataset`. See the `shuffle` parameter at https://keras.io/api/models/model_training_apis/#fit-method.

@rubidijum commented on Sep 3, 2022:

Oh, thank you, that makes sense :)

+    # dataset = tf.data.Dataset.from_tensor_slices((x, y))
+    # dataset.cache()
+    # if is_training:
+    #     dataset = dataset.shuffle(shuffle_size, reshuffle_each_iteration=True)
+    # dataset = dataset.batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
+    return (x, y)


 def list_shards(filepattern, num_shards):
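A minimal, self-contained sketch of the `fit` shuffling behavior discussed in the thread above (toy shapes and a toy model, not the SCAAML architecture): NumPy inputs are shuffled by Keras every epoch by default, while a `tf.data.Dataset` must shuffle itself, which is what the now-commented-out code used to do.

```python
import numpy as np
import tensorflow as tf

# Toy stand-ins for the (x, y) arrays returned by create_dataset().
x = np.random.rand(256, 100, 1).astype("float32")
y = tf.keras.utils.to_categorical(np.random.randint(0, 16, 256), 16)

model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=x.shape[1:]),
    tf.keras.layers.Dense(16, activation="softmax"),
])
model.compile(optimizer="adam", loss="categorical_crossentropy")

# NumPy inputs: fit() shuffles every epoch by default (shuffle=True),
# so no explicit dataset.shuffle() call is needed.
model.fit(x, y, batch_size=32, epochs=1, verbose=0)

# tf.data input: fit()'s shuffle argument is ignored, so the pipeline
# itself must shuffle and batch, as the removed code did.
ds = (tf.data.Dataset.from_tensor_slices((x, y))
      .shuffle(256, reshuffle_each_iteration=True)
      .batch(32)
      .prefetch(tf.data.experimental.AUTOTUNE))
model.fit(ds, epochs=1, verbose=0)
```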
2 changes: 1 addition & 1 deletion scaaml_intro/README.md
@@ -47,7 +47,7 @@ In order to run the notebooks/train models you need to download the following da

 | Filename | What it is | Download size | Expected Location | SHA256 |
 | -------------------------------------------------------------------------------------- | --------------------------------------------------------- | :-----------: | ----------------- | ---------------------------------------------------------------- |
-| [datasets.zip](https://storage.googleapis.com/scaaml-public/scaaml_intro/datasets.zip) | TinyAES train & test datasets | 7GB | `datasets/` | 4bf2c6defb79b40b30f01f488e83762396b56daad14a694f64916be2b665b2f8 |
+| [datasets.zip](https://storage.googleapis.com/scaaml-public/scaaml_intro/datasets.zip) | TinyAES train & test datasets | 8.2GB | `datasets/` | 4bf2c6defb79b40b30f01f488e83762396b56daad14a694f64916be2b665b2f8 |
 | [models.zip](https://storage.googleapis.com/scaaml-public/scaaml_intro/models.zip) | TinyAES 48 pretrained models - 3 attack points * 16 bytes | 312MB | `models/` | 17d7d32cca0ac0db157ae1f5696f6c64bba6d753a8f33802d0d9614bb07d3d9b |
 | [logs.zip](https://storage.googleapis.com/scaaml-public/scaaml_intro/logs.zip) | Tensorboard training logs (optional) | 616MB | `logs` | 5b2f43f89990653d64820cca61f15fc6818ee674ae4cc2b4f235cfd9a48f3b28 |

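As an aside, a minimal sketch for checking the downloads against the SHA256 digests in the table above (file names and paths are assumed to match the table; adjust to where you saved the archives):

```python
import hashlib

# Expected digests copied from the README table above.
EXPECTED = {
    "datasets.zip": "4bf2c6defb79b40b30f01f488e83762396b56daad14a694f64916be2b665b2f8",
    "models.zip": "17d7d32cca0ac0db157ae1f5696f6c64bba6d753a8f33802d0d9614bb07d3d9b",
    "logs.zip": "5b2f43f89990653d64820cca61f15fc6818ee674ae4cc2b4f235cfd9a48f3b28",
}

def sha256sum(path, chunk_size=1 << 20):
    """Hash a file in 1MB chunks so the 8.2GB dataset never sits in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

for name, expected in EXPECTED.items():
    status = "OK" if sha256sum(name) == expected else "MISMATCH"
    print(f"{name}: {status}")
```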
347 changes: 274 additions & 73 deletions scaaml_intro/key_recovery_demo.ipynb

Large diffs are not rendered by default.

12 changes: 5 additions & 7 deletions scaaml_intro/train.py
@@ -36,7 +36,7 @@ def train_model(config):
     for attack_byte in config['attack_bytes']:
         for attack_point in config['attack_points']:

-            g_train = create_dataset(
+            x_train, y_train = create_dataset(
                 TRAIN_GLOB,
                 batch_size=BATCH_SIZE,
                 attack_point=attack_point,
@@ -46,7 +46,7 @@
                 max_trace_length=config['max_trace_len'],
                 is_training=True)

-            g_test = create_dataset(
+            x_test, y_test = create_dataset(
                 TEST_GLOB,
                 batch_size=BATCH_SIZE,
                 attack_point=attack_point,
@@ -57,9 +57,7 @@
                 is_training=False)

             # infers shape
-            for data in g_test.take(1):
-                x, y = data
-                input_shape = x.shape[1:]
+            input_shape = x_train.shape[1:]

             # reset graph and load a new model
             K.clear_session()
@@ -85,8 +83,8 @@
                 TensorBoard(log_dir='logs/' + stub, update_freq='batch')
             ]

-            model.fit(g_train,
-                      validation_data=g_test,
+            model.fit(x_train, y_train,
+                      validation_data=(x_test, y_test),
                       verbose=1,
                       epochs=config['epochs'],
                       callbacks=cb)
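For context on the shape-inference change above, a minimal sketch with toy shapes (names are illustrative): a NumPy array exposes the per-trace shape directly, whereas the old batched `tf.data.Dataset` required pulling one batch and dropping the batch axis.

```python
import numpy as np
import tensorflow as tf

# Toy traces shaped (num_traces, trace_len, 1).
x_train = np.zeros((1000, 5000, 1), dtype="float32")

# New approach: the model input shape is the array shape minus axis 0.
input_shape = x_train.shape[1:]  # (5000, 1)

# Old approach: take one batch from the dataset and strip the batch axis.
ds = tf.data.Dataset.from_tensor_slices(x_train).batch(32)
for batch in ds.take(1):
    input_shape = batch.shape[1:]  # also (5000, 1)
```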
2 changes: 2 additions & 0 deletions setup.py
@@ -40,6 +40,8 @@
"tensorflow>=2.2.0",
"future-fstrings",
"pygments",
"chipwhisperer",
"scipy"
],
package_data={"": ["*.pickle"]},
classifiers=[
Expand Down

0 comments on commit f896e85
