Commit 63aff984 authored by jean Ibarz

The input of the model has been standardized to the BWHF (Batch, Width, Height, Features) shape. RandomScale2DLayer and RandomShift2DLayer have been incorporated into the model so that these two data-augmentation steps are applied on the fly during training of the model, but not during testing.
parent 557f555b
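
The "on the fly during training, but not during testing" behaviour described above matches the usual Keras pattern of gating augmentation on the layer's training flag. As a minimal, hypothetical sketch (not the actual core.layers implementation, whose internals are not shown here), such a shift layer could look like this:

import tensorflow as tf

class NaiveRandomShift2DLayer(tf.keras.layers.Layer):
    """Illustrative only: randomly rolls a BWHF tensor along one axis during training."""

    def __init__(self, minval=0, maxval=100, axis=1, **kwargs):
        super().__init__(**kwargs)
        self.minval = minval
        self.maxval = maxval
        self.axis = axis

    def call(self, inputs, training=None):
        if training:
            # Draw a random integer offset and roll the tensor along the chosen axis.
            shift = tf.random.uniform([], self.minval, self.maxval, dtype=tf.int32)
            return tf.roll(inputs, shift=shift, axis=self.axis)
        # At evaluation/prediction time the input passes through unchanged.
        return inputs

Keras passes training=True during model.fit() and training=False during model.evaluate() / model.predict(), which is what keeps the augmentation out of the test pass.
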
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv1D, MaxPooling1D
from tensorflow.keras.layers import LeakyReLU
from core.layers import RandomShift2DLayer, RandomScale2DLayer
def default_model_creator():
    model = tf.keras.Sequential([
        RandomScale2DLayer(minval=-10, maxval=10),
        RandomShift2DLayer(minval=0, maxval=100, axis=1),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(units=320, activation='relu'),
        tf.keras.layers.Dropout(rate=0.05),
......@@ -26,26 +26,3 @@ def default_model_creator():
    ])
    return model
def conv_model_creator():
    model = Sequential()
    model.add(Conv1D(16, 3, padding='same', input_shape=(40611, 2)))
    model.add(LeakyReLU())
    model.add(Conv1D(16, 3, padding='same'))
    model.add(LeakyReLU())
    model.add(MaxPooling1D(pool_size=3))
    model.add(Dropout(0.25))
    model.add(Conv1D(16, 3, padding='same'))
    model.add(LeakyReLU())
    model.add(Conv1D(16, 3, padding='same'))
    model.add(LeakyReLU())
    model.add(MaxPooling1D(pool_size=3))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(64))
    model.add(LeakyReLU())
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid'))
    return model
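
As a hedged usage sketch of the model above (the build shape and compile settings below are assumptions for illustration, not taken from this repository), the augmented model is built on BWHF-shaped input and the two random layers only act when Keras runs in training mode:

from core.model import default_model_creator

model = default_model_creator()
model.build(input_shape=(None, 40611, 2, 1))  # assumed (Batch, Width=time, Height=ear, Features)
model.compile(optimizer='adam', loss='mse')   # assumed settings, for illustration only
# model.fit(...)     -> RandomScale2DLayer / RandomShift2DLayer active
# model.predict(...) -> both augmentation layers bypassed
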
......@@ -4,7 +4,7 @@ from core.utils import load_ircam_hrirs_data, split_dataset, generate_signals, g
import numpy as np
import tensorflow as tf
from core.logger import experiment_logger
from core.model import default_model_creator, conv_model_creator
from core.model import default_model_creator
import pandas as pd
from core.metrics import MeanAbsoluteAzimuthError
......@@ -30,7 +30,7 @@ exp_config = ExperimentConfiguration({'n_augment': 1,
                                      'stimulus': 'dirac',  # possible values: ['dirac', 'any_sound']
                                      'train_subject_ids': ALL_SUBJECTS_EXCEPT_1059[0:30],
                                      'test_subject_ids': [1059],
                                      'movement': 'deterministic',  # possible values: ['none', 'random', 'deterministic']
                                      'movement': 'none',  # possible values: ['none', 'random', 'deterministic']
                                      'sessions': [0]})
if __name__ == '__main__':
......@@ -72,7 +72,8 @@ if __name__ == '__main__':
    valid_train_subject_ids = 'train_subject_ids' in exp_config.keys() and exp_config[
        'train_subject_ids'] is not None and len(exp_config['train_subject_ids']) > 0
    valid_test_subject_ids = 'test_subject_ids' in exp_config.keys() and exp_config['test_subject_ids'] is not None and len(
    valid_test_subject_ids = 'test_subject_ids' in exp_config.keys() and exp_config[
        'test_subject_ids'] is not None and len(
        exp_config['test_subject_ids']) > 0
    if valid_train_subject_ids and valid_test_subject_ids:
        train_subject_ids = exp_config['train_subject_ids']
......@@ -139,6 +140,11 @@ if __name__ == '__main__':
        sounds=test_sounds,
        n_samples=n_samples,
        n_augment=1)
    # Expand dimensions of the arrays to comply with the standardized BWHF shape (Batch, Width, Height, Features).
    # Here, width = time axis and height = channel (left or right).
    training_signals = np.expand_dims(training_signals, axis=-1)
    test_signals = np.expand_dims(test_signals, axis=-1)
    model.fit(x=training_signals, y=training_labels, batch_size=batch_size, epochs=exp_config['n_epochs'],
              validation_data=(test_signals, test_labels))
......
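
For reference, the expand_dims step above turns the (batch, time, channel) arrays produced by the signal generator into the BWHF layout expected by the model. A small, self-contained illustration (the array sizes here are placeholders, not values read from the experiment):

import numpy as np

signals = np.zeros((8, 40611, 2))           # (Batch, Width=time samples, Height=left/right channel)
signals = np.expand_dims(signals, axis=-1)  # -> (8, 40611, 2, 1): a single feature per sample
print(signals.shape)
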