feat: save mfcc_data_hard and make a comparison plot between their training progress
Skudalen 2021-07-20 15:02:57 +02:00
parent d7c733e54f
commit 53ef95f2fe
6 changed files with 1249508 additions and 211 deletions


@@ -17,7 +17,9 @@ import statistics
import csv
# Path to json file that stores MFCCs and subject labels for each processed sample
DATA_PATH_MFCC = str(Path.cwd()) + "/mfcc_data.json"
SOFT_DATA_PATH_MFCC = str(Path.cwd()) + "/mfcc_data_soft.json"
HARD_DATA_PATH_MFCC = str(Path.cwd()) + "/mfcc_data_hard.json"
# Loads data from the json file and reshapes X_data(samples, 1, 208) and y_data(samples, 1)
# Input: JSON path
@@ -40,212 +42,9 @@ def load_data_from_json(data_path, nr_classes):
return X, y, session_lengths
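The body of load_data_from_json is elided from this hunk. For orientation, here is a minimal sketch of a loader matching the comment above; the JSON key names ("mfcc", "labels", "session_lengths") and the one-hot step are assumptions, not confirmed by this diff:

import json
import numpy as np

def load_data_from_json_sketch(data_path, nr_classes):
    # Read the JSON written by the preprocessing step
    with open(data_path, "r") as json_file:
        data = json.load(json_file)
    X = np.array(data["mfcc"])                   # assumed key; shape (samples, 208)
    X = X.reshape(X.shape[0], 1, X.shape[1])     # reshape to (samples, 1, 208)
    y = np.array(data["labels"])                 # assumed key; one label per sample
    session_lengths = data["session_lengths"]    # assumed key; (nr_subjects, nr_sessions)
    # nr_classes is presumably used to one-hot encode y,
    # given y.shape = (samples, nr_subjects) later in this file
    return X, y, session_lengths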
# Plots the training history with two subplots. First training and test accuracy, and then
# loss with respect to epochs
# Input: History(from model.fit(...))
# Output: None -> plot
def plot_train_history(history, val_data=False):
fig, axs = plt.subplots(2)
    # create accuracy subplot
axs[0].plot(history.history["accuracy"], label="train accuracy")
if val_data:
axs[0].plot(history.history["val_accuracy"], label="validation accuracy")
axs[0].set_ylabel("Accuracy")
axs[0].legend(loc="lower right")
axs[0].set_title("Accuracy eval")
    # create error subplot
axs[1].plot(history.history["loss"], label="train error")
if val_data:
axs[1].plot(history.history["val_loss"], label="validation error")
axs[1].set_ylabel("Error")
axs[1].set_xlabel("Epoch")
axs[1].legend(loc="upper right")
axs[1].set_title("Error eval")
plt.show()
# Plots the training history of four networks under inverse cross-validation
# Input: data, nr of sessions in total, batch_size and epochs
# Output: None -> plot
def plot_4_x_inverse_cross_val(X, y, session_lengths, nr_sessions, batch_size=64, epochs=30):
history_dict = {'GRU': [],
'LSTM': [],
'FFN': [],
'CNN_1D': []}
for i in range(nr_sessions):
X_test_session, X_train_session, y_test_session, y_train_session = prepare_datasets_sessions(X, y, session_lengths, i)
model_GRU = GRU(input_shape=(1, 208))
GRU_h = train(model_GRU, X_train_session, y_train_session, 1, batch_size=batch_size, epochs=epochs)
history_dict['GRU'].append(GRU_h)
del model_GRU
K.clear_session()
model_LSTM = LSTM(input_shape=(1, 208))
LSTM_h = train(model_LSTM, X_train_session, y_train_session, 1, batch_size=batch_size, epochs=epochs)
history_dict['LSTM'].append(LSTM_h)
del model_LSTM
K.clear_session()
model_FFN = FFN(input_shape=(1, 208))
FFN_h = train(model_FFN, X_train_session, y_train_session, 1, batch_size=batch_size, epochs=epochs)
history_dict['FFN'].append(FFN_h)
del model_FFN
K.clear_session()
model_CNN_1D = CNN_1D(input_shape=(208, 1))
X_train_session = np.reshape(X_train_session, (X_train_session.shape[0], 208, 1))
X_test_session = np.reshape(X_test_session, (X_test_session.shape[0], 208, 1))
CNN_1D_h = train(model_CNN_1D, X_train_session, y_train_session, 1, batch_size=batch_size, epochs=epochs)
history_dict['CNN_1D'].append(CNN_1D_h)
del model_CNN_1D
K.clear_session()
fig, axs = plt.subplots(2, 2, sharey=True)
plt.ylim(0, 1)
# GRU plot:
axs[0, 0].plot(history_dict['GRU'][0].history["accuracy"])
axs[0, 0].plot(history_dict['GRU'][1].history["accuracy"], 'tab:orange')
axs[0, 0].plot(history_dict['GRU'][2].history["accuracy"], 'tab:green')
axs[0, 0].plot(history_dict['GRU'][3].history["accuracy"], 'tab:red')
axs[0, 0].set_title('GRU')
# LSTM plot:
axs[0, 1].plot(history_dict['LSTM'][0].history["accuracy"])
axs[0, 1].plot(history_dict['LSTM'][1].history["accuracy"], 'tab:orange')
axs[0, 1].plot(history_dict['LSTM'][2].history["accuracy"], 'tab:green')
axs[0, 1].plot(history_dict['LSTM'][3].history["accuracy"], 'tab:red')
axs[0, 1].set_title('LSTM')
# FFN plot:
axs[1, 0].plot(history_dict['FFN'][0].history["accuracy"])
axs[1, 0].plot(history_dict['FFN'][1].history["accuracy"], 'tab:orange')
axs[1, 0].plot(history_dict['FFN'][2].history["accuracy"], 'tab:green')
axs[1, 0].plot(history_dict['FFN'][3].history["accuracy"], 'tab:red')
axs[1, 0].set_title('FFN')
# CNN_1D plot:
axs[1, 1].plot(history_dict['CNN_1D'][0].history["accuracy"])
axs[1, 1].plot(history_dict['CNN_1D'][1].history["accuracy"], 'tab:orange')
axs[1, 1].plot(history_dict['CNN_1D'][2].history["accuracy"], 'tab:green')
axs[1, 1].plot(history_dict['CNN_1D'][3].history["accuracy"], 'tab:red')
axs[1, 1].set_title('CNN_1D')
for ax in axs.flat:
ax.set(xlabel='Epochs', ylabel='Accuracy')
# Hide x labels and tick labels for top plots and y ticks for right plots.
for ax in axs.flat:
ax.label_outer()
plt.show()
# Plots the average training history of four networks under inverse cross-validation
# Input: data, nr of sessions in total, batch_size and epochs
# Output: None -> plot
def plot_4_x_average_val(X, y, session_lengths, nr_sessions, batch_size=64, epochs=30):
history_dict = {'GRU_train': [],
'LSTM_train': [],
'FFN_train': [],
'CNN_1D_train': []}
history_dict_val = {'GRU_val': [],
'LSTM_val': [],
'FFN_val': [],
'CNN_1D_val': []}
for i in range(nr_sessions):
# Prepare data
X_val_session, X_train_session, y_val_session, y_train_session = prepare_datasets_sessions(X, y, session_lengths, i)
# ----- DATA HANDLING ------
# GRU
model_GRU = GRU(input_shape=(1, 208))
GRU_h = train(model_GRU, X_train_session, y_train_session, 1, batch_size=batch_size, epochs=epochs,
X_validation=X_val_session, y_validation=y_val_session)
history_dict['GRU_train'].append(GRU_h.history['accuracy'])
history_dict_val['GRU_val'].append(GRU_h.history['val_accuracy'])
del model_GRU
K.clear_session()
# LSTM
model_LSTM = LSTM(input_shape=(1, 208))
LSTM_h = train(model_LSTM, X_train_session, y_train_session, 1, batch_size=batch_size, epochs=epochs,
X_validation=X_val_session, y_validation=y_val_session)
history_dict['LSTM_train'].append(LSTM_h.history['accuracy'])
history_dict_val['LSTM_val'].append(LSTM_h.history['val_accuracy'])
del model_LSTM
K.clear_session()
# FFN
model_FFN = FFN(input_shape=(1, 208))
FFN_h = train(model_FFN, X_train_session, y_train_session, 1, batch_size=batch_size, epochs=epochs,
X_validation=X_val_session, y_validation=y_val_session)
history_dict['FFN_train'].append(FFN_h.history['accuracy'])
history_dict_val['FFN_val'].append(FFN_h.history['val_accuracy'])
del model_FFN
K.clear_session()
# CNN_1D
model_CNN_1D = CNN_1D(input_shape=(208, 1))
X_train_session = np.reshape(X_train_session, (X_train_session.shape[0], 208, 1))
X_val_session = np.reshape(X_val_session, (X_val_session.shape[0], 208, 1))
CNN_1D_h = train(model_CNN_1D, X_train_session, y_train_session, 1, batch_size=batch_size, epochs=epochs,
X_validation=X_val_session, y_validation=y_val_session)
history_dict['CNN_1D_train'].append(CNN_1D_h.history['accuracy'])
history_dict_val['CNN_1D_val'].append(CNN_1D_h.history['val_accuracy'])
del model_CNN_1D
K.clear_session()
# Averaging out session training for each network
for key in history_dict:
history_dict[key] = list(np.average([x, y, z, c]) for x, y, z, c in list(zip(*history_dict[key])))
for key in history_dict_val:
history_dict_val[key] = list(np.average([x, y, z, c]) for x, y, z, c in list(zip(*history_dict_val[key])))
'''
history_dict = {'GRU_train': [0.5, 0.8],
'LSTM_train': [0.5, 0.9],
'FFN_train': [0.75, 0.8],
'CNN_1D_train': [0.8, 0.95]}
history_dict_val = {'GRU_val': [0.5, 0.8],
'LSTM_val': [0.5, 0.9],
'FFN_val': [0.75, 0.8],
'CNN_1D_val': [0.8, 0.95]}
'''
# Plot:
fig, axs = plt.subplots(2, sharey=True)
plt.ylim(0, 1)
plt.subplots_adjust(hspace=1.0, top=0.85, bottom=0.15, right=0.75)
fig.suptitle('Avarage accuracy with cross-session-training', fontsize=16)
axs[0].plot(history_dict['GRU_train'], label='GRU')
axs[0].plot(history_dict['LSTM_train'], 'tab:orange', label='LSTM')
axs[0].plot(history_dict['FFN_train'], 'tab:green', label='FFN')
axs[0].plot(history_dict['CNN_1D_train'], 'tab:red', label='CNN_1D')
axs[0].set_title('Training accuracy')
axs[1].plot(history_dict_val['GRU_val'], label='GRU')
axs[1].plot(history_dict_val['LSTM_val'], 'tab:orange', label='LSTM')
axs[1].plot(history_dict_val['FFN_val'], 'tab:green', label='FFN')
axs[1].plot(history_dict_val['CNN_1D_val'], 'tab:red', label='CNN_1D')
axs[1].set_title('Validation accuracy')
for ax in axs.flat:
ax.set(xlabel='Epochs', ylabel='Accuracy')
plt.legend(bbox_to_anchor=(1.05, 1.5), title='Networks', loc='center left')
plt.show()
# Takes in data and labels, and splits it into train, validation and test sets by percentage
# Input: Data, labels, whether to shuffle, % validation, % test
# Output: X_train, X_validation, X_test, y_train, y_validation, y_test
@@ -525,7 +324,291 @@ def prediction_csv_logger(X, y, model_name, model, session_nr, custom_path=None):
data = zip(X, layerOutput, y)
writer.writerows(data)
csv_file.close()
def get_session_info(session_lengths_soft, session_lengths_hard):
print('Soft: {}\nHard: {}'.format(session_lengths_soft, session_lengths_hard))
soft_avg_sess = np.average(list(np.average(x) for x in session_lengths_soft))
soft_avg_sub = np.sum(list(np.average(x) for x in session_lengths_soft))
hard_avg_sub = np.sum(list(np.average(x) for x in session_lengths_hard))
hard_avg_sess = np.average(list(np.average(x) for x in session_lengths_hard))
print('Avg session:', soft_avg_sess, hard_avg_sess)
print('Avg sub:', soft_avg_sub, hard_avg_sub)
# ----- PLOTS ------
# Plots the training history with two subplots. First training and test accuracy, and then
# loss with respect to epochs
# Input: History(from model.fit(...))
# Output: None -> plot
def plot_train_history(history, val_data=False):
fig, axs = plt.subplots(2)
    # create accuracy subplot
axs[0].plot(history.history["accuracy"], label="train accuracy")
if val_data:
axs[0].plot(history.history["val_accuracy"], label="validation accuracy")
axs[0].set_ylabel("Accuracy")
axs[0].legend(loc="lower right")
axs[0].set_title("Accuracy eval")
    # create error subplot
axs[1].plot(history.history["loss"], label="train error")
if val_data:
axs[1].plot(history.history["val_loss"], label="validation error")
axs[1].set_ylabel("Error")
axs[1].set_xlabel("Epoch")
axs[1].legend(loc="upper right")
axs[1].set_title("Error eval")
plt.show()
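For reference, a minimal, hypothetical usage of plot_train_history with a Keras History object (model, data, and hyperparameters here are placeholders, not values from this repo):

history = model.fit(X_train, y_train,
                    validation_data=(X_validation, y_validation),
                    batch_size=64, epochs=30)
plot_train_history(history, val_data=True)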
# Plots the training history of four networks under inverse cross-validation
# Input: data, nr of sessions in total, batch_size and epochs
# Output: None -> plot
def plot_comp_session_spread(X, y, session_lengths, nr_sessions, batch_size=64, epochs=30):
history_dict = {'GRU': [],
'LSTM': [],
'FFN': [],
'CNN_1D': []}
for i in range(nr_sessions):
X_test_session, X_train_session, y_test_session, y_train_session = prepare_datasets_sessions(X, y, session_lengths, i)
model_GRU = GRU(input_shape=(1, 208))
GRU_h = train(model_GRU, X_train_session, y_train_session, 1, batch_size=batch_size, epochs=epochs)
history_dict['GRU'].append(GRU_h)
del model_GRU
K.clear_session()
model_LSTM = LSTM(input_shape=(1, 208))
LSTM_h = train(model_LSTM, X_train_session, y_train_session, 1, batch_size=batch_size, epochs=epochs)
history_dict['LSTM'].append(LSTM_h)
del model_LSTM
K.clear_session()
model_FFN = FFN(input_shape=(1, 208))
FFN_h = train(model_FFN, X_train_session, y_train_session, 1, batch_size=batch_size, epochs=epochs)
history_dict['FFN'].append(FFN_h)
del model_FFN
K.clear_session()
model_CNN_1D = CNN_1D(input_shape=(208, 1))
X_train_session = np.reshape(X_train_session, (X_train_session.shape[0], 208, 1))
X_test_session = np.reshape(X_test_session, (X_test_session.shape[0], 208, 1))
CNN_1D_h = train(model_CNN_1D, X_train_session, y_train_session, 1, batch_size=batch_size, epochs=epochs)
history_dict['CNN_1D'].append(CNN_1D_h)
del model_CNN_1D
K.clear_session()
fig, axs = plt.subplots(2, 2, sharey=True)
plt.ylim(0, 1)
# GRU plot:
axs[0, 0].plot(history_dict['GRU'][0].history["accuracy"])
axs[0, 0].plot(history_dict['GRU'][1].history["accuracy"], 'tab:orange')
axs[0, 0].plot(history_dict['GRU'][2].history["accuracy"], 'tab:green')
axs[0, 0].plot(history_dict['GRU'][3].history["accuracy"], 'tab:red')
axs[0, 0].set_title('GRU')
# LSTM plot:
axs[0, 1].plot(history_dict['LSTM'][0].history["accuracy"])
axs[0, 1].plot(history_dict['LSTM'][1].history["accuracy"], 'tab:orange')
axs[0, 1].plot(history_dict['LSTM'][2].history["accuracy"], 'tab:green')
axs[0, 1].plot(history_dict['LSTM'][3].history["accuracy"], 'tab:red')
axs[0, 1].set_title('LSTM')
# FFN plot:
axs[1, 0].plot(history_dict['FFN'][0].history["accuracy"])
axs[1, 0].plot(history_dict['FFN'][1].history["accuracy"], 'tab:orange')
axs[1, 0].plot(history_dict['FFN'][2].history["accuracy"], 'tab:green')
axs[1, 0].plot(history_dict['FFN'][3].history["accuracy"], 'tab:red')
axs[1, 0].set_title('FFN')
# CNN_1D plot:
axs[1, 1].plot(history_dict['CNN_1D'][0].history["accuracy"])
axs[1, 1].plot(history_dict['CNN_1D'][1].history["accuracy"], 'tab:orange')
axs[1, 1].plot(history_dict['CNN_1D'][2].history["accuracy"], 'tab:green')
axs[1, 1].plot(history_dict['CNN_1D'][3].history["accuracy"], 'tab:red')
axs[1, 1].set_title('CNN_1D')
for ax in axs.flat:
ax.set(xlabel='Epochs', ylabel='Accuracy')
# Hide x labels and tick labels for top plots and y ticks for right plots.
for ax in axs.flat:
ax.label_outer()
plt.show()
# Plots the average training history of four networks under inverse cross-validation
# Input: data, nr of sessions in total, batch_size and epochs
# Output: None -> plot
def plot_comp_accuracy(X, y, session_lengths, nr_sessions, batch_size=64, epochs=30):
#'''
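    # NOTE: the "#'''" above, together with the triple-quoted block after the
    # training loop, appears to act as a toggle between real training runs and
    # hard-coded dummy histories for quick plot testing (an inference, not stated
    # in the commit); the same pattern recurs in plot_comp_SoftHard below.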
history_dict = {'GRU_train': [],
'LSTM_train': [],
'FFN_train': [],
'CNN_1D_train': []}
history_dict_val = {'GRU_val': [],
'LSTM_val': [],
'FFN_val': [],
'CNN_1D_val': []}
for i in range(nr_sessions):
# Prepare data
X_val_session, X_train_session, y_val_session, y_train_session = prepare_datasets_sessions(X, y, session_lengths, i)
# GRU
model_GRU = GRU(input_shape=(1, 208))
GRU_h = train(model_GRU, X_train_session, y_train_session, 1, batch_size=batch_size, epochs=epochs,
X_validation=X_val_session, y_validation=y_val_session)
history_dict['GRU_train'].append(GRU_h.history['accuracy'])
history_dict_val['GRU_val'].append(GRU_h.history['val_accuracy'])
del model_GRU
K.clear_session()
# LSTM
model_LSTM = LSTM(input_shape=(1, 208))
LSTM_h = train(model_LSTM, X_train_session, y_train_session, 1, batch_size=batch_size, epochs=epochs,
X_validation=X_val_session, y_validation=y_val_session)
history_dict['LSTM_train'].append(LSTM_h.history['accuracy'])
history_dict_val['LSTM_val'].append(LSTM_h.history['val_accuracy'])
del model_LSTM
K.clear_session()
# FFN
model_FFN = FFN(input_shape=(1, 208))
FFN_h = train(model_FFN, X_train_session, y_train_session, 1, batch_size=batch_size, epochs=epochs,
X_validation=X_val_session, y_validation=y_val_session)
history_dict['FFN_train'].append(FFN_h.history['accuracy'])
history_dict_val['FFN_val'].append(FFN_h.history['val_accuracy'])
del model_FFN
K.clear_session()
# CNN_1D
model_CNN_1D = CNN_1D(input_shape=(208, 1))
X_train_session = np.reshape(X_train_session, (X_train_session.shape[0], 208, 1))
X_val_session = np.reshape(X_val_session, (X_val_session.shape[0], 208, 1))
CNN_1D_h = train(model_CNN_1D, X_train_session, y_train_session, 1, batch_size=batch_size, epochs=epochs,
X_validation=X_val_session, y_validation=y_val_session)
history_dict['CNN_1D_train'].append(CNN_1D_h.history['accuracy'])
history_dict_val['CNN_1D_val'].append(CNN_1D_h.history['val_accuracy'])
del model_CNN_1D
K.clear_session()
# Averaging out session training for each network
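    # NOTE: the 4-tuple unpacking below assumes exactly four sessions
    # (nr_sessions == 4); each epoch's accuracy is averaged across the four
    # per-session training runs.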
for key in history_dict:
history_dict[key] = list(np.average([x, y, z, c]) for x, y, z, c in list(zip(*history_dict[key])))
for key in history_dict_val:
history_dict_val[key] = list(np.average([x, y, z, c]) for x, y, z, c in list(zip(*history_dict_val[key])))
'''
history_dict = {'GRU_train': [0.5, 0.8, 0.4, 0.8],
'LSTM_train': [0.5, 0.9, 0.3, 0.9],
'FFN_train': [0.75, 0.8, 0.2, 0.7],
'CNN_1D_train': [0.8, 0.95, 0.1, 0.6]}
history_dict_val = {'GRU_val': [0.5, 0.8, 0.4, 0.8],
'LSTM_val': [0.5, 0.9, 0.4, 0.8],
'FFN_val': [0.75, 0.8, 0.4, 0.8],
'CNN_1D_val': [0.8, 0.95, 0.4, 0.8]}
'''
# Plot:
fig, axs = plt.subplots(2, sharey=True)
plt.ylim(0, 1)
plt.subplots_adjust(hspace=1.0, top=0.85, bottom=0.15, right=0.75)
fig.suptitle('Average accuracy with cross-session-training', fontsize=16)
axs[0].plot(history_dict['CNN_1D_train'], ':', label='CNN_1D')
axs[0].plot(history_dict['LSTM_train'], '--', label='LSTM')
axs[0].plot(history_dict['GRU_train'], '-', label='GRU')
axs[0].plot(history_dict['FFN_train'], '-.', label='FFN')
axs[0].set_title('Training accuracy')
axs[1].plot(history_dict_val['CNN_1D_val'], ':', label='CNN_1D')
axs[1].plot(history_dict_val['LSTM_val'], '--', label='LSTM')
axs[1].plot(history_dict_val['GRU_val'], '-', label='GRU')
axs[1].plot(history_dict_val['FFN_val'], '-.', label='FFN')
axs[1].set_title('Validation accuracy')
for ax in axs.flat:
ax.set(xlabel='Epochs', ylabel='Accuracy')
plt.legend(bbox_to_anchor=(1.05, 1.5), title='Models used\n', loc='center left')
plt.style.use('seaborn-dark-palette')
plt.show()
def plot_comp_SoftHard(X_soft, y_soft, X_hard, y_hard, session_lengths_soft, session_lengths_hard, nr_sessions, batch_size=64, epochs=30):
#'''
train_dict = {'SOFT':[], 'HARD':[]}
val_dict = {'SOFT':[], 'HARD':[]}
for i in range(nr_sessions):
# Prepare data
X_val_soft, X_train_soft, y_val_soft, y_train_soft = prepare_datasets_sessions(X_soft, y_soft, session_lengths_soft, i)
X_val_hard, X_train_hard, y_val_hard, y_train_hard = prepare_datasets_sessions(X_hard, y_hard, session_lengths_hard, i)
X_train_soft = np.reshape(X_train_soft, (X_train_soft.shape[0], 208, 1))
X_val_soft = np.reshape(X_val_soft, (X_val_soft.shape[0], 208, 1))
X_train_hard = np.reshape(X_train_hard, (X_train_hard.shape[0], 208, 1))
X_val_hard = np.reshape(X_val_hard, (X_val_hard.shape[0], 208, 1))
# CNN_1D SOFT
model_CNN_1D = CNN_1D(input_shape=(208, 1))
CNN_1D_h = train(model_CNN_1D, X_train_soft, y_train_soft, 1, batch_size=batch_size, epochs=epochs,
X_validation=X_val_soft, y_validation=y_val_soft)
train_dict['SOFT'] = list(CNN_1D_h.history['accuracy'])
val_dict['SOFT'] = list(CNN_1D_h.history['val_accuracy'])
del model_CNN_1D
K.clear_session()
# CNN_1D HARD
model_CNN_1D = CNN_1D(input_shape=(208, 1))
CNN_1D_h = train(model_CNN_1D, X_train_hard, y_train_hard, 1, batch_size=batch_size, epochs=epochs,
X_validation=X_val_hard, y_validation=y_val_hard)
train_dict['HARD'] = list(CNN_1D_h.history['accuracy'])
val_dict['HARD'] = list(CNN_1D_h.history['val_accuracy'])
del model_CNN_1D
K.clear_session()
'''
history_dict = {'GRU_train': [0.5, 0.8, 0.4, 0.8],
'LSTM_train': [0.5, 0.9, 0.3, 0.9],
'FFN_train': [0.75, 0.8, 0.2, 0.7],
'CNN_1D_train': [0.8, 0.95, 0.1, 0.6]}
history_dict_val = {'GRU_val': [0.5, 0.8, 0.4, 0.8],
'LSTM_val': [0.5, 0.9, 0.4, 0.8],
'FFN_val': [0.75, 0.8, 0.4, 0.8],
'CNN_1D_val': [0.8, 0.95, 0.4, 0.8]}
'''
# Plot:
fig, axs = plt.subplots(2, sharey=True)
plt.ylim(0, 1)
plt.subplots_adjust(hspace=1.0, top=0.85, bottom=0.15, right=0.75)
fig.suptitle('Model training and validation with SOFT/HARD data', fontsize=16)
axs[0].plot(train_dict['SOFT'], ':', label='CNN_1D SOFT')
axs[0].plot(train_dict['HARD'], '--', label='CNN_1D HARD')
axs[0].set_title('Training accuracy')
axs[1].plot(val_dict['SOFT'], ':', label='CNN_1D SOFT')
axs[1].plot(val_dict['HARD'], '--', label='CNN_1D HARD')
axs[1].set_title('Validation accuracy')
for ax in axs.flat:
ax.set(xlabel='Epochs', ylabel='Accuracy')
plt.legend(bbox_to_anchor=(1.05, 1.5), title='Models used\n', loc='center left')
plt.style.use('seaborn-dark-palette')
plt.show()
# ----- MODELS ------
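The model constructors (GRU, LSTM, FFN, CNN_1D) are elided from this diff. As a rough sketch only, a CNN_1D constructor compatible with the (208, 1) input shape and one-hot labels used above might look like the following; the layer sizes and choices are assumptions, not the repo's actual architecture:

import tensorflow.keras as keras

def CNN_1D_sketch(input_shape=(208, 1), nr_classes=5):
    model = keras.Sequential([
        keras.layers.Conv1D(32, kernel_size=5, activation='relu', input_shape=input_shape),
        keras.layers.MaxPooling1D(pool_size=2),
        keras.layers.Flatten(),
        keras.layers.Dense(64, activation='relu'),
        keras.layers.Dense(nr_classes, activation='softmax'),  # one unit per subject
    ])
    # categorical_crossentropy assumes one-hot labels,
    # per y.shape = (samples, nr_subjects) noted above
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model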
@@ -596,7 +679,8 @@ if __name__ == "__main__":
# X.shape = (2806, 1, 208)
# y.shape = (2806, nr_subjects)
# session_lengths.shape = (nr_subjects, nr_sessions)
X, y, session_lengths = load_data_from_json(DATA_PATH_MFCC, nr_classes=5)
X_soft, y_soft, session_lengths_soft = load_data_from_json(SOFT_DATA_PATH_MFCC, nr_classes=5)
X_hard, y_hard, session_lengths_hard = load_data_from_json(HARD_DATA_PATH_MFCC, nr_classes=5)
# Parameters:
NR_SUBJECTS = 5
@@ -615,7 +699,7 @@ if __name__ == "__main__":
# y_train.shape = (2806-y_test, nr_subjects)
# y_test.shape = (y_test(from session nr. ?), nr_subjects)
X_train, X_test, y_train, y_test = prepare_datasets_sessions(X, y, session_lengths, TEST_SESSION_NR)
X_train, X_test, y_train, y_test = prepare_datasets_sessions(X_soft, y_soft, session_lengths_soft, TEST_SESSION_NR)
'''
@@ -714,6 +798,7 @@ if __name__ == "__main__":
# ----- PLOTTING ------
#plot_4xinverse_cross_val(X, y, session_lengths, NR_SESSIONS, epochs=30)
plot_4_x_average_val(X, y, session_lengths, NR_SESSIONS, epochs=30)
#plot_4_x_average_val(X_soft, y_soft, session_lengths, NR_SESSIONS, epochs=30)
#plot_comp_SoftHard(X_soft, y_soft, X_hard, y_hard, session_lengths_soft, session_lengths_hard, NR_SESSIONS, epochs=30)


@@ -249,14 +249,16 @@ if __name__ == "__main__":
soft_dir_name = 'Exp20201205_2myo_softType'
hard_dir_name = 'Exp20201205_2myo_hardType'
JSON_TEST_NAME = 'TEST_mfcc.json'
JSON_FILE_SOFT = 'mfcc_data_soft.json'
JSON_FILE_HARD = 'mfcc_data_hard.json'
csv_handler = CSV_handler(NR_SUBJECTS, NR_SESSIONS)
dict = csv_handler.load_data('soft', soft_dir_name)
nn_handler = NN_handler(csv_handler)
nn_handler.store_mfcc_samples()
nn_handler.save_json_mfcc(JSON_TEST_NAME)
nn_handler.save_json_mfcc(JSON_FILE_SOFT)


@@ -34,7 +34,7 @@ Scripts to handle CSV files composed of 2 * 8 EMG sensors (left & right) divided
1. Clone the repo
2. Place the data files in the working directory
3. Place the data files within the `data`-folder
(format: /`data`/<datatype>/<subject-folder+ID>/<session-folder>/<left/right-CSV-files>)
(format: `/data/<datatype>/<subject-folder+ID>/<session-folder>/<left/right-CSV-files>`)
4. Assuming NN analysis:
1. Create a `CSV_handler` object
2. Load data with `load_data(CSV_handler, <datatype>)`
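A minimal driver following these steps, modeled on the entry point elsewhere in this commit (the module name, constructor arguments, and session count are assumptions):

from Handle_emg_data import CSV_handler, NN_handler  # assumed module name

NR_SUBJECTS, NR_SESSIONS = 5, 4   # counts as used in the analysis script above
csv_handler = CSV_handler(NR_SUBJECTS, NR_SESSIONS)
data_dict = csv_handler.load_data('soft', 'Exp20201205_2myo_softType')
nn_handler = NN_handler(csv_handler)
nn_handler.store_mfcc_samples()
nn_handler.save_json_mfcc('mfcc_data_soft.json')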

mfcc_data_hard.json (new file, 657099 lines added)

File diff suppressed because it is too large.

mfcc_data_soft.json (new file, 592111 lines added)

File diff suppressed because it is too large.