Commit 9dd1f995 authored by Joseph Sleiman

Cleaned up comments; updated the models file to include an RNN with a masking feature

parent 95191a4e
Branch: main
@@ -4,16 +4,25 @@ from argparse import ArgumentParser
+def set_constants(problem):
+    if problem == "Conway":
+        Knotind = ["0_1", "conway", "kt"]
+    elif problem == "5Class":
+        Knotind = ['0_1', '3_1', '4_1', '5_1', '5_2']
+    elif problem == "SQRGRN8":
+        Knotind = ['3_1_3_1', '3_1-3_1', '8_20']  # square knot, granny knot, 8_20
+    elif problem == "10Crossings":
+        Knotind = [""]
+    return Knotind
 def getParams():
-    """Receive user-input of simulation parameters via the command Line interface (CLI) and Python library argparse.
+    """Receive user input of training parameters via the command-line interface (CLI) and the Python library argparse.
     Default values are provided if no input is specified.
     Returns:
-        args: Values defining the knot parameters, of specified type.
+        args: Values defining the knot parameters.
     """
     par = ArgumentParser()
@@ -21,15 +30,15 @@ def getParams():
         "-p",
         "--problem",
         type=str,
-        default="SQRGRN8",
-        help="Options: 0_5 or SQRGRN8 or SQRGRN or GRN8 or SQR8",
-    )  # NOTE CHANGE TO SQRGRN8
+        default="Conway",
+        help="Options: Conway, 5Class, SQRGRN8, 10Crossings",
+    )
     par.add_argument(
         "-d",
         "--datatype",
         type=str,
         default="Writhe",
-        help="Options: 1DWrithe or Writhe or LD or LC or LCW or XYZ",
+        help="Options: 1DWrithe, Writhe, Sig_Writhe, LD, LC, LCW, XYZ",
     )
     par.add_argument(
         "-a",
@@ -53,7 +62,11 @@ def getParams():
         help="Type of neural network: FFNN or RNN",
     )
     par.add_argument(
-        "-e", "--epochs", type=int, default=1000, help="Set the number of training epochs"
+        "-e",
+        "--epochs",
+        type=int,
+        default=1000,
+        help="Set the number of training epochs",
     )
     args = par.parse_args()
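For reference, a minimal sketch of how the updated CLI is meant to be driven (the script name train.py and the argument values are placeholders; getParams and set_constants are the functions patched above):

    # e.g. python train.py --problem Conway --datatype Writhe --epochs 500
    args = getParams()
    knot_classes = set_constants(args.problem)  # ["0_1", "conway", "kt"] for the default "Conway" problem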
@@ -3,11 +3,17 @@ import tensorflow as tf
 def load_dataset(filename, preproc, label):
     # Load the dataset file
-    dataset = tf.data.experimental.CsvDataset(filename, header=True, field_delim=" ")
+    dataset = tf.data.TextLineDataset(filename, num_parallel_reads=5)
+    dataset = dataset.filter(filter_function)  # drop '#' comment lines
     # Apply eventual preprocessing steps
     dataset = dataset.map(preproc)
     dataset = dataset.shuffle(buffer_size)
     dataset = dataset.batch(batch_size)
     # Create the label vector
     labels = tf.fill(tf.data.experimental.cardinality(dataset), label)
@@ -30,3 +36,14 @@ def split_train_test_validation(dataset, train_size, test_size, val_size):
     train_dataset = dataset.take(train_size)
     test_dataset = dataset.take(test_size)
     val_dataset = dataset.take(val_size)
+def filter_function(line):
+    # Keep only lines that do not contain a '#' comment marker
+    return tf.math.logical_not(tf.strings.contains(line, '#'))
+def set_shape(x):
+    # Fix the static shape so downstream layers know the input dimensions
+    x.set_shape((Nbeads, dimensions))
+    return x
+def pad_sequences(x):
+    # Pad (or truncate) every sequence to length 1000, marking padding with -100
+    x = tf.keras.utils.pad_sequences(x, maxlen=1000, value=-100)
+    return x
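As context for the new helpers, a minimal sketch of the intended tf.data pipeline (the filename "knots.dat" and the line layout are assumptions, not values from the repository):

    import tensorflow as tf

    dataset = tf.data.TextLineDataset("knots.dat")   # hypothetical data file
    dataset = dataset.filter(filter_function)        # skip '#' comment lines
    # parse whitespace-separated "x y z ..." text lines into float vectors
    dataset = dataset.map(lambda line: tf.strings.to_number(tf.strings.split(line)))

Note that tf.keras.utils.pad_sequences is a NumPy-level utility, so padding happens outside the tf.data graph:

    batch = tf.keras.utils.pad_sequences([[1, 2, 3], [4, 5]], maxlen=5, value=-100)
    # -> [[-100, -100, 1, 2, 3], [-100, -100, -100, 4, 5]] (pre-padding by default)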
 import tensorflow as tf
+from tensorflow.keras.layers import Input, Masking, LSTM, Dense, Bidirectional
+from tensorflow.keras.models import Model
-def setup_RNN(RNN_hidden_top, input_shape, output_shape, hidden_activation, opt):
+def setup_RNN(input_shape, output_shape, hidden_activation, opt):
+    max_sequence_length = 1000
+    mask_value = -100
-    model = tf.keras.models.Sequential()
+    # input_shape = (max_sequence_length, dimensions)
+    input_layer = Input(shape=input_shape)
+    masked_layer = Masking(mask_value=mask_value)(input_layer)
+    lstm_layer1 = LSTM(100,
+                       activation=hidden_activation,
+                       return_sequences=True,
+                       recurrent_dropout=0)(masked_layer)
-    # add input LSTM layer (input_shape=(size_input,))
-    model.add(tf.keras.layers.LSTM(
-        RNN_hidden_top[0],
-        input_shape=input_shape,
-        activation=hidden_activation,
-        return_sequences=True,
-        recurrent_dropout=0))
+    bidirectional_layer = Bidirectional(LSTM(100,
+                                             activation=hidden_activation,
+                                             return_sequences=True,
+                                             recurrent_dropout=0))(lstm_layer1)
+    lstm_layer2 = LSTM(100,
+                       activation=hidden_activation,
+                       return_sequences=True,
+                       recurrent_dropout=0)(bidirectional_layer)
+    lstm_layer3 = LSTM(100, activation=hidden_activation)(lstm_layer2)
-    # add bidirectional LSTM layer
-    model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(RNN_hidden_top[1], activation=hidden_activation, return_sequences=True, recurrent_dropout=0)))
+    output_layer = Dense(output_shape, activation="softmax")(lstm_layer3)
-    # add intermediate LSTM layers
-    for i in range(len(RNN_hidden_top) - 3):
-        model.add(tf.keras.layers.LSTM(
-            RNN_hidden_top[i + 2],
-            activation=hidden_activation,
-            return_sequences=True,
-            recurrent_dropout=0))
+    model = Model(inputs=input_layer, outputs=output_layer)
-    # add final LSTM layer with output only from the last memory cell (a la Vandans et al.)
-    model.add(tf.keras.layers.LSTM(RNN_hidden_top[-1], activation=hidden_activation))
-    # final output layer with "Nknots" neurons for the "Nknots" knot types
-    model.add(tf.keras.layers.Dense(output_shape, activation="softmax"))
     # loss function compares y_pred to y_true: here sparse categorical crossentropy,
     # used for integer labels (CategoricalCrossentropy is used for one-hot encodings)
     loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()
@@ -40,11 +44,10 @@ def setup_RNN(RNN_hidden_top, input_shape, output_shape, hidden_activation, opt)
     print("Generated RNN model:")
     print(model.summary())
-    return model
     return model
 def setup_NN(NN_hidden_top, input_shape, output_shape, hidden_activation, opt):
     model = tf.keras.models.Sequential()
     # model.add(tf.keras.layers.Flatten())  # flattens the data to 1D so it can be taken as input
     # add input layer (input_shape=(size_input,)) and first hidden layer to the NN
     model.add(tf.keras.layers.Dense(
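Finally, a hedged end-to-end sketch of the new masked RNN (the shapes and argument values here are illustrative assumptions, not taken from the repository):

    import numpy as np

    # Assume padded sequences of 3-D bead coordinates, matching
    # max_sequence_length=1000 and mask_value=-100 in setup_RNN above.
    model = setup_RNN(input_shape=(1000, 3), output_shape=3,
                      hidden_activation="tanh", opt="adam")
    x = np.full((2, 1000, 3), -100.0, dtype=np.float32)  # two dummy sequences, all padding
    x[:, :150, :] = np.random.rand(2, 150, 3)            # first 150 timesteps hold real data
    # Masking(mask_value=-100) makes every downstream LSTM skip the padded timesteps
    probs = model.predict(x)                             # shape (2, 3): softmax over knot classes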