Commit d65f7740 authored by Tim O'Donnell

Switch back to dense layer size 32 from 16

parent bdee8330
Showing 10 additions and 62 deletions
@@ -42,9 +42,7 @@
     ],
     "activation": "relu",
     "output_activation": "sigmoid",
-    "layer_sizes": [
-        16
-    ],
+    "layer_sizes": [32],
     "dense_layer_l1_regularization": 0.001,
     "batch_normalization": false,
     "dropout_probability": 0.0,
@@ -40,7 +40,7 @@ time mhcflurry-class1-train-allele-specific-models \
     --allele $ALLELES 2>&1 | tee -a LOG.standard.txt &
 
 # Model variations on qualitative + quantitative
-for mod in 0local_noL1 0local 2local widelocal dense8 dense32 dense64 noL1 onehot embedding
+for mod in 0local_noL1 0local 2local widelocal dense16 dense64 noL1 onehot embedding
 do
     cp $SCRIPT_DIR/hyperparameters-${mod}.yaml .
     mkdir models-${mod}
@@ -37,7 +37,7 @@
     "activation": "relu",
     "output_activation": "sigmoid",
     "layer_sizes": [
-        16
+        32
     ],
     "dense_layer_l1_regularization": 0.001,
     "batch_normalization": false,
......@@ -37,7 +37,7 @@
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
16
32
],
"dense_layer_l1_regularization": 0.0,
"batch_normalization": false,
......
@@ -47,7 +47,7 @@
     "activation": "relu",
     "output_activation": "sigmoid",
     "layer_sizes": [
-        16
+        32
     ],
     "dense_layer_l1_regularization": 0.001,
     "batch_normalization": false,
@@ -42,7 +42,7 @@
     "activation": "relu",
     "output_activation": "sigmoid",
     "layer_sizes": [
-        8
+        16
     ],
     "dense_layer_l1_regularization": 0.001,
     "batch_normalization": false,
[{
    ##########################################
    # ENSEMBLE SIZE
    ##########################################
    "n_models": 8,
    ##########################################
    # OPTIMIZATION
    ##########################################
    "max_epochs": 500,
    "patience": 10,
    "early_stopping": true,
    "validation_split": 0.2,
    "minibatch_size": 128,
    ##########################################
    # RANDOM NEGATIVE PEPTIDES
    ##########################################
    "random_negative_rate": 0.0,
    "random_negative_constant": 25,
    "random_negative_affinity_min": 20000.0,
    "random_negative_affinity_max": 50000.0,
    ##########################################
    # PEPTIDE REPRESENTATION
    ##########################################
    # One of "one-hot", "embedding", or "BLOSUM62".
    "peptide_amino_acid_encoding": "BLOSUM62",
    "use_embedding": false,  # maintained for backward compatibility
    "kmer_size": 15,
    ##########################################
    # NEURAL NETWORK ARCHITECTURE
    ##########################################
    "locally_connected_layers": [
        {
            "filters": 8,
            "activation": "tanh",
            "kernel_size": 3
        }
    ],
    "activation": "relu",
    "output_activation": "sigmoid",
    "layer_sizes": [
        32
    ],
    "dense_layer_l1_regularization": 0.001,
    "batch_normalization": false,
    "dropout_probability": 0.0,
}]
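The file shown in full above is ordinary YAML: a list of model-configuration dictionaries annotated with "#" comments. As a minimal sketch (not part of this commit), such a file can be loaded and inspected with PyYAML; the filename here is hypothetical:

    import yaml

    # Hypothetical filename; any hyperparameters-*.yaml file in this format works.
    with open("hyperparameters.yaml") as f:
        configs = yaml.safe_load(f)  # YAML accepts the "#" comments as-is

    # Each dict configures one model; these keys appear in the file above.
    for config in configs:
        print(config["layer_sizes"], config["dense_layer_l1_regularization"])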
@@ -43,7 +43,7 @@
     "activation": "relu",
     "output_activation": "sigmoid",
     "layer_sizes": [
-        16
+        32
     ],
     "dense_layer_l1_regularization": 0.001,
     "batch_normalization": false,
@@ -42,7 +42,7 @@
     "activation": "relu",
     "output_activation": "sigmoid",
     "layer_sizes": [
-        16
+        32
     ],
     "dense_layer_l1_regularization": 0.0,
     "batch_normalization": false,
@@ -42,7 +42,7 @@
     "activation": "relu",
     "output_activation": "sigmoid",
     "layer_sizes": [
-        16
+        32
     ],
     "dense_layer_l1_regularization": 0.001,
     "batch_normalization": false,
@@ -43,7 +43,7 @@
     "activation": "relu",
     "output_activation": "sigmoid",
     "layer_sizes": [
-        16
+        32
    ],
     "dense_layer_l1_regularization": 0.001,
     "batch_normalization": false,