Skip to content
Snippets Groups Projects
Commit ee242d39 authored by Tim O'Donnell's avatar Tim O'Donnell
Browse files

Switch from 32 hidden layer size to 16 in standard models

parent 55e3cfc1
No related branches found
No related tags found
No related merge requests found
Showing
with 59 additions and 10 deletions
......@@ -42,7 +42,7 @@
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
32
16
],
"dense_layer_l1_regularization": 0.001,
"batch_normalization": false,
......
......@@ -36,7 +36,7 @@
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
32
16
],
"dense_layer_l1_regularization": 0.001,
"batch_normalization": false,
......
......@@ -36,7 +36,7 @@
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
32
16
],
"dense_layer_l1_regularization": 0.0,
"batch_normalization": false,
......
......@@ -46,7 +46,7 @@
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
32
16
],
"dense_layer_l1_regularization": 0.001,
"batch_normalization": false,
......
......@@ -41,7 +41,7 @@
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
16
32
],
"dense_layer_l1_regularization": 0.001,
"batch_normalization": false,
......
......@@ -41,7 +41,7 @@
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
64
64
],
"dense_layer_l1_regularization": 0.001,
"batch_normalization": false,
......
[{
##########################################
# ENSEMBLE SIZE
##########################################
"n_models": 8,
##########################################
# OPTIMIZATION
##########################################
"max_epochs": 500,
"patience": 10,
"early_stopping": true,
"validation_split": 0.2,
##########################################
# RANDOM NEGATIVE PEPTIDES
##########################################
"random_negative_rate": 0.0,
"random_negative_constant": 25,
"random_negative_affinity_min": 20000.0,
"random_negative_affinity_max": 50000.0,
##########################################
# PEPTIDE REPRESENTATION
##########################################
# One of "one-hot", "embedding", or "BLOSUM62".
"peptide_amino_acid_encoding": "BLOSUM62",
"use_embedding": false, # maintained for backward compatibility
"kmer_size": 15,
##########################################
# NEURAL NETWORK ARCHITECTURE
##########################################
"locally_connected_layers": [
    {
        "filters": 8,
        "activation": "tanh",
        "kernel_size": 3
    }
],
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
    8
],
"dense_layer_l1_regularization": 0.001,
"batch_normalization": false,
"dropout_probability": 0.0
}]
......@@ -42,7 +42,7 @@
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
32
16
],
"dense_layer_l1_regularization": 0.001,
"batch_normalization": false,
......
......@@ -41,7 +41,7 @@
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
32
16
],
"dense_layer_l1_regularization": 0.0,
"batch_normalization": false,
......
......@@ -41,7 +41,7 @@
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
32
16
],
"dense_layer_l1_regularization": 0.001,
"batch_normalization": false,
......
......@@ -42,7 +42,7 @@
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
32
16
],
"dense_layer_l1_regularization": 0.001,
"batch_normalization": false,
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment