Skip to content
Snippets Groups Projects
Commit 13c509e2 authored by Tim O'Donnell's avatar Tim O'Donnell
Browse files

update models_class1_experiments1

parent 0cfd7e0d
No related branches found
No related tags found
No related merge requests found
......@@ -23,6 +23,8 @@ git status
cd $SCRATCH_DIR/$DOWNLOAD_NAME
ALLELES="HLA-A*01:01 HLA-A*02:01 HLA-A*02:03 HLA-A*02:07 HLA-A*03:01 HLA-A*11:01 HLA-A*24:02 HLA-A*29:02 HLA-A*31:01 HLA-A*68:02 HLA-B*07:02 HLA-B*15:01 HLA-B*35:01 HLA-B*44:02 HLA-B*44:03 HLA-B*51:01 HLA-B*54:01 HLA-B*57:01"
# Standard architecture on quantitative only
cp $SCRIPT_DIR/hyperparameters-standard.json .
mkdir models-standard-quantitative
......@@ -31,7 +33,8 @@ time mhcflurry-class1-train-allele-specific-models \
--only-quantitative \
--hyperparameters hyperparameters-standard.json \
--out-models-dir models-standard-quantitative \
--min-measurements-per-allele 100 &
--percent-rank-calibration-num-peptides-per-length 0 \
--alleles $ALLELES &
# Model variations on qualitative + quantitative
for mod in 0local_noL1 0local 1local dense16 dense64 noL1
......@@ -42,7 +45,8 @@ do
--data "$(mhcflurry-downloads path data_curated)/curated_training_data.csv.bz2" \
--hyperparameters hyperparameters-${mod}.json \
--out-models-dir models-${mod} \
--min-measurements-per-allele 100 &
--percent-rank-calibration-num-peptides-per-length 0 \
--alleles $ALLELES &
done
wait
......
[
{
"n_models": 12,
"max_epochs": 500,
"patience": 10,
"early_stopping": true,
"validation_split": 0.2,
[{
##########################################
# ENSEMBLE SIZE
##########################################
"n_models": 8,
"random_negative_rate": 0.0,
"random_negative_constant": 25,
##########################################
# OPTIMIZATION
##########################################
"max_epochs": 500,
"patience": 10,
"early_stopping": true,
"validation_split": 0.2,
"use_embedding": false,
"kmer_size": 15,
"batch_normalization": false,
"locally_connected_layers": [],
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
32
],
"random_negative_affinity_min": 20000.0,
"random_negative_affinity_max": 50000.0,
"dense_layer_l1_regularization": 0.001,
"dropout_probability": 0.0
}
]
##########################################
# RANDOM NEGATIVE PEPTIDES
##########################################
"random_negative_rate": 0.0,
"random_negative_constant": 25,
"random_negative_affinity_min": 20000.0,
"random_negative_affinity_max": 50000.0,
##########################################
# PEPTIDE REPRESENTATION
##########################################
# One of "one-hot", "embedding", or "BLOSUM62".
"peptide_amino_acid_encoding": "BLOSUM62",
"use_embedding": false, # maintained for backward compatibility
"kmer_size": 15,
##########################################
# NEURAL NETWORK ARCHITECTURE
##########################################
"locally_connected_layers": [
],
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
32
],
"dense_layer_l1_regularization": 0.001,
"batch_normalization": false,
"dropout_probability": 0.0,
}]
[
{
"n_models": 12,
"max_epochs": 500,
"patience": 10,
"early_stopping": true,
"validation_split": 0.2,
[{
##########################################
# ENSEMBLE SIZE
##########################################
"n_models": 8,
"random_negative_rate": 0.0,
"random_negative_constant": 25,
##########################################
# OPTIMIZATION
##########################################
"max_epochs": 500,
"patience": 10,
"early_stopping": true,
"validation_split": 0.2,
"use_embedding": false,
"kmer_size": 15,
"batch_normalization": false,
"locally_connected_layers": [],
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
32
],
"random_negative_affinity_min": 20000.0,
"random_negative_affinity_max": 50000.0,
"dense_layer_l1_regularization": 0.0,
"dropout_probability": 0.0
}
]
##########################################
# RANDOM NEGATIVE PEPTIDES
##########################################
"random_negative_rate": 0.0,
"random_negative_constant": 25,
"random_negative_affinity_min": 20000.0,
"random_negative_affinity_max": 50000.0,
##########################################
# PEPTIDE REPRESENTATION
##########################################
# One of "one-hot", "embedding", or "BLOSUM62".
"peptide_amino_acid_encoding": "BLOSUM62",
"use_embedding": false, # maintained for backward compatibility
"kmer_size": 15,
##########################################
# NEURAL NETWORK ARCHITECTURE
##########################################
"locally_connected_layers": [
],
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
32
],
"dense_layer_l1_regularization": 0.0,
"batch_normalization": false,
"dropout_probability": 0.0,
}]
[
{
"n_models": 12,
"max_epochs": 500,
"patience": 10,
"early_stopping": true,
"validation_split": 0.2,
"random_negative_rate": 0.0,
"random_negative_constant": 25,
"use_embedding": false,
"kmer_size": 15,
"batch_normalization": false,
"locally_connected_layers": [
{
"filters": 8,
"activation": "tanh",
"kernel_size": 3
}
],
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
32
],
"random_negative_affinity_min": 20000.0,
"random_negative_affinity_max": 50000.0,
"dense_layer_l1_regularization": 0.001,
"dropout_probability": 0.0
}
]
[{
##########################################
# ENSEMBLE SIZE
##########################################
"n_models": 8,
##########################################
# OPTIMIZATION
##########################################
"max_epochs": 500,
"patience": 10,
"early_stopping": true,
"validation_split": 0.2,
##########################################
# RANDOM NEGATIVE PEPTIDES
##########################################
"random_negative_rate": 0.0,
"random_negative_constant": 25,
"random_negative_affinity_min": 20000.0,
"random_negative_affinity_max": 50000.0,
##########################################
# PEPTIDE REPRESENTATION
##########################################
# One of "one-hot", "embedding", or "BLOSUM62".
"peptide_amino_acid_encoding": "BLOSUM62",
"use_embedding": false, # maintained for backward compatibility
"kmer_size": 15,
##########################################
# NEURAL NETWORK ARCHITECTURE
##########################################
"locally_connected_layers": [
{
"filters": 8,
"activation": "tanh",
"kernel_size": 3
},
{
"filters": 8,
"activation": "tanh",
"kernel_size": 3
}
],
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
32
],
"dense_layer_l1_regularization": 0.001,
"batch_normalization": false,
"dropout_probability": 0.0,
}]
[
{
"n_models": 12,
"max_epochs": 500,
"patience": 10,
"early_stopping": true,
"validation_split": 0.2,
[{
##########################################
# ENSEMBLE SIZE
##########################################
"n_models": 8,
##########################################
# OPTIMIZATION
##########################################
"max_epochs": 500,
"patience": 10,
"early_stopping": true,
"validation_split": 0.2,
"random_negative_rate": 0.0,
"random_negative_constant": 25,
##########################################
# RANDOM NEGATIVE PEPTIDES
##########################################
"random_negative_rate": 0.0,
"random_negative_constant": 25,
"random_negative_affinity_min": 20000.0,
"random_negative_affinity_max": 50000.0,
"use_embedding": false,
"kmer_size": 15,
"batch_normalization": false,
"locally_connected_layers": [
{
"filters": 8,
"activation": "tanh",
"kernel_size": 3
},
{
"filters": 8,
"activation": "tanh",
"kernel_size": 3
}
],
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
16
],
"random_negative_affinity_min": 20000.0,
"random_negative_affinity_max": 50000.0,
"dense_layer_l1_regularization": 0.001,
"dropout_probability": 0.0
##########################################
# PEPTIDE REPRESENTATION
##########################################
# One of "one-hot", "embedding", or "BLOSUM62".
"peptide_amino_acid_encoding": "BLOSUM62",
"use_embedding": false, # maintained for backward compatibility
"kmer_size": 15,
##########################################
# NEURAL NETWORK ARCHITECTURE
##########################################
"locally_connected_layers": [
{
"filters": 8,
"activation": "tanh",
"kernel_size": 3
}
]
],
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
16
],
"dense_layer_l1_regularization": 0.001,
"batch_normalization": false,
"dropout_probability": 0.0,
}]
[
{
"n_models": 12,
"max_epochs": 500,
"patience": 10,
"early_stopping": true,
"validation_split": 0.2,
[{
##########################################
# ENSEMBLE SIZE
##########################################
"n_models": 8,
##########################################
# OPTIMIZATION
##########################################
"max_epochs": 500,
"patience": 10,
"early_stopping": true,
"validation_split": 0.2,
"random_negative_rate": 0.0,
"random_negative_constant": 25,
##########################################
# RANDOM NEGATIVE PEPTIDES
##########################################
"random_negative_rate": 0.0,
"random_negative_constant": 25,
"random_negative_affinity_min": 20000.0,
"random_negative_affinity_max": 50000.0,
"use_embedding": false,
"kmer_size": 15,
"batch_normalization": false,
"locally_connected_layers": [
{
"filters": 8,
"activation": "tanh",
"kernel_size": 3
},
{
"filters": 8,
"activation": "tanh",
"kernel_size": 3
}
],
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
64
],
"random_negative_affinity_min": 20000.0,
"random_negative_affinity_max": 50000.0,
"dense_layer_l1_regularization": 0.001,
"dropout_probability": 0.0
##########################################
# PEPTIDE REPRESENTATION
##########################################
# One of "one-hot", "embedding", or "BLOSUM62".
"peptide_amino_acid_encoding": "BLOSUM62",
"use_embedding": false, # maintained for backward compatibility
"kmer_size": 15,
##########################################
# NEURAL NETWORK ARCHITECTURE
##########################################
"locally_connected_layers": [
{
"filters": 8,
"activation": "tanh",
"kernel_size": 3
}
]
],
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
64
],
"dense_layer_l1_regularization": 0.001,
"batch_normalization": false,
"dropout_probability": 0.0,
}]
[
{
"n_models": 12,
"max_epochs": 500,
"patience": 10,
"early_stopping": true,
"validation_split": 0.2,
[{
##########################################
# ENSEMBLE SIZE
##########################################
"n_models": 8,
##########################################
# OPTIMIZATION
##########################################
"max_epochs": 500,
"patience": 10,
"early_stopping": true,
"validation_split": 0.2,
"random_negative_rate": 0.0,
"random_negative_constant": 25,
##########################################
# RANDOM NEGATIVE PEPTIDES
##########################################
"random_negative_rate": 0.0,
"random_negative_constant": 25,
"random_negative_affinity_min": 20000.0,
"random_negative_affinity_max": 50000.0,
"use_embedding": false,
"kmer_size": 15,
"batch_normalization": false,
"locally_connected_layers": [
{
"filters": 8,
"activation": "tanh",
"kernel_size": 3
},
{
"filters": 8,
"activation": "tanh",
"kernel_size": 3
}
],
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
32
],
"random_negative_affinity_min": 20000.0,
"random_negative_affinity_max": 50000.0,
"dense_layer_l1_regularization": 0.0,
"dropout_probability": 0.0
##########################################
# PEPTIDE REPRESENTATION
##########################################
# One of "one-hot", "embedding", or "BLOSUM62".
"peptide_amino_acid_encoding": "BLOSUM62",
"use_embedding": false, # maintained for backward compatibility
"kmer_size": 15,
##########################################
# NEURAL NETWORK ARCHITECTURE
##########################################
"locally_connected_layers": [
{
"filters": 8,
"activation": "tanh",
"kernel_size": 3
}
]
],
"activation": "relu",
"output_activation": "sigmoid",
"layer_sizes": [
32
],
"dense_layer_l1_regularization": 0.0,
"batch_normalization": false,
"dropout_probability": 0.0,
}]
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment