diff --git a/downloads-generation/models_class1_presentation/GENERATE.WITH_HPC_CLUSTER.sh b/downloads-generation/models_class1_presentation/GENERATE.WITH_HPC_CLUSTER.sh
deleted file mode 100755
index 53125eb7bec329ecbbd0d230b8afe809c1064204..0000000000000000000000000000000000000000
--- a/downloads-generation/models_class1_presentation/GENERATE.WITH_HPC_CLUSTER.sh
+++ /dev/null
@@ -1 +0,0 @@
-bash GENERATE.sh cluster
diff --git a/downloads-generation/models_class1_presentation/GENERATE.sh b/downloads-generation/models_class1_presentation/GENERATE.sh
index 5443356e8b4f4108c0f223ae0c32b59a0defe3f7..81bcfaad047233bbb82580f8f9d9529f6ab1ee4c 100755
--- a/downloads-generation/models_class1_presentation/GENERATE.sh
+++ b/downloads-generation/models_class1_presentation/GENERATE.sh
@@ -80,7 +80,7 @@ else
     time python make_benchmark.py \
         --hits "$(pwd)/hits_with_tpm.csv.bz2" \
         --proteome-peptides "$(mhcflurry-downloads path data_mass_spec_benchmark)/proteome_peptides.all.csv.bz2" \
-        --decoys-per-hit 99 \
+        --decoys-per-hit 2 \
         --exclude-pmid 31844290 31495665 31154438 \
         --only-format MULTIALLELIC \
         --out "$(pwd)/train_data.csv"
diff --git a/downloads-generation/models_class1_presentation/cluster_submit_script_header.mssm_hpc.lsf b/downloads-generation/models_class1_presentation/cluster_submit_script_header.mssm_hpc.lsf
deleted file mode 100644
index b1eec1a69a81d2c49a8feea9ec61c222eead5480..0000000000000000000000000000000000000000
--- a/downloads-generation/models_class1_presentation/cluster_submit_script_header.mssm_hpc.lsf
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/bin/bash
-#BSUB -J MHCf-{work_item_num} # Job name
-#BSUB -P acc_nkcancer # allocation account or Unix group
-#BSUB -q gpu # queue
-#BSUB -R rusage[ngpus_excl_p=1]  # 1 exclusive GPU
-#BSUB -R span[hosts=1] # one node
-#BSUB -n 1 # number of compute cores
-#BSUB -W 10:00 # walltime in HH:MM
-#BSUB -R rusage[mem=20000] # mb memory requested
-#BSUB -o {work_dir}/%J.stdout # output log (%J : JobID)
-#BSUB -eo {work_dir}/STDERR # error log
-#BSUB -L /bin/bash # Initialize the execution environment
-#
-
-set -e
-set -x
-
-echo "Subsequent stderr output redirected to stdout" >&2
-exec 2>&1
-
-export TMPDIR=/local/JOBS/mhcflurry-{work_item_num}
-export PATH=$HOME/.conda/envs/py36b/bin/:$PATH
-export PYTHONUNBUFFERED=1
-export KMP_SETTINGS=1
-
-free -m
-
-module add cuda/10.0.130
-module list
-
-export CUDNN_HOME=/hpc/users/odonnt02/oss/cudnn/cuda
-export LD_LIBRARY_PATH=$CUDNN_HOME/lib64:$LD_LIBRARY_PATH
-export CMAKE_LIBRARY_PATH=$CUDNN_HOME/lib64:$CMAKE_LIBRARY_PATH
-export INCLUDE_PATH=$CUDNN_HOME/include:$INCLUDE_PATH
-export C_INCLUDE_PATH=$CUDNN_HOME/include:$C_INCLUDE_PATH
-export CPLUS_INCLUDE_PATH=$CUDNN_HOME/include:$CPLUS_INCLUDE_PATH
-export CMAKE_INCLUDE_PATH=$CUDNN_HOME/include:$CMAKE_INCLUDE_PATH
-
-python -c 'import tensorflow as tf ; print("GPU AVAILABLE" if tf.test.is_gpu_available() else "GPU NOT AVAILABLE")'
-
-env
-
-cd {work_dir}
-
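
The deleted LSF header is a template rather than a runnable script: {work_item_num} and {work_dir} are placeholders that the cluster-parallelism wrapper presumably fills in with Python's str.format before writing each per-job submit script. A minimal sketch of that substitution, under that assumption (the wrapper's actual code is not part of this diff):

    # Hypothetical illustration of how the {work_item_num} / {work_dir}
    # placeholders in the submit-script header would be expanded per job.
    def render_submit_header(template_path, work_item_num, work_dir):
        with open(template_path) as f:
            template = f.read()
        # str.format replaces each {name} placeholder with the given value.
        return template.format(work_item_num=work_item_num, work_dir=work_dir)

    header = render_submit_header(
        "cluster_submit_script_header.mssm_hpc.lsf",
        work_item_num=0,
        work_dir="/local/JOBS/example")
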
diff --git a/downloads-generation/models_class1_presentation/generate_hyperparameters.py b/downloads-generation/models_class1_presentation/generate_hyperparameters.py
deleted file mode 100644
index 890b814bc070962fa4664eb9eab57db29a00ed3e..0000000000000000000000000000000000000000
--- a/downloads-generation/models_class1_presentation/generate_hyperparameters.py
+++ /dev/null
@@ -1,52 +0,0 @@
-"""
-Generate grid of hyperparameters
-"""
-from __future__ import print_function
-from sys import stdout, stderr
-from copy import deepcopy
-from yaml import dump
-
-base_hyperparameters = dict(
-    convolutional_filters=64,
-    convolutional_kernel_size=8,
-    convolutional_kernel_l1_l2=(0.00, 0.0),
-    flanking_averages=True,
-    n_flank_length=15,
-    c_flank_length=15,
-    post_convolutional_dense_layer_sizes=[],
-    minibatch_size=512,
-    dropout_rate=0.5,
-    convolutional_activation="relu",
-    patience=20,
-    learning_rate=0.001)
-
-grid = []
-
-
-def hyperparameters_grid():
-    for learning_rate in [0.001]:
-        for convolutional_activation in ["tanh", "relu"]:
-            for convolutional_filters in [256, 512]:
-                for flanking_averages in [True]:
-                    for convolutional_kernel_size in [11, 13, 15, 17]:
-                        for l1 in [0.0, 1e-6]:
-                            for s in [[8], [16]]:
-                                for d in [0.3, 0.5]:
-                                    new = deepcopy(base_hyperparameters)
-                                    new["learning_rate"] = learning_rate
-                                    new["convolutional_activation"] = convolutional_activation
-                                    new["convolutional_filters"] = convolutional_filters
-                                    new["flanking_averages"] = flanking_averages
-                                    new["convolutional_kernel_size"] = convolutional_kernel_size
-                                    new["convolutional_kernel_l1_l2"] = (l1, 0.0)
-                                    new["post_convolutional_dense_layer_sizes"] = s
-                                    new["dropout_rate"] = d
-                                    yield new
-
-
-for new in hyperparameters_grid():
-    if new not in grid:
-        grid.append(new)
-
-print("Hyperparameters grid size: %d" % len(grid), file=stderr)
-dump(grid, stdout)
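
The deleted generator enumerates 2 activations x 2 filter counts x 4 kernel sizes x 2 L1 penalties x 2 dense-layer settings x 2 dropout rates = 128 configurations (learning rate and flanking averages each take a single value), and all 128 are distinct, so the deduplication pass keeps the full grid. A more compact sketch of the same grid using itertools.product, shown only as an equivalent formulation:

    from copy import deepcopy
    from itertools import product

    def hyperparameters_grid(base):
        # Each key maps to the list of values swept for that hyperparameter.
        axes = dict(
            learning_rate=[0.001],
            convolutional_activation=["tanh", "relu"],
            convolutional_filters=[256, 512],
            flanking_averages=[True],
            convolutional_kernel_size=[11, 13, 15, 17],
            convolutional_kernel_l1_l2=[(0.0, 0.0), (1e-6, 0.0)],
            post_convolutional_dense_layer_sizes=[[8], [16]],
            dropout_rate=[0.3, 0.5])
        # product yields one tuple per point in the Cartesian grid.
        for values in product(*axes.values()):
            new = deepcopy(base)
            new.update(zip(axes.keys(), values))
            yield new
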