diff --git a/downloads-generation/data_mass_spec_benchmark/GENERATE.WITH_HPC_CLUSTER.sh b/downloads-generation/data_mass_spec_benchmark/GENERATE.WITH_HPC_CLUSTER.sh
index 7be4347050fe1cbfb77c4b6286bcec6b5d9740f8..e57f235498823232536a8670e6c4125e09d1a677 100755
--- a/downloads-generation/data_mass_spec_benchmark/GENERATE.WITH_HPC_CLUSTER.sh
+++ b/downloads-generation/data_mass_spec_benchmark/GENERATE.WITH_HPC_CLUSTER.sh
@@ -27,6 +27,8 @@ cd $SCRATCH_DIR/$DOWNLOAD_NAME
 
 cp $SCRIPT_DIR/write_proteome_peptides.py .
 cp $SCRIPT_DIR/run_mhcflurry.py .
+cp $SCRIPT_DIR/run_thirdparty_predictors.py .
+cp $SCRIPT_DIR/cluster_submit_script_header.mssm_hpc.nogpu.lsf .
 cp $SCRIPT_DIR/write_allele_list.py .
 
 PEPTIDES=$(mhcflurry-downloads path data_mass_spec_annotated)/annotated_ms.csv.bz2
@@ -100,7 +102,7 @@ python write_proteome_peptides.py \
 python run_thirdparty_predictors.py \
     proteome_peptides.all.csv \
     --predictor netmhcpan4 \
-    --chunk-size 100000 \
+    --chunk-size 10000 \
     --allele $(cat alleles.txt) \
     --out "predictions/all.netmhcpan4" \
     --worker-log-dir "$SCRATCH_DIR/$DOWNLOAD_NAME" \
@@ -108,9 +110,7 @@ python run_thirdparty_predictors.py \
     --cluster-max-retries 15 \
     --cluster-submit-command bsub \
     --cluster-results-workdir ~/mhcflurry-scratch \
-    --cluster-script-prefix-path $SCRIPT_DIR/cluster_submit_script_header.mssm_hpc.lsf
-
-
+    --cluster-script-prefix-path cluster_submit_script_header.mssm_hpc.nogpu.lsf
 
 
 bzip2 proteome_peptides.chr1.csv
diff --git a/downloads-generation/data_mass_spec_benchmark/cluster_submit_script_header.mssm_hpc.nogpu.lsf b/downloads-generation/data_mass_spec_benchmark/cluster_submit_script_header.mssm_hpc.nogpu.lsf
new file mode 100644
index 0000000000000000000000000000000000000000..7b85917f4bb6dd1d1e0627dac98a841a07e4cdd1
--- /dev/null
+++ b/downloads-generation/data_mass_spec_benchmark/cluster_submit_script_header.mssm_hpc.nogpu.lsf
@@ -0,0 +1,36 @@
+#!/bin/bash
+#BSUB -J MHCf-{work_item_num} # Job name
+#BSUB -P acc_nkcancer # allocation account or Unix group
+#BSUB -q express # queue
+#BSUB -R span[hosts=1] # one node
+#BSUB -n 1 # number of compute cores
+#BSUB -W 46:00 # walltime in HH:MM
+#BSUB -R rusage[mem=30000] # mb memory requested
+#BSUB -o {work_dir}/%J.stdout # output log (%J : JobID)
+#BSUB -eo {work_dir}/STDERR # error log
+#BSUB -L /bin/bash # Initialize the execution environment
+#
+
+set -e
+set -x
+
+echo "Subsequent stderr output redirected to stdout" >&2
+exec 2>&1
+
+export TMPDIR=/local/JOBS/mhcflurry-{work_item_num}
+export PATH=$HOME/.conda/envs/py36b/bin/:$PATH
+export PYTHONUNBUFFERED=1
+export KMP_SETTINGS=1
+export NETMHC_BUNDLE_HOME=$HOME/sinai/git/netmhc-bundle
+export NETMHC_BUNDLE_TMPDIR=/local/JOBS/netmhctmp-{work_item_num}
+export PATH=$NETMHC_BUNDLE_HOME/bin:$PATH
+
+# Per-work-item temp dirs have unique names and will not pre-exist on the
+# node; create them so tools that honor TMPDIR/NETMHC_BUNDLE_TMPDIR do not fail.
+mkdir -p "$TMPDIR" "$NETMHC_BUNDLE_TMPDIR"
+
+free -m
+env
+
+cd {work_dir}
+