Commit c8e7f80c authored by Tim O'Donnell

Refactoring models_class1_pan download

parent 40b32864
#!/bin/bash
#
# Usage: bash GENERATE.sh cluster
#
# Train pan-allele MHCflurry Class I models. Supports re-starting a failed run.
#
# Uses an HPC cluster (Mount Sinai chimera cluster, which uses the LSF job
# scheduler). This would need to be modified for other sites.
#
set -e
set -x
DOWNLOAD_NAME=models_class1_pan_unselected
SCRATCH_DIR=${TMPDIR-/tmp}/mhcflurry-downloads-generation
SCRIPT_ABSOLUTE_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/$(basename "${BASH_SOURCE[0]}")"
SCRIPT_DIR=$(dirname "$SCRIPT_ABSOLUTE_PATH")
mkdir -p "$SCRATCH_DIR"
if [ "$1" != "continue-incomplete" ]
then
echo "Fresh run"
rm -rf "$SCRATCH_DIR/$DOWNLOAD_NAME"
mkdir "$SCRATCH_DIR/$DOWNLOAD_NAME"
else
echo "Continuing incomplete run"
fi
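# Example invocations (a sketch based on the check above; any first argument
# other than "continue-incomplete" starts a fresh run):
#   bash GENERATE.sh cluster               # fresh run
#   bash GENERATE.sh continue-incomplete   # resume a previously failed run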
# Send stdout and stderr to a logfile included with the archive.
LOG="$SCRATCH_DIR/$DOWNLOAD_NAME/LOG.$(date +%s).txt"
exec > >(tee -ia "$LOG")
exec 2> >(tee -ia "$LOG" >&2)
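# (tee -i ignores interrupts and -a appends, so the log survives Ctrl-C and is
# extended rather than truncated if the script is re-run.)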
# Log some environment info
echo "Invocation: $0 $@"
date
pip freeze
git status
mhcflurry-downloads fetch data_curated allele_sequences random_peptide_predictions
cd $SCRATCH_DIR/$DOWNLOAD_NAME
export OMP_NUM_THREADS=1
export PYTHONUNBUFFERED=1
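# OMP_NUM_THREADS=1 keeps each worker from spawning its own pool of BLAS/OpenMP
# threads; PYTHONUNBUFFERED=1 makes worker output show up in the logs immediately.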
if [ "$1" != "continue-incomplete" ]
then
cp $SCRIPT_DIR/generate_hyperparameters.py .
python generate_hyperparameters.py > hyperparameters.yaml
fi
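# (hyperparameters.yaml is only regenerated on fresh runs, so a resumed run keeps
# training against the grid it started with.)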
for kind in combined
do
EXTRA_TRAIN_ARGS=""
if [ "$1" == "continue-incomplete" ] && [ -d "models.${kind}" ]
then
echo "Will continue existing run: $kind"
EXTRA_TRAIN_ARGS="--continue-incomplete"
fi
mhcflurry-class1-train-pan-allele-models \
--data "$(mhcflurry-downloads path data_curated)/curated_training_data.csv.bz2" \
--allele-sequences "$(mhcflurry-downloads path allele_sequences)/allele_sequences.csv" \
--pretrain-data "$(mhcflurry-downloads path random_peptide_predictions)/predictions.csv.bz2" \
--held-out-measurements-per-allele-fraction-and-max 0.25 100 \
--num-folds 4 \
--hyperparameters hyperparameters.yaml \
--out-models-dir $(pwd)/models.${kind} \
--worker-log-dir "$SCRATCH_DIR/$DOWNLOAD_NAME" \
--verbosity 0 \
--cluster-parallelism \
--cluster-submit-command bsub \
--cluster-results-workdir ~/mhcflurry-scratch \
--cluster-script-prefix-path $SCRIPT_DIR/cluster_submit_script_header.mssm_hpc.lsf \
$EXTRA_TRAIN_ARGS
done
cp $SCRIPT_ABSOLUTE_PATH .
bzip2 -f "$LOG"
for i in $(ls LOG-worker.*.txt) ; do bzip2 -f $i ; done
RESULT="$SCRATCH_DIR/${DOWNLOAD_NAME}.$(date +%Y%m%d).tar.bz2"
tar -cjf "$RESULT" *
echo "Created archive: $RESULT"
# Split into <2GB chunks for GitHub
PARTS="${RESULT}.part."
# Check for pre-existing part files and rename them.
for i in $(ls "${PARTS}"* )
do
DEST="${i}.OLD.$(date +%s)"
echo "WARNING: already exists: $i . Moving to $DEST"
mv $i $DEST
done
split -b 2000M "$RESULT" "$PARTS"
echo "Split into parts:"
ls -lh "${PARTS}"*
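The part files written by the split step above sort lexicographically, so the archive can be reassembled with a plain cat before extraction. A minimal sketch, reusing the same shell variables as the script (outside the script you would substitute the actual file names):

cat "${PARTS}"* > "$RESULT"
tar -xjf "$RESULT"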
@@ -2,6 +2,11 @@
#
# Train pan-allele MHCflurry Class I models. Supports re-starting a failed run.
#
+ # Usage: GENERATE.sh <local|cluster> <fresh|continue-incomplete>
+ #
+ # cluster mode uses an HPC cluster (Mount Sinai chimera cluster, which uses lsf job
+ # scheduler). This would need to be modified for other sites.
+ #
@@ -10,8 +15,27 @@ SCRATCH_DIR=${TMPDIR-/tmp}/mhcflurry-downloads-generation
SCRIPT_ABSOLUTE_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/$(basename "${BASH_SOURCE[0]}")"
SCRIPT_DIR=$(dirname "$SCRIPT_ABSOLUTE_PATH")
+ if [ "$1" != "cluster" ]
+ then
+ GPUS=$(nvidia-smi -L 2> /dev/null | wc -l) || GPUS=0
+ echo "Detected GPUS: $GPUS"
+ PROCESSORS=$(getconf _NPROCESSORS_ONLN)
+ echo "Detected processors: $PROCESSORS"
+ if [ "$GPUS" -eq "0" ]; then
+ NUM_JOBS=${NUM_JOBS-1}
+ else
+ NUM_JOBS=${NUM_JOBS-$GPUS}
+ fi
+ echo "Num jobs: $NUM_JOBS"
+ PARALLELISM_ARGS+=" --num-jobs $NUM_JOBS --max-tasks-per-worker 1 --gpus $GPUS --max-workers-per-gpu 1"
+ else
+ PARALLELISM_ARGS+=" --cluster-parallelism --cluster-max-retries 3 --cluster-submit-command bsub --cluster-results-workdir $HOME/mhcflurry-scratch --cluster-script-prefix-path $SCRIPT_DIR/cluster_submit_script_header.mssm_hpc.gpu.lsf"
+ fi
mkdir -p "$SCRATCH_DIR"
- if [ "$1" != "continue-incomplete" ]
+ if [ "$2" != "continue-incomplete" ]
then
echo "Fresh run"
rm -rf "$SCRATCH_DIR/$DOWNLOAD_NAME"
@@ -31,29 +55,14 @@ date
pip freeze
git status
mhcflurry-downloads fetch data_curated allele_sequences random_peptide_predictions
cd $SCRATCH_DIR/$DOWNLOAD_NAME
- cp $SCRIPT_DIR/generate_hyperparameters.py .
+ export OMP_NUM_THREADS=1
- python generate_hyperparameters.py > hyperparameters.yaml
- GPUS=$(nvidia-smi -L 2> /dev/null | wc -l) || GPUS=0
- echo "Detected GPUS: $GPUS"
- PROCESSORS=$(getconf _NPROCESSORS_ONLN)
- echo "Detected processors: $PROCESSORS"
- if [ "$GPUS" -eq "0" ]; then
- NUM_JOBS=${NUM_JOBS-1}
- else
- NUM_JOBS=${NUM_JOBS-$GPUS}
- fi
- echo "Num jobs: $NUM_JOBS"
export PYTHONUNBUFFERED=1
- if [ "$1" != "continue-incomplete" ]
+ cp $SCRIPT_DIR/additional_alleles.txt .
+ if [ "$2" != "continue-incomplete" ]
then
cp $SCRIPT_DIR/generate_hyperparameters.py .
python generate_hyperparameters.py > hyperparameters.yaml
@@ -61,25 +70,60 @@ fi
for kind in combined
do
- EXTRA_TRAIN_ARGS=""
+ CONTINUE_INCOMPLETE_ARGS=""
- if [ "$1" == "continue-incomplete" ] && [ -d "models.${kind}" ]
+ if [ "$2" == "continue-incomplete" ] && [ -d "models.unselected.${kind}" ]
then
echo "Will continue existing run: $kind"
- EXTRA_TRAIN_ARGS="--continue-incomplete"
+ CONTINUE_INCOMPLETE_ARGS="--continue-incomplete"
fi
+ ALLELE_SEQUENCES="$(mhcflurry-downloads path allele_sequences)/allele_sequences.csv"
+ TRAINING_DATA="$(mhcflurry-downloads path data_curated)/curated_training_data.csv.bz2"
mhcflurry-class1-train-pan-allele-models \
- --data "$(mhcflurry-downloads path data_curated)/curated_training_data.csv.bz2" \
+ --data "$TRAINING_DATA" \
- --allele-sequences "$(mhcflurry-downloads path allele_sequences)/allele_sequences.csv" \
+ --allele-sequences "$ALLELE_SEQUENCES" \
--pretrain-data "$(mhcflurry-downloads path random_peptide_predictions)/predictions.csv.bz2" \
--held-out-measurements-per-allele-fraction-and-max 0.25 100 \
--num-folds 4 \
- --hyperparameters hyperparameters.yaml \
+ --hyperparameters "$HYPERPARAMETERS" \
- --out-models-dir models.${kind} \
+ --out-models-dir $(pwd)/models.unselected.${kind} \
--worker-log-dir "$SCRATCH_DIR/$DOWNLOAD_NAME" \
- --verbosity 0 \
+ $PARALLELISM_ARGS $CONTINUE_INCOMPLETE_ARGS
- --num-jobs $NUM_JOBS --max-tasks-per-worker 1 --gpus $GPUS --max-workers-per-gpu 1 \
+ done
- $EXTRA_TRAIN_ARGS
+ echo "Done training. Beginning model selection."
+ for kind in combined
+ do
+ MODELS_DIR="models.unselected.${kind}"
+ # For now we calibrate percentile ranks only for alleles for which there
+ # is training data. Calibrating all alleles would be too slow.
+ # This could be improved though.
+ ALLELE_LIST=$(bzcat "$MODELS_DIR/train_data.csv.bz2" | cut -f 1 -d , | grep -v allele | uniq | sort | uniq)
+ ALLELE_LIST+=$(echo " " $(cat additional_alleles.txt | grep -v '#') )
+ mhcflurry-class1-select-pan-allele-models \
+ --data "$MODELS_DIR/train_data.csv.bz2" \
+ --models-dir "$MODELS_DIR" \
+ --out-models-dir models.${kind} \
+ --min-models 2 \
+ --max-models 8 \
+ $PARALLELISM_ARGS
+ cp "$MODELS_DIR/train_data.csv.bz2" "models.${kind}/train_data.csv.bz2"
+ # For now we calibrate percentile ranks only for alleles for which there
+ # is training data. Calibrating all alleles would be too slow.
+ # This could be improved though.
+ time mhcflurry-calibrate-percentile-ranks \
+ --models-dir models.${kind} \
+ --match-amino-acid-distribution-data "$MODELS_DIR/train_data.csv.bz2" \
+ --motif-summary \
+ --num-peptides-per-length 100000 \
+ --allele $ALLELE_LIST \
+ --verbosity 1 \
+ $PARALLELISM_ARGS
done
cp $SCRIPT_ABSOLUTE_PATH .
@@ -102,3 +146,11 @@ split -b 2000M "$RESULT" "$PARTS"
echo "Split into parts:"
ls -lh "${PARTS}"*
+ # Write out just the selected models
+ # Move unselected into a hidden dir so it is excluded in the glob (*).
+ mkdir .ignored
+ mv models.unselected.* .ignored/
+ RESULT="$SCRATCH_DIR/${DOWNLOAD_NAME}.selected.$(date +%Y%m%d).tar.bz2"
+ tar -cjf "$RESULT" *
+ mv .ignored/* . && rmdir .ignored
+ echo "Created archive: $RESULT"
# Additional alleles besides those in the training data to include in percentile rank calibration
HLA-C*02:10
HLA-A*02:20
\ No newline at end of file
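The leading '#' line in additional_alleles.txt is treated as a comment: the updated GENERATE.sh strips it with grep -v '#' before appending the alleles to the percentile-rank calibration list, so only allele names reach --allele. The same filter can be checked by hand:

grep -v '#' additional_alleles.txt    # prints only HLA-C*02:10 and HLA-A*02:20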