Commit 146927bc authored by Tim O'Donnell

Add models_class1_pan_variants download

parent 92720e6b
GENERATE.sh
#!/bin/bash
#
# Uses an HPC cluster (Mount Sinai chimera cluster, which uses lsf job
# scheduler). This would need to be modified for other sites.
#
# Usage: GENERATE.sh <local|cluster> <fresh|continue-incomplete>
#
set -e
set -x
DOWNLOAD_NAME=models_class1_pan_variants
SCRATCH_DIR=${TMPDIR-/tmp}/mhcflurry-downloads-generation
SCRIPT_ABSOLUTE_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/$(basename "${BASH_SOURCE[0]}")"
SCRIPT_DIR=$(dirname "$SCRIPT_ABSOLUTE_PATH")
if [ "$1" != "cluster" ]
then
GPUS=$(nvidia-smi -L 2> /dev/null | wc -l) || GPUS=0
echo "Detected GPUS: $GPUS"
PROCESSORS=$(getconf _NPROCESSORS_ONLN)
echo "Detected processors: $PROCESSORS"
if [ "$GPUS" -eq "0" ]; then
NUM_JOBS=${NUM_JOBS-1}
else
NUM_JOBS=${NUM_JOBS-$GPUS}
fi
echo "Num jobs: $NUM_JOBS"
PARALLELISM_ARGS+=" --num-jobs $NUM_JOBS --max-tasks-per-worker 1 --gpus $GPUS --max-workers-per-gpu 1"
else
PARALLELISM_ARGS+=" --cluster-parallelism --cluster-max-retries 3 --cluster-submit-command bsub --cluster-results-workdir $HOME/mhcflurry-scratch $SCRIPT_DIR/cluster_submit_script_header.mssm_hpc.gpu.lsf"
fi
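# PARALLELISM_ARGS is passed through to the training and model selection
# commands below: local runs use one worker per detected GPU (or a single
# CPU worker when none is found), while cluster runs submit each work item
# as a separate LSF job using the submit script header in this directory.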
mkdir -p "$SCRATCH_DIR"
if [ "$2" != "continue-incomplete" ]
then
echo "Fresh run"
rm -rf "$SCRATCH_DIR/$DOWNLOAD_NAME"
mkdir "$SCRATCH_DIR/$DOWNLOAD_NAME"
else
echo "Continuing incomplete run"
fi
# Send stdout and stderr to a logfile included with the archive.
LOG="$SCRATCH_DIR/$DOWNLOAD_NAME/LOG.$(date +%s).txt"
exec > >(tee -ia "$LOG")
exec 2> >(tee -ia "$LOG" >&2)
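# (The process substitutions above send stdout and stderr through tee, so all
# output is appended to $LOG while still reaching the console.)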
# Log some environment info
echo "Invocation: $0 $@"
date
pip freeze
git status
cd "$SCRATCH_DIR/$DOWNLOAD_NAME"
export OMP_NUM_THREADS=1
export PYTHONUNBUFFERED=1
if [ "$2" != "continue-incomplete" ]
then
cp $SCRIPT_DIR/generate_hyperparameters.production.py .
cp $SCRIPT_DIR/generate_hyperparameters.py .
python generate_hyperparameters.production.py > hyperparameters.production.json
python generate_hyperparameters.py hyperparameters.production.json no_pretrain > hyperparameters.no_pretrain.yaml
python generate_hyperparameters.py hyperparameters.no_pretrain.yaml single_hidden > hyperparameters.single_hidden_no_pretrain.yaml
fi
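# The variant grids are derived in sequence: the production grid is emitted as
# JSON, no_pretrain disables pretraining on top of it, and single_hidden
# additionally restricts each model to one hidden layer
# (see generate_hyperparameters.py below).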
for kind in single_hidden_no_pretrain no_pretrain 34mer_sequence
do
    CONTINUE_INCOMPLETE_ARGS=""
    if [ "$2" == "continue-incomplete" ] && [ -d "models.${kind}" ]
    then
        echo "Will continue existing run: $kind"
        CONTINUE_INCOMPLETE_ARGS="--continue-incomplete"
    fi
    ALLELE_SEQUENCES="$(mhcflurry-downloads path allele_sequences)/allele_sequences.csv"
    HYPERPARAMETERS=hyperparameters.$kind.yaml
    if [ "$kind" == "34mer_sequence" ]
    then
        ALLELE_SEQUENCES="$(mhcflurry-downloads path allele_sequences)/class1_pseudosequences.csv"
        HYPERPARAMETERS=hyperparameters.production.json
    fi
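    # For 34mer_sequence, the full allele sequences are replaced with what are
    # presumably NetMHCpan-style 34-mer pseudosequences, and the unmodified
    # production grid is reused.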
    mhcflurry-class1-train-pan-allele-models \
        --data "$(mhcflurry-downloads path data_curated)/curated_training_data.with_mass_spec.csv.bz2" \
        --allele-sequences "$ALLELE_SEQUENCES" \
        --pretrain-data "$(mhcflurry-downloads path random_peptide_predictions)/predictions.csv.bz2" \
        --held-out-measurements-per-allele-fraction-and-max 0.25 100 \
        --num-folds 4 \
        --hyperparameters "$HYPERPARAMETERS" \
        --out-models-dir "$(pwd)/models.unselected.${kind}" \
        --worker-log-dir "$SCRATCH_DIR/$DOWNLOAD_NAME" \
        $PARALLELISM_ARGS $CONTINUE_INCOMPLETE_ARGS
done
echo "Done training. Beginning model selection."
for kind in single_hidden_no_pretrain no_pretrain 34mer_sequence
do
    MODELS_DIR="models.unselected.${kind}"
    mhcflurry-class1-select-pan-allele-models \
        --data "$MODELS_DIR/train_data.csv.bz2" \
        --models-dir "$MODELS_DIR" \
        --out-models-dir "models.${kind}" \
        --min-models 2 \
        --max-models 8 \
        $PARALLELISM_ARGS
    # Link the training data into the selected models directory. The symlink
    # target is resolved relative to the directory containing the link.
    ln -s "../$MODELS_DIR/train_data.csv.bz2" "models.${kind}/train_data.csv.bz2"
done
cp "$SCRIPT_ABSOLUTE_PATH" .
bzip2 -f "$LOG"
for i in LOG-worker.*.txt ; do bzip2 -f "$i" || true ; done
RESULT="$SCRATCH_DIR/${DOWNLOAD_NAME}.$(date +%Y%m%d).tar.bz2"
tar -cjf "$RESULT" *
echo "Created archive: $RESULT"
# Split into <2GB chunks for GitHub
PARTS="${RESULT}.part."
# Check for pre-existing part files and rename them.
for i in $(ls "${PARTS}"* 2> /dev/null)
do
    DEST="${i}.OLD.$(date +%s)"
    echo "WARNING: already exists: $i . Moving to $DEST"
    mv "$i" "$DEST"
done
split -b 2000M "$RESULT" "$PARTS"
echo "Split into parts:"
ls -lh "${PARTS}"*
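# Note: to reconstruct the original archive from its parts after download,
# concatenate them in order. A sketch, assuming the part naming used above
# (YYYYMMDD stands in for the actual date stamp):
#     cat models_class1_pan_variants.YYYYMMDD.tar.bz2.part.* > models_class1_pan_variants.YYYYMMDD.tar.bz2
#     tar -xjf models_class1_pan_variants.YYYYMMDD.tar.bz2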
cluster_submit_script_header.mssm_hpc.gpu.lsf
#!/bin/bash
#BSUB -J MHCf-{work_item_num} # Job name
#BSUB -P acc_nkcancer # allocation account or Unix group
#BSUB -q gpu # queue
#BSUB -R rusage[ngpus_excl_p=1] # 1 exclusive GPU
#BSUB -R span[hosts=1] # one node
#BSUB -n 1 # number of compute cores
#BSUB -W 46:00 # walltime in HH:MM
#BSUB -R rusage[mem=30000] # MB of memory requested
#BSUB -o {work_dir}/%J.stdout # output log (%J : JobID)
#BSUB -eo {work_dir}/STDERR # error log
#BSUB -L /bin/bash # Initialize the execution environment
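# Fields in braces, e.g. {work_item_num} and {work_dir}, are template
# placeholders filled in by mhcflurry's cluster parallelism support before
# this header is submitted via bsub.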
#
set -e
set -x
echo "Subsequent stderr output redirected to stdout" >&2
exec 2>&1
export TMPDIR=/local/JOBS/mhcflurry-{work_item_num}
export PATH=$HOME/.conda/envs/py36b/bin/:$PATH
export PYTHONUNBUFFERED=1
export KMP_SETTINGS=1
export NETMHC_BUNDLE_HOME=$HOME/sinai/git/netmhc-bundle
export NETMHC_BUNDLE_TMPDIR=/local/JOBS/netmhctmp-{work_item_num}
export PATH=$NETMHC_BUNDLE_HOME/bin:$PATH
free -m
module add cuda/10.0.130 cudnn/7.1.1
module list
# python -c 'import tensorflow as tf ; print("GPU AVAILABLE" if tf.test.is_gpu_available() else "GPU NOT AVAILABLE")'
env
cd {work_dir}
generate_hyperparameters.production.py (symlink)
../models_class1_pan_unselected/generate_hyperparameters.py
generate_hyperparameters.py
"""
Generate grid of hyperparameters
"""
from sys import stdout, argv
from copy import deepcopy
from yaml import dump, load
import argparse
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument(
"production_hyperparameters",
metavar="data.json",
help="Production (i.e. standard) hyperparameters grid.")
parser.add_argument(
"kind",
choices=('single_hidden', 'no_pretrain'),
help="Hyperameters variant to output")
args = parser.parse_args(argv[1:])
with open(args.production_hyperparameters) as fd:
production_hyperparameters_list = load(fd)
def transform_to_single_hidden(hyperparameters):
    """Return variants of the given hyperparameters with one hidden layer each."""
    result = []
    for size in [64, 128, 256, 1024]:
        hyperparameters['layer_sizes'] = [size]
        result.append(deepcopy(hyperparameters))
    return result


def transform_to_no_pretrain(hyperparameters):
    """Return a variant of the given hyperparameters with pretraining disabled."""
    result = deepcopy(hyperparameters)
    result['train_data']['pretrain'] = False
    return [result]
TRANSFORMS = {
    "single_hidden": transform_to_single_hidden,
    "no_pretrain": transform_to_no_pretrain,
}
transform = TRANSFORMS[args.kind]

# Apply the transform to every item in the grid, deduplicating the results.
result_list = []
for item in production_hyperparameters_list:
    for result_item in transform(item):
        if result_item not in result_list:
            result_list.append(result_item)

dump(result_list, stdout)
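
As a quick illustration of the transforms, the script can be run on a toy
one-entry grid. The file name and grid contents below are made up for the
example; only the layer_sizes and train_data.pretrain keys, which the
transforms above manipulate, are taken from the script itself:

# Hypothetical minimal grid, only for illustration.
cat > tiny.production.json <<'EOF'
[{"layer_sizes": [512, 512], "train_data": {"pretrain": true}}]
EOF

# no_pretrain: emits the same entry with train_data.pretrain set to false.
python generate_hyperparameters.py tiny.production.json no_pretrain

# single_hidden: emits four entries, one per hidden layer size (64, 128, 256, 1024).
python generate_hyperparameters.py tiny.production.json single_hidden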