Commit 4ddd3539 authored by Tim O'Donnell

fix

parent 2ffc5c55
bash GENERATE.sh cluster
@@ -80,7 +80,7 @@ else
 time python make_benchmark.py \
     --hits "$(pwd)/hits_with_tpm.csv.bz2" \
     --proteome-peptides "$(mhcflurry-downloads path data_mass_spec_benchmark)/proteome_peptides.all.csv.bz2" \
-    --decoys-per-hit 99 \
+    --decoys-per-hit 2 \
     --exclude-pmid 31844290 31495665 31154438 \
     --only-format MULTIALLELIC \
     --out "$(pwd)/train_data.csv"
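For context, --decoys-per-hit controls how many negative (decoy) peptides are drawn from the proteome peptide list for each mass-spec hit, so lowering it from 99 to 2 sharply shrinks the generated train_data.csv. Below is a rough sketch of per-hit decoy sampling; the names (sample_decoys, hits, proteome_peptides) and the length-matching heuristic are assumptions for illustration, and the actual logic lives in make_benchmark.py, which is not shown here.

# Illustration only: hypothetical per-hit decoy sampling. The real
# implementation in make_benchmark.py may differ; length matching is an
# assumption, not taken from this commit.
import random

def sample_decoys(hits, proteome_peptides, decoys_per_hit=2, seed=0):
    """For each hit peptide, draw decoys_per_hit proteome peptides of the
    same length to serve as negative examples."""
    rng = random.Random(seed)
    by_length = {}
    for peptide in proteome_peptides:
        by_length.setdefault(len(peptide), []).append(peptide)
    decoys = []
    for hit in hits:
        pool = by_length.get(len(hit), [])
        decoys.extend(rng.sample(pool, min(decoys_per_hit, len(pool))))
    return decoys

# Example: 2 decoys per hit, matching the new GENERATE.sh setting.
print(sample_decoys(["SIINFEKL"], ["AAAAAAAA", "CCCCCCCC", "DDDDDDDD"]))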
#!/bin/bash
#BSUB -J MHCf-{work_item_num} # Job name
#BSUB -P acc_nkcancer # allocation account or Unix group
#BSUB -q gpu # queue
#BSUB -R rusage[ngpus_excl_p=1] # 1 exclusive GPU
#BSUB -R span[hosts=1] # one node
#BSUB -n 1 # number of compute cores
#BSUB -W 10:00 # walltime in HH:MM
#BSUB -R rusage[mem=20000] # memory requested, in MB
#BSUB -o {work_dir}/%J.stdout # output log (%J : JobID)
#BSUB -eo {work_dir}/STDERR # error log
#BSUB -L /bin/bash # Initialize the execution environment
#
set -e
set -x
echo "Subsequent stderr output redirected to stdout" >&2
exec 2>&1
export TMPDIR=/local/JOBS/mhcflurry-{work_item_num}
export PATH=$HOME/.conda/envs/py36b/bin/:$PATH
export PYTHONUNBUFFERED=1
export KMP_SETTINGS=1
free -m
module add cuda/10.0.130
module list
export CUDNN_HOME=/hpc/users/odonnt02/oss/cudnn/cuda
export LD_LIBRARY_PATH=$CUDNN_HOME/lib64:$LD_LIBRARY_PATH
export CMAKE_LIBRARY_PATH=$CUDNN_HOME/lib64:$CMAKE_LIBRARY_PATH
export INCLUDE_PATH=$CUDNN_HOME/include:$INCLUDE_PATH
export C_INCLUDE_PATH=$CUDNN_HOME/include:$C_INCLUDE_PATH
export CPLUS_INCLUDE_PATH=$CUDNN_HOME/include:$CPLUS_INCLUDE_PATH
export CMAKE_INCLUDE_PATH=$CUDNN_HOME/include:$CMAKE_INCLUDE_PATH
python -c 'import tensorflow as tf ; print("GPU AVAILABLE" if tf.test.is_gpu_available() else "GPU NOT AVAILABLE")'
env
cd {work_dir}
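The {work_item_num} and {work_dir} fields in the header above are Python str.format-style placeholders. A minimal sketch of how a driver script might render the header for one work item; the function name, file name, and paths below are assumptions for illustration, not part of this commit.

# Hypothetical rendering of the LSF header template shown above.
def render_submit_header(template_text, work_item_num, work_dir):
    # str.format fills {work_item_num} and {work_dir}; the LSF resource
    # strings use square brackets (e.g. rusage[mem=20000]), so they pass
    # through unchanged.
    return template_text.format(
        work_item_num=work_item_num, work_dir=work_dir)

# Assumed file name and work directory, for illustration only.
with open("cluster_submit_script_header.lsf") as f:
    header = render_submit_header(
        f.read(), work_item_num=0, work_dir="/path/to/work-dir")
print(header)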
"""
Generate grid of hyperparameters
"""
from __future__ import print_function
from sys import stdout, stderr
from copy import deepcopy
from yaml import dump
base_hyperparameters = dict(
    convolutional_filters=64,
    convolutional_kernel_size=8,
    convolutional_kernel_l1_l2=(0.00, 0.0),
    flanking_averages=True,
    n_flank_length=15,
    c_flank_length=15,
    post_convolutional_dense_layer_sizes=[],
    minibatch_size=512,
    dropout_rate=0.5,
    convolutional_activation="relu",
    patience=20,
    learning_rate=0.001)
grid = []


def hyperparrameters_grid():
    # Enumerate every combination of the varied settings below; everything
    # else is taken unchanged from base_hyperparameters.
    for learning_rate in [0.001]:
        for convolutional_activation in ["tanh", "relu"]:
            for convolutional_filters in [256, 512]:
                for flanking_averages in [True]:
                    for convolutional_kernel_size in [11, 13, 15, 17]:
                        for l1 in [0.0, 1e-6]:
                            for s in [[8], [16]]:
                                for d in [0.3, 0.5]:
                                    new = deepcopy(base_hyperparameters)
                                    new["learning_rate"] = learning_rate
                                    new["convolutional_activation"] = convolutional_activation
                                    new["convolutional_filters"] = convolutional_filters
                                    new["flanking_averages"] = flanking_averages
                                    new["convolutional_kernel_size"] = convolutional_kernel_size
                                    new["convolutional_kernel_l1_l2"] = (l1, 0.0)
                                    new["post_convolutional_dense_layer_sizes"] = s
                                    new["dropout_rate"] = d
                                    yield new


# Collect the grid, skipping duplicates.
for new in hyperparrameters_grid():
    if new not in grid:
        grid.append(new)
print("Hyperparameters grid size: %d" % len(grid), file=stderr)
dump(grid, stdout)
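Because every combination produced by the nested loops differs in at least one setting, the duplicate check is a no-op and the emitted grid has 1 * 2 * 2 * 1 * 4 * 2 * 2 * 2 = 128 entries. A standalone check of that count (not part of the commit):

# Count the varied combinations enumerated by the nested loops above.
from itertools import product

varied = [
    [0.001],                # learning_rate
    ["tanh", "relu"],       # convolutional_activation
    [256, 512],             # convolutional_filters
    [True],                 # flanking_averages
    [11, 13, 15, 17],       # convolutional_kernel_size
    [0.0, 1e-6],            # l1
    [[8], [16]],            # post_convolutional_dense_layer_sizes
    [0.3, 0.5],             # dropout_rate
]
print(len(list(product(*varied))))  # prints 128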