diff --git a/downloads-generation/models_class1/generate_hyperparameters.py b/downloads-generation/models_class1/generate_hyperparameters.py
index 65491eafdbd38902096af3d5e3308b40b118b912..a805a32c1f38f938bfb5fc3f08e7d35d689905cb 100644
--- a/downloads-generation/models_class1/generate_hyperparameters.py
+++ b/downloads-generation/models_class1/generate_hyperparameters.py
@@ -64,7 +64,7 @@ base_hyperparameters = {
 
 grid = []
 for train_subset in ["all", "quantitative"]:
-    for minibatch_size in [32]:
+    for minibatch_size in [128]:
         for dense_layer_size in [8, 16, 32, 64]:
             for l1 in [0.0, 0.001]:
                 for num_lc in [0, 1, 2]:
diff --git a/mhcflurry/train_allele_specific_models_command.py b/mhcflurry/train_allele_specific_models_command.py
index eac3f9c3a9fabfeb51a42e3a584b87305e17a383..694e4a47c4e7aed1d4363b3990525371e9ea3f0e 100644
--- a/mhcflurry/train_allele_specific_models_command.py
+++ b/mhcflurry/train_allele_specific_models_command.py
@@ -10,6 +10,7 @@ import traceback
 import random
 from functools import partial
 
+import numpy
 import pandas
 import yaml
 from mhcnames import normalize_allele_name
@@ -472,6 +473,9 @@ def calibrate_percentile_ranks(allele, predictor, peptides=None):
 
 
 def worker_init(keras_backend=None, gpu_device_nums=None):
+    # Each worker needs distinct random numbers
+    numpy.random.seed()
+    random.seed()
    if keras_backend or gpu_device_nums:
        print("WORKER pid=%d assigned GPU devices: %s" % (
            os.getpid(), gpu_device_nums))
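
A minimal standalone sketch (not part of the patch) of the issue the `worker_init` change addresses: with a fork-based multiprocessing start method, each worker inherits the parent's NumPy RNG state, so workers that do not re-seed can draw identical "random" numbers. The `draw` helper and pool size below are hypothetical, chosen only for illustration; fork is assumed (the default on Linux).

```python
import multiprocessing
import numpy


def draw(_):
    # Without re-seeding, forked workers start from the parent's RNG state
    # and their first draws can be identical. Uncommenting the next two lines
    # mirrors the re-seeding added to worker_init in the patch above.
    # numpy.random.seed()
    # (and random.seed() if the stdlib random module is also used)
    return numpy.random.randint(0, 10 ** 6)


if __name__ == "__main__":
    ctx = multiprocessing.get_context("fork")  # assumes a POSIX platform
    with ctx.Pool(4) as pool:
        # Duplicate values in the output indicate shared inherited RNG state.
        print(pool.map(draw, range(4)))
```

Calling `numpy.random.seed()` and `random.seed()` with no arguments re-seeds from OS entropy, which is why the patch can give each worker distinct streams without passing explicit seeds.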