From 24a0dadc583cc97f722b6fcdcd2ffe99089d4432 Mon Sep 17 00:00:00 2001
From: Tim O'Donnell <timodonnell@gmail.com>
Date: Sat, 27 Jan 2018 16:36:21 -0500
Subject: [PATCH] better logging

---
 mhcflurry/train_allele_specific_models_command.py | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/mhcflurry/train_allele_specific_models_command.py b/mhcflurry/train_allele_specific_models_command.py
index f1db3a66..82763d6e 100644
--- a/mhcflurry/train_allele_specific_models_command.py
+++ b/mhcflurry/train_allele_specific_models_command.py
@@ -215,15 +215,16 @@ def run(argv=sys.argv[1:]):
         # which it adds models to, so no merging is required. It also saves
         # as it goes so no saving is required at the end.
         start = time.time()
-        for _ in tqdm.trange(len(work_items)):
             item = work_items.pop(0) # want to keep freeing up memory
             work_predictor = work_entrypoint(item)
             assert work_predictor is predictor
+        assert not work_items
 
     print("*" * 30)
-    print("Trained %d networks in %0.2f sec." % (
-        len(predictor.neural_networks), time.time() - start))
+    training_time = time.time() - start
+    print("Trained affinity predictor with %d networks in %0.2f sec." % (
+        len(predictor.neural_networks), training_time))
     print("*" * 30)
 
     if args.percent_rank_calibration_num_peptides_per_length > 0:
@@ -232,14 +233,19 @@
         predictor.calibrate_percentile_ranks(
             num_peptides_per_length=args.percent_rank_calibration_num_peptides_per_length,
             worker_pool=worker_pool)
+        percent_rank_calibration_time = time.time() - start
         print("Finished calibrating percent ranks in %0.2f sec." % (
-            time.time() - start))
+            percent_rank_calibration_time))
     predictor.save(args.out_models_dir, model_names_to_write=[])
 
     if worker_pool:
         worker_pool.close()
         worker_pool.join()
 
+    print("Train time: %0.2f sec. Percent rank calibration time: %0.2f sec." % (
+        training_time, percent_rank_calibration_time))
+    print("Predictor written to: %s" % args.out_models_dir)
+
 
 def work_entrypoint(item):
     return process_work(**item)
--
GitLab