From 57edfb49466c66eece94c6ece9df840b1ecc0b9b Mon Sep 17 00:00:00 2001
From: Tim O'Donnell <timodonnell@gmail.com>
Date: Sat, 7 Sep 2019 13:47:40 -0400
Subject: [PATCH] Add optimization_level argument to Class1AffinityPredictor.load

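Class1AffinityPredictor.load() now accepts an optimization_level argument so
callers (such as the network merging tests) can disable model optimization.
When the argument is omitted it falls back to the MHCFLURRY_OPTIMIZATION_LEVEL
environment variable, as before. Also drop CUDA_VISIBLE_DEVICES and the env
matrix from .travis.yml so KERAS_BACKEND is defined under global.

A minimal sketch of the new call, assuming Class1AffinityPredictor is exported
from the mhcflurry top-level package and that the downloaded models are
available in the default models directory:

    from mhcflurry import Class1AffinityPredictor

    # Load without attempting model optimization, so the individual
    # Class1NeuralNetwork instances remain accessible (as the updated
    # test_network_merging.py test asserts).
    predictor = Class1AffinityPredictor.load(optimization_level=0)
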
---
 .travis.yml                            |  2 --
 mhcflurry/class1_affinity_predictor.py | 14 ++++++++++----
 test/test_network_merging.py           |  8 ++++++--
 3 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 08b33360..d1ec644f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -35,8 +35,6 @@ install:
 env:
   global:
     - PYTHONHASHSEED=0
-    - CUDA_VISIBLE_DEVICES=""  # for tensorflow
-  matrix:
     - KERAS_BACKEND=tensorflow
 script:
   # download data and models, then run tests
diff --git a/mhcflurry/class1_affinity_predictor.py b/mhcflurry/class1_affinity_predictor.py
index 662b0cf8..d464a372 100644
--- a/mhcflurry/class1_affinity_predictor.py
+++ b/mhcflurry/class1_affinity_predictor.py
@@ -418,7 +418,7 @@ class Class1AffinityPredictor(object):
             logging.info("Wrote: %s", percent_ranks_path)
 
     @staticmethod
-    def load(models_dir=None, max_models=None):
+    def load(models_dir=None, max_models=None, optimization_level=None):
         """
         Deserialize a predictor from a directory on disk.
         
@@ -431,12 +431,18 @@ class Class1AffinityPredictor(object):
         max_models : int, optional
             Maximum number of `Class1NeuralNetwork` instances to load
 
+        optimization_level : int, optional
+            If greater than 0, model optimization will be attempted. Defaults
+            to the value of the MHCFLURRY_OPTIMIZATION_LEVEL environment variable.
+
         Returns
         -------
         `Class1AffinityPredictor` instance
         """
         if models_dir is None:
             models_dir = get_default_class1_models_dir()
+        if optimization_level is None:
+            optimization_level = OPTIMIZATION_LEVEL
 
         manifest_path = join(models_dir, "manifest.csv")
         manifest_df = pandas.read_csv(manifest_path, nrows=max_models)
@@ -497,11 +503,11 @@ class Class1AffinityPredictor(object):
             manifest_df=manifest_df,
             allele_to_percent_rank_transform=allele_to_percent_rank_transform,
         )
-        if OPTIMIZATION_LEVEL >= 1:
-            logging.info("Optimizing models")
+        if optimization_level >= 1:
             optimized = result.optimize()
             logging.info(
-                "Optimization %s", ("succeeded" if optimized else "failed"))
+                "Model optimization %s",
+                "succeeded" if optimized else "not supported for these models")
         return result
 
     def optimize(self):
diff --git a/test/test_network_merging.py b/test/test_network_merging.py
index 1f083e8a..4c486975 100644
--- a/test/test_network_merging.py
+++ b/test/test_network_merging.py
@@ -14,13 +14,16 @@ from mhcflurry.common import random_peptides
 from mhcflurry.downloads import get_path
 
 ALLELE_SPECIFIC_PREDICTOR = Class1AffinityPredictor.load(
-    get_path("models_class1", "models"))
+    get_path("models_class1", "models"), optimization_level=0)
 
 PAN_ALLELE_PREDICTOR = Class1AffinityPredictor.load(
-    get_path("models_class1_pan", "models.with_mass_spec"))
+    get_path("models_class1_pan", "models.with_mass_spec"),
+    optimization_level=0)
 
 
 def test_merge():
+    assert len(PAN_ALLELE_PREDICTOR.class1_pan_allele_models) > 1
+
     peptides = random_peptides(100, length=9)
     peptides.extend(random_peptides(100, length=10))
     peptides = pandas.Series(peptides).sample(frac=1.0)
@@ -40,3 +43,4 @@ def test_merge():
     )
     predictions2 = merged_predictor.predict(peptides=peptides, alleles=alleles)
     numpy.testing.assert_allclose(predictions1, predictions2, atol=0.1)
+
-- 
GitLab