diff --git a/mhcflurry/class1_neural_network.py b/mhcflurry/class1_neural_network.py
index ffb3435bf810b5bca11ee91354cc71a6d1d23f97..e434e1b21422b1db1d4c1636076151bafd56ea34 100644
--- a/mhcflurry/class1_neural_network.py
+++ b/mhcflurry/class1_neural_network.py
@@ -519,14 +519,16 @@ class Class1NeuralNetwork(object):
             use_multiprocessing=False,
             workers=1,
             validation_data=(validation_x_dict, validation_y_dict),
+            verbose=verbose,
             callbacks=[keras.callbacks.EarlyStopping(
                 monitor="val_loss",
                 patience=patience,
-                verbose=1)]
+                verbose=verbose)]
         )
         if verbose > 0:
             print("fit_generator completed in %0.2f sec (%d total points)" % (
                 time.time() - start, yielded_values_box[0]))
+        return result
 
 
     def fit(
diff --git a/mhcflurry/parallelism.py b/mhcflurry/parallelism.py
index 88913986287c7694ceef2e506196612eb330a20e..0e652a964908a2e55f1fdae9e831250fca559b03 100644
--- a/mhcflurry/parallelism.py
+++ b/mhcflurry/parallelism.py
@@ -221,7 +221,7 @@ def worker_init_entry_point(
 def worker_init(keras_backend=None, gpu_device_nums=None, worker_log_dir=None):
     if worker_log_dir:
         sys.stderr = sys.stdout = open(
-            os.path.join(worker_log_dir, "LOG-worker.%d.txt" % os.getpid()), "w")
+            os.path.join(worker_log_dir, "LOG-"
+                                         ""
+                                         ""
+                                         ""
+                                         "worker.%d.txt" % os.getpid()), "w")
 
     # Each worker needs distinct random numbers
     numpy.random.seed()
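
Notes (illustrative sketches, not part of the patch):

The "return result" addition propagates the return value of Keras's
Model.fit_generator which, like Model.fit, is a keras.callbacks.History
whose .history dict maps metric names to per-epoch values; callers can
use it, for example, to see where EarlyStopping fired. A minimal,
self-contained sketch of that return value, assuming a TensorFlow 2.x
style Keras install (standalone demo, independent of mhcflurry):

    # Demonstrates the History object that fit()/fit_generator() return;
    # the patch passes this object through to Class1NeuralNetwork callers.
    import numpy
    from tensorflow import keras

    model = keras.Sequential([keras.Input(shape=(4,)), keras.layers.Dense(1)])
    model.compile(optimizer="sgd", loss="mse")
    x = numpy.random.rand(32, 4).astype("float32")
    y = numpy.random.rand(32, 1).astype("float32")
    result = model.fit(x, y, validation_split=0.25, epochs=3, verbose=0)
    print(result.history["val_loss"])  # one validation loss per completed epoch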
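The context lines in worker_init show why it calls numpy.random.seed()
with no argument: fork()ed pool workers inherit the parent's global RNG
state, so without a per-worker reseed they typically draw identical
"random" numbers. A standalone sketch of that effect (assumes a
fork-based multiprocessing start method, e.g. the Linux default):

    import multiprocessing
    import os
    import time

    import numpy

    def draw(_):
        time.sleep(0.1)  # hold each worker so tasks spread across processes
        return (os.getpid(), numpy.random.randint(0, 1000000))

    if __name__ == "__main__":
        # Without reseeding, the forked workers usually report the same value.
        with multiprocessing.Pool(4) as pool:
            print(pool.map(draw, range(4)))
        # With the same no-argument reseed worker_init performs (seeded from
        # OS entropy), each worker draws distinct values.
        with multiprocessing.Pool(4, initializer=numpy.random.seed) as pool:
            print(pool.map(draw, range(4)))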