diff --git a/downloads-generation/models_class1_pan_unselected/generate_hyperparameters.py b/downloads-generation/models_class1_pan_unselected/generate_hyperparameters.py
index 471014080f062c980c5403bc515bd9a109c35189..defa64812ca422e9352468044b20f53544066c72 100644
--- a/downloads-generation/models_class1_pan_unselected/generate_hyperparameters.py
+++ b/downloads-generation/models_class1_pan_unselected/generate_hyperparameters.py
@@ -16,7 +16,7 @@ base_hyperparameters = {
     'early_stopping': True,
     'init': 'glorot_uniform',
     'layer_sizes': [1024, 512],
-    'learning_rate': None,
+    'learning_rate': 0.001,
     'locally_connected_layers': [],
     'loss': 'custom:mse_with_inequalities',
     'max_epochs': 5000,
@@ -59,11 +59,13 @@ for layer_sizes in [[512, 256], [512, 512], [1024, 512], [1024, 1024]]:
     for pretrain in [True]:
         l1_base = 0.0000001
         for l1 in [l1_base, l1_base / 10, l1_base / 100, l1_base / 1000, 0.0]:
-            new = deepcopy(base_hyperparameters)
-            new["layer_sizes"] = layer_sizes
-            new["dense_layer_l1_regularization"] = l1
-            new["train_data"]["pretrain"] = pretrain
-            if not grid or new not in grid:
-                grid.append(new)
+            for lr in [0.001, 0.01]:
+                new = deepcopy(base_hyperparameters)
+                new["layer_sizes"] = layer_sizes
+                new["dense_layer_l1_regularization"] = l1
+                new["train_data"]["pretrain"] = pretrain
+                new["learning_rate"] = lr
+                if not grid or new not in grid:
+                    grid.append(new)
 
 dump(grid, stdout)
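
Note: with the added learning-rate loop, the grid grows to 4 layer-size choices x 5 L1 strengths x 2 learning rates = 40 configurations. A minimal standalone sketch of the expanded enumeration (the stubbed base_hyperparameters below is a placeholder, not the full dict from the script):

    from copy import deepcopy

    # Placeholder; the real script defines the full hyperparameter dict.
    base_hyperparameters = {"layer_sizes": None, "train_data": {"pretrain": None}}

    grid = []
    for layer_sizes in [[512, 256], [512, 512], [1024, 512], [1024, 1024]]:
        for pretrain in [True]:
            l1_base = 0.0000001
            for l1 in [l1_base, l1_base / 10, l1_base / 100, l1_base / 1000, 0.0]:
                for lr in [0.001, 0.01]:
                    new = deepcopy(base_hyperparameters)
                    new["layer_sizes"] = layer_sizes
                    new["dense_layer_l1_regularization"] = l1
                    new["train_data"]["pretrain"] = pretrain
                    new["learning_rate"] = lr
                    if not grid or new not in grid:
                        grid.append(new)

    print(len(grid))  # 40: every combination is unique, so the dedup check is a no-op here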
diff --git a/mhcflurry/class1_neural_network.py b/mhcflurry/class1_neural_network.py
index 9cb41d3f8b654d3b62603ae80209333d16a1d297..583832015810e15f984dc7d76d683b55c0d5f8ff 100644
--- a/mhcflurry/class1_neural_network.py
+++ b/mhcflurry/class1_neural_network.py
@@ -602,9 +602,8 @@ class Class1NeuralNetwork(object):
                 epochs, max(min_val_loss_iteration + patience, min_epochs))
 
             progress_message = (
-                "epoch %3d / %3d [%0.2f sec.]: loss=%g val_loss=%g. Min val "
-                "loss (%g) at epoch %s. Cumulative training points: %d. "
-                "Earliest stop epoch: %d." % (
+                "epoch %3d/%3d [%0.2f sec.]: loss=%g val_loss=%g. Min val "
+                "loss %g at epoch %s. Cum. points: %d. Stop at epoch %d." % (
                     epoch,
                     epochs,
                     epoch_time,
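
For reference, the shortened progress line renders as follows (illustrative values only, a sketch of the new format string):

    >>> print(
    ...     "epoch %3d/%3d [%0.2f sec.]: loss=%g val_loss=%g. Min val "
    ...     "loss %g at epoch %s. Cum. points: %d. Stop at epoch %d." % (
    ...         12, 5000, 1.23, 0.0456, 0.0511, 0.0498, 9, 128000, 42))
    epoch  12/5000 [1.23 sec.]: loss=0.0456 val_loss=0.0511. Min val loss 0.0498 at epoch 9. Cum. points: 128000. Stop at epoch 42.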
diff --git a/mhcflurry/cluster_parallelism.py b/mhcflurry/cluster_parallelism.py
index 26f65613156ac157c4fd763feef47c02ca78b37a..bb5d669cbf1b5a557bbf35facfd3831e49930823 100644
--- a/mhcflurry/cluster_parallelism.py
+++ b/mhcflurry/cluster_parallelism.py
@@ -139,16 +139,18 @@ def cluster_results(
     def result_generator():
         start = time.time()
         while result_items:
+            print("[%0.1f sec elapsed] waiting on %d / %d items." % (
+                time.time() - start, len(result_items), len(work_items)))
             while True:
                 result_item = None
                 for d in result_items:
-                    if os.path.exists(item['finished_path']):
+                    if os.path.exists(d['finished_path']):
                         result_item = d
                         break
                 if result_item is None:
-                    os.sleep(60)
+                    time.sleep(60)
                 else:
-                    del result_items[result_item]
+                    result_items.remove(result_item)
                     break
 
             complete_dir = result_item['finished_path']
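
The fixes in this hunk address an undefined name (item was never bound; the loop variable is d), an invalid del result_items[result_item], which raises TypeError because list indices must be integers (list.remove() is the correct operation), and os.sleep, which does not exist (time.sleep is the correct call, and time is already imported). A standalone sketch of the corrected polling pattern, with hypothetical finished-file paths (it blocks until the files appear):

    import os
    import time

    # Hypothetical result dicts; cluster_results builds the real ones.
    result_items = [
        {"finished_path": "/tmp/job0.finished"},
        {"finished_path": "/tmp/job1.finished"},
    ]

    while result_items:
        result_item = None
        for d in result_items:
            if os.path.exists(d["finished_path"]):  # was: item[...], a NameError
                result_item = d
                break
        if result_item is None:
            time.sleep(60)  # was: os.sleep(60), which raises AttributeError
        else:
            result_items.remove(result_item)  # was: del result_items[result_item]
            # ... consume result_item here ...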
diff --git a/test/test_train_pan_allele_models_command.py b/test/test_train_pan_allele_models_command.py
index 9fafea45a91dddac9e79db3665522653510bcc72..cb00de4dbe7f28cf0071a97e51af239032c3450f 100644
--- a/test/test_train_pan_allele_models_command.py
+++ b/test/test_train_pan_allele_models_command.py
@@ -163,4 +163,5 @@ def test_run_cluster_parallelism():
 
 
 if __name__ == "__main__":
-    run_and_check(n_jobs=0, delete=False)
+    # run_and_check(n_jobs=0, delete=False)
+    test_run_cluster_parallelism()
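
With this change, invoking the file as a script exercises the cluster-parallelism path directly:

    python test/test_train_pan_allele_models_command.py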