Commit fcaaf32f authored by Tim O'Donnell

fix

parent fee8bf49
@@ -16,7 +16,7 @@ base_hyperparameters = {
     'early_stopping': True,
     'init': 'glorot_uniform',
     'layer_sizes': [1024, 512],
-    'learning_rate': None,
+    'learning_rate': 0.001,
     'locally_connected_layers': [],
     'loss': 'custom:mse_with_inequalities',
     'max_epochs': 5000,
@@ -59,11 +59,13 @@ for layer_sizes in [[512, 256], [512, 512], [1024, 512], [1024, 1024]]:
     for pretrain in [True]:
         l1_base = 0.0000001
         for l1 in [l1_base, l1_base / 10, l1_base / 100, l1_base / 1000, 0.0]:
-            new = deepcopy(base_hyperparameters)
-            new["layer_sizes"] = layer_sizes
-            new["dense_layer_l1_regularization"] = l1
-            new["train_data"]["pretrain"] = pretrain
-            if not grid or new not in grid:
-                grid.append(new)
+            for lr in [0.001, 0.01]:
+                new = deepcopy(base_hyperparameters)
+                new["layer_sizes"] = layer_sizes
+                new["dense_layer_l1_regularization"] = l1
+                new["train_data"]["pretrain"] = pretrain
+                new["learning_rate"] = lr
+                if not grid or new not in grid:
+                    grid.append(new)
 
 dump(grid, stdout)
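
Note on the hunk above: with the added learning-rate loop the sweep covers 4 layer-size choices x 5 L1 values x 2 learning rates = 40 configurations (pretrain is fixed to True). A minimal standalone sketch of the generation pattern follows; the base dictionary is abbreviated to the keys visible in the diff, and dump is assumed to be json.dump (the real script may use a different serializer).

from copy import deepcopy
from json import dump  # assumption: any dump(obj, stream) serializer works here
from sys import stdout

# Abbreviated stand-in for the full base_hyperparameters dict in the diff.
base_hyperparameters = {
    'learning_rate': 0.001,
    'layer_sizes': [1024, 512],
    'dense_layer_l1_regularization': 0.0,
    'train_data': {'pretrain': True},
}

grid = []
for layer_sizes in [[512, 256], [512, 512], [1024, 512], [1024, 1024]]:
    for pretrain in [True]:
        l1_base = 0.0000001
        for l1 in [l1_base, l1_base / 10, l1_base / 100, l1_base / 1000, 0.0]:
            for lr in [0.001, 0.01]:
                new = deepcopy(base_hyperparameters)
                new["layer_sizes"] = layer_sizes
                new["dense_layer_l1_regularization"] = l1
                new["train_data"]["pretrain"] = pretrain
                new["learning_rate"] = lr
                if not grid or new not in grid:  # skip exact duplicates
                    grid.append(new)

dump(grid, stdout)  # emits all 40 configurations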
@@ -602,9 +602,8 @@ class Class1NeuralNetwork(object):
                     epochs, max(min_val_loss_iteration + patience, min_epochs))
 
                 progress_message = (
-                    "epoch %3d / %3d [%0.2f sec.]: loss=%g val_loss=%g. Min val "
-                    "loss (%g) at epoch %s. Cumulative training points: %d. "
-                    "Earliest stop epoch: %d." % (
+                    "epoch %3d/%3d [%0.2f sec.]: loss=%g val_loss=%g. Min val "
+                    "loss %g at epoch %s. Cum. points: %d. Stop at epoch %d." % (
                         epoch,
                         epochs,
                         epoch_time,
...
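
For illustration, the shortened progress message in the hunk above renders as follows. The format string is copied from the new side of the diff; the nine values filled in are invented:

progress_message = (
    "epoch %3d/%3d [%0.2f sec.]: loss=%g val_loss=%g. Min val "
    "loss %g at epoch %s. Cum. points: %d. Stop at epoch %d." % (
        12, 5000, 1.37, 0.0421, 0.0515, 0.0498, 9, 128000, 22))
print(progress_message)
# epoch  12/5000 [1.37 sec.]: loss=0.0421 val_loss=0.0515. Min val
# loss 0.0498 at epoch 9. Cum. points: 128000. Stop at epoch 22.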
@@ -139,16 +139,18 @@ def cluster_results(
     def result_generator():
         start = time.time()
         while result_items:
+            print("[%0.1f sec elapsed] waiting on %d / %d items." % (
+                time.time() - start, len(result_items), len(work_items)))
             while True:
                 result_item = None
                 for d in result_items:
-                    if os.path.exists(item['finished_path']):
+                    if os.path.exists(d['finished_path']):
                         result_item = d
                         break
                 if result_item is None:
-                    os.sleep(60)
+                    time.sleep(60)
                 else:
-                    del result_items[result_item]
+                    result_items.remove(result_item)
                     break
 
             complete_dir = result_item['finished_path']
...
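
The loop in the hunk above polls the filesystem for sentinel files that workers write when a job finishes. A self-contained sketch of the same pattern, assuming each element of result_items is a dict with a 'finished_path' key as in the diff; the helper name and n_total parameter are hypothetical:

import os
import time

def wait_for_next_finished(result_items, n_total, start):
    # Report progress, then block until some item's sentinel file exists.
    print("[%0.1f sec elapsed] waiting on %d / %d items." % (
        time.time() - start, len(result_items), n_total))
    while True:
        for d in result_items:
            if os.path.exists(d['finished_path']):  # sentinel written by the worker
                result_items.remove(d)  # list.remove(); del result_items[d] would raise TypeError
                return d
        time.sleep(60)  # time.sleep, not os.sleep: the os module has no sleep()

The caller can then read that job's results out of the returned item's 'finished_path' directory, matching complete_dir in the diff.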
@@ -163,4 +163,5 @@ def test_run_cluster_parallelism():
 
 if __name__ == "__main__":
-    run_and_check(n_jobs=0, delete=False)
+    #run_and_check(n_jobs=0, delete=False)
+    test_run_cluster_parallelism()