diff --git a/mhcflurry/class1_neural_network.py b/mhcflurry/class1_neural_network.py
index c1f8e106820a47dcddd76ecae350191f64716121..80e58b883932653405461c49e406cf393224910c 100644
--- a/mhcflurry/class1_neural_network.py
+++ b/mhcflurry/class1_neural_network.py
@@ -719,6 +719,8 @@ class Class1NeuralNetwork(object):
             ]),
         }
             adjusted_inequalities_with_random_negatives = None
+        assert numpy.isnan(y_dict_with_random_negatives['output']).sum() == 0, (
+            y_dict_with_random_negatives)
         if sample_weights is not None:
             sample_weights_with_random_negatives = numpy.concatenate([
                 numpy.ones(int(num_random_negative.sum())),
diff --git a/mhcflurry/custom_loss.py b/mhcflurry/custom_loss.py
index eabdd130a99dcbd828b58a9f2c327832bbf18eeb..fcb970bd89dadf3915260837a5a9e21da93dd40e 100644
--- a/mhcflurry/custom_loss.py
+++ b/mhcflurry/custom_loss.py
@@ -82,11 +82,11 @@ class MSEWithInequalities(Loss):
     def encode_y(y, inequalities=None):
         y = array(y, dtype="float32")
         if isnan(y).any():
-            raise ValueError("y contains NaN: %s" % str(y))
+            raise ValueError("y contains NaN", y)
         if (y > 1.0).any():
-            raise ValueError("y contains values > 1.0")
+            raise ValueError("y contains values > 1.0", y)
         if (y < 0.0).any():
-            raise ValueError("y contains values < 0.0")
+            raise ValueError("y contains values < 0.0", y)
 
         if inequalities is None:
             encoded = y
@@ -141,11 +141,11 @@ class MSEWithInequalitiesAndMultipleOutputs(Loss):
     def encode_y(y, inequalities=None, output_indices=None):
         y = array(y, dtype="float32")
         if isnan(y).any():
-            raise ValueError("y contains NaN: %s" % str(y))
+            raise ValueError("y contains NaN", y)
         if (y > 1.0).any():
-            raise ValueError("y contains values > 1.0")
+            raise ValueError("y contains values > 1.0", y)
         if (y < 0.0).any():
-            raise ValueError("y contains values < 0.0")
+            raise ValueError("y contains values < 0.0", y)
 
         encoded = MSEWithInequalities.encode_y(
             y, inequalities=inequalities)
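
Note (not part of the patch): a minimal sketch of how the two changes pair up, assuming only standard numpy and Python exception behavior. The new assert in class1_neural_network.py catches NaN in the concatenated training targets before encode_y() runs, and passing the offending array as a second ValueError argument keeps it available on exc.args rather than flattening it into the message string. encode_y_check below is a hypothetical stand-in for the validation at the top of MSEWithInequalities.encode_y, not mhcflurry code.

    import numpy

    def encode_y_check(y):
        # Hypothetical stand-in for the validation at the top of encode_y().
        y = numpy.array(y, dtype="float32")
        if numpy.isnan(y).any():
            raise ValueError("y contains NaN", y)
        if (y > 1.0).any():
            raise ValueError("y contains values > 1.0", y)
        if (y < 0.0).any():
            raise ValueError("y contains values < 0.0", y)
        return y

    targets = numpy.array([0.2, numpy.nan, 0.9], dtype="float32")
    # The new assert in class1_neural_network.py would fire on an array like this:
    assert numpy.isnan(targets).sum() > 0

    try:
        encode_y_check(targets)
    except ValueError as exc:
        message, bad_values = exc.args  # the offending array travels with the exception
        print(message)
        print(bad_values)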