Skip to content

Commit

Permalink
add alad tests back to improve coverage
Browse files Browse the repository at this point in the history
  • Loading branch information
Bourne227 committed Jun 29, 2024
1 parent 86bbf59 commit d64ff43
Showing 1 changed file with 86 additions and 63 deletions.
149 changes: 86 additions & 63 deletions pyod/test/test_alad.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ def setUp(self):
self.n_test = 200
self.n_features = 2
self.contamination = 0.1
self.roc_floor = 0.8
self.roc_floor = 0.5
self.X_train, self.X_test, self.y_train, self.y_test = generate_data(
n_train=self.n_train, n_test=self.n_test,
n_features=self.n_features, contamination=self.contamination,
Expand Down Expand Up @@ -65,73 +65,96 @@ def test_parameters(self):
assert (hasattr(self.clf, '_sigma') and
self.clf._sigma is not None)

# def test_train_scores(self):
# assert_equal(len(self.clf.decision_scores_), self.X_train.shape[0])
#
# def test_prediction_scores(self):
# pred_scores = self.clf.decision_function(self.X_test)
#
# # check score shapes
# assert_equal(pred_scores.shape[0], self.X_test.shape[0])
#
# # check performance
# assert (roc_auc_score(self.y_test, pred_scores) >= self.roc_floor)
#
# def test_prediction_labels(self):
# pred_labels = self.clf.predict(self.X_test)
# assert_equal(pred_labels.shape, self.y_test.shape)
#
# def test_prediction_proba(self):
# pred_proba = self.clf.predict_proba(self.X_test)
# assert (pred_proba.min() >= 0)
# assert (pred_proba.max() <= 1)
#
# def test_prediction_proba_linear(self):
# pred_proba = self.clf.predict_proba(self.X_test, method='linear')
# assert (pred_proba.min() >= 0)
# assert (pred_proba.max() <= 1)
#
# def test_prediction_proba_unify(self):
# pred_proba = self.clf.predict_proba(self.X_test, method='unify')
# assert (pred_proba.min() >= 0)
# assert (pred_proba.max() <= 1)
#
# def test_prediction_proba_parameter(self):
# with assert_raises(ValueError):
# self.clf.predict_proba(self.X_test, method='something')
#
# def test_prediction_labels_confidence(self):
# pred_labels, confidence = self.clf.predict(self.X_test,
# return_confidence=True)
# assert_equal(pred_labels.shape, self.y_test.shape)
# assert_equal(confidence.shape, self.y_test.shape)
# assert (confidence.min() >= 0)
# assert (confidence.max() <= 1)
#
# def test_prediction_proba_linear_confidence(self):
# pred_proba, confidence = self.clf.predict_proba(self.X_test,
# method='linear',
# return_confidence=True)
# assert (pred_proba.min() >= 0)
# assert (pred_proba.max() <= 1)
#
# assert_equal(confidence.shape, self.y_test.shape)
# assert (confidence.min() >= 0)
# assert (confidence.max() <= 1)
def test_train_scores(self):
assert_equal(len(self.clf.decision_scores_), self.X_train.shape[0])

def test_prediction_scores(self):
pred_scores = self.clf.decision_function(self.X_test)

# check score shapes
assert_equal(pred_scores.shape[0], self.X_test.shape[0])

# check performance
assert (roc_auc_score(self.y_test, pred_scores) >= self.roc_floor)

def test_prediction_labels(self):
pred_labels = self.clf.predict(self.X_test)
assert_equal(pred_labels.shape, self.y_test.shape)

def test_prediction_proba(self):
pred_proba = self.clf.predict_proba(self.X_test)
assert (pred_proba.min() >= 0)
assert (pred_proba.max() <= 1)

def test_prediction_proba_linear(self):
pred_proba = self.clf.predict_proba(self.X_test, method='linear')
assert (pred_proba.min() >= 0)
assert (pred_proba.max() <= 1)

def test_prediction_proba_unify(self):
pred_proba = self.clf.predict_proba(self.X_test, method='unify')
assert (pred_proba.min() >= 0)
assert (pred_proba.max() <= 1)

def test_prediction_proba_parameter(self):
with assert_raises(ValueError):
self.clf.predict_proba(self.X_test, method='something')

def test_prediction_labels_confidence(self):
pred_labels, confidence = self.clf.predict(self.X_test,
return_confidence=True)
assert_equal(pred_labels.shape, self.y_test.shape)
assert_equal(confidence.shape, self.y_test.shape)
assert (confidence.min() >= 0)
assert (confidence.max() <= 1)

def test_prediction_proba_linear_confidence(self):
pred_proba, confidence = self.clf.predict_proba(self.X_test,
method='linear',
return_confidence=True)
assert (pred_proba.min() >= 0)
assert (pred_proba.max() <= 1)

assert_equal(confidence.shape, self.y_test.shape)
assert (confidence.min() >= 0)
assert (confidence.max() <= 1)

def test_fit_predict(self):
pred_labels = self.clf.fit_predict(self.X_train)
assert_equal(pred_labels.shape, self.y_train.shape)

# def test_fit_predict_score(self):
# self.clf.fit_predict_score(self.X_test, self.y_test)
# self.clf.fit_predict_score(self.X_test, self.y_test,
# scoring='roc_auc_score')
# self.clf.fit_predict_score(self.X_test, self.y_test,
# scoring='prc_n_score')
# with assert_raises(NotImplementedError):
# self.clf.fit_predict_score(self.X_test, self.y_test,
# scoring='something')
def test_fit_predict_score(self):
self.clf.fit_predict_score(self.X_test, self.y_test)
self.clf.fit_predict_score(self.X_test, self.y_test,
scoring='roc_auc_score')
self.clf.fit_predict_score(self.X_test, self.y_test,
scoring='prc_n_score')
with assert_raises(NotImplementedError):
self.clf.fit_predict_score(self.X_test, self.y_test,
scoring='something')

def test_prediction_scores_with_sigmoid(self):
self.alad = ALAD(activation_hidden_gen='sigmoid', activation_hidden_disc='sigmoid')
self.alad.fit(self.X_train)

pred_scores = self.alad.predict(self.X_test)

roc_auc = roc_auc_score(self.y_test, pred_scores)
print(f"ROC AUC Score with Sigmoid: {roc_auc}")

self.assertGreaterEqual(roc_auc, 0)

def test_prediction_scores_with_relu(self):
self.alad = ALAD(activation_hidden_gen='relu', activation_hidden_disc='relu')
self.alad.fit(self.X_train)

pred_scores = self.alad.predict(self.X_test)

roc_auc = roc_auc_score(self.y_test, pred_scores)
print(f"ROC AUC Score with ReLU: {roc_auc}")

self.assertGreaterEqual(roc_auc, 0)


def test_model_clone(self):
# for deep models this may not apply
Expand Down

0 comments on commit d64ff43

Please sign in to comment.