So we're not completely bombarded, here are the results for the first few files of `ethicml.metrics`:
---- ethicml/metrics/accuracy.py (6) ----

```diff
# mutant 6500
--- ethicml/metrics/accuracy.py
+++ ethicml/metrics/accuracy.py
@@ -21,7 +21,7 @@
         name: str,
         pos_class: Optional[int] = None,
     ):
-        if pos_class is not None:
+        if pos_class is None:
             super().__init__(pos_class=pos_class)
         else:
             super().__init__()

# mutant 6501
--- ethicml/metrics/accuracy.py
+++ ethicml/metrics/accuracy.py
@@ -25,7 +25,7 @@
             super().__init__(pos_class=pos_class)
         else:
             super().__init__()
-        self._metric = sklearn_metric
+        self._metric = None
         self._name = name

     @implements(Metric)

# mutant 6502
--- ethicml/metrics/accuracy.py
+++ ethicml/metrics/accuracy.py
@@ -26,7 +26,7 @@
         else:
             super().__init__()
         self._metric = sklearn_metric
-        self._name = name
+        self._name = None

     @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:

# mutant 6503
--- ethicml/metrics/accuracy.py
+++ ethicml/metrics/accuracy.py
@@ -28,7 +28,6 @@
         self._metric = sklearn_metric
         self._name = name

-    @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:
         return self._metric(actual.y, prediction.hard)

# mutant 6504
--- ethicml/metrics/accuracy.py
+++ ethicml/metrics/accuracy.py
@@ -37,7 +37,7 @@
     """Classification accuracy."""

     def __init__(self, pos_class: Optional[int] = None):
-        super().__init__(accuracy_score, "Accuracy", pos_class=pos_class)
+        super().__init__(accuracy_score, "XXAccuracyXX", pos_class=pos_class)


 class F1(SklearnMetric):

# mutant 6505
--- ethicml/metrics/accuracy.py
+++ ethicml/metrics/accuracy.py
@@ -44,5 +44,5 @@
     """F1 score: harmonic mean of precision and recall."""

     def __init__(self, pos_class: Optional[int] = None):
-        super().__init__(f1_score, "F1", pos_class=pos_class)
+        super().__init__(f1_score, "XXF1XX", pos_class=pos_class)
```
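Most of these should be killable by asserting the metric name and the score on a tiny hand-checked dataset. A pytest-style sketch (the `DataTuple`/`Prediction` constructors, the `ethicml.utility` import path, and the `name` property are assumptions read off the diffs above, not checked against the current API); an analogous test would cover `F1`:

```python
import pandas as pd

from ethicml.metrics import Accuracy
from ethicml.utility import DataTuple, Prediction  # import path assumed


def test_accuracy_name_and_score() -> None:
    actual = DataTuple(
        x=pd.DataFrame({"x": [0.0, 1.0, 2.0, 3.0]}),
        s=pd.DataFrame({"s": [0, 0, 1, 1]}),
        y=pd.DataFrame({"y": [1, 0, 1, 0]}),
        name="toy",
    )
    preds = Prediction(hard=pd.Series([1, 0, 0, 0]))  # correct on 3 of 4 rows
    metric = Accuracy()
    # kills the "XXAccuracyXX" / None name mutants (assumes Metric exposes `name`)
    assert metric.name == "Accuracy"
    # kills the `_metric = None` mutant and the score-path mutants
    assert metric.score(preds, actual) == 0.75
```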
---- ethicml/metrics/anti_spur.py (9) ----

```diff
# mutant 6506
--- ethicml/metrics/anti_spur.py
+++ ethicml/metrics/anti_spur.py
@@ -14,7 +14,7 @@
     Computes :math:`P(\hat{y}=y|y\neq s)`.
     """

-    _name: str = "anti_spurious"
+    _name: str = "XXanti_spuriousXX"

     @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:

# mutant 6507
--- ethicml/metrics/anti_spur.py
+++ ethicml/metrics/anti_spur.py
@@ -14,7 +14,7 @@
     Computes :math:`P(\hat{y}=y|y\neq s)`.
     """

-    _name: str = "anti_spurious"
+    _name: str = None

     @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:

# mutant 6508
--- ethicml/metrics/anti_spur.py
+++ ethicml/metrics/anti_spur.py
@@ -16,7 +16,6 @@
     _name: str = "anti_spurious"

-    @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:
         preds = prediction.hard.to_numpy()[:, np.newaxis]
         sens = actual.s.to_numpy()

# mutant 6509
--- ethicml/metrics/anti_spur.py
+++ ethicml/metrics/anti_spur.py
@@ -18,7 +18,7 @@
     @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:
-        preds = prediction.hard.to_numpy()[:, np.newaxis]
+        preds = None
         sens = actual.s.to_numpy()
         labels = actual.y.to_numpy()
         s_uneq_y = sens != labels

# mutant 6510
--- ethicml/metrics/anti_spur.py
+++ ethicml/metrics/anti_spur.py
@@ -19,7 +19,7 @@
     @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:
         preds = prediction.hard.to_numpy()[:, np.newaxis]
-        sens = actual.s.to_numpy()
+        sens = None
         labels = actual.y.to_numpy()
         s_uneq_y = sens != labels
         return (preds[s_uneq_y] == labels[s_uneq_y]).mean()

# mutant 6511
--- ethicml/metrics/anti_spur.py
+++ ethicml/metrics/anti_spur.py
@@ -20,7 +20,7 @@
     def score(self, prediction: Prediction, actual: DataTuple) -> float:
         preds = prediction.hard.to_numpy()[:, np.newaxis]
         sens = actual.s.to_numpy()
-        labels = actual.y.to_numpy()
+        labels = None
         s_uneq_y = sens != labels
         return (preds[s_uneq_y] == labels[s_uneq_y]).mean()

# mutant 6512
--- ethicml/metrics/anti_spur.py
+++ ethicml/metrics/anti_spur.py
@@ -21,6 +21,6 @@
         preds = prediction.hard.to_numpy()[:, np.newaxis]
         sens = actual.s.to_numpy()
         labels = actual.y.to_numpy()
-        s_uneq_y = sens != labels
+        s_uneq_y = sens == labels
         return (preds[s_uneq_y] == labels[s_uneq_y]).mean()

# mutant 6513
--- ethicml/metrics/anti_spur.py
+++ ethicml/metrics/anti_spur.py
@@ -21,6 +21,6 @@
         preds = prediction.hard.to_numpy()[:, np.newaxis]
         sens = actual.s.to_numpy()
         labels = actual.y.to_numpy()
-        s_uneq_y = sens != labels
+        s_uneq_y = None
         return (preds[s_uneq_y] == labels[s_uneq_y]).mean()

# mutant 6514
--- ethicml/metrics/anti_spur.py
+++ ethicml/metrics/anti_spur.py
@@ -22,5 +22,5 @@
         sens = actual.s.to_numpy()
         labels = actual.y.to_numpy()
         s_uneq_y = sens != labels
-        return (preds[s_uneq_y] == labels[s_uneq_y]).mean()
+        return (preds[s_uneq_y] != labels[s_uneq_y]).mean()
```
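Same idea here: a fixture where the prediction is right on exactly 3 of the 4 rows with s ≠ y pins the score to 0.75, which separates all three comparison mutants (the class name `AS` and its import path are my guesses from the module, so treat them as placeholders):

```python
import pandas as pd

from ethicml.metrics.anti_spur import AS  # hypothetical import; class name guessed
from ethicml.utility import DataTuple, Prediction  # import path assumed


def test_anti_spurious_score() -> None:
    actual = DataTuple(
        x=pd.DataFrame({"x": [0.0] * 5}),
        s=pd.DataFrame({"s": [0, 0, 0, 1, 1]}),
        y=pd.DataFrame({"y": [1, 1, 1, 0, 1]}),
        name="toy",
    )
    preds = Prediction(hard=pd.Series([1, 1, 0, 0, 1]))
    # rows 0-3 have s != y and the prediction is right on 3 of them:
    # the real code gives 0.75, the `==` mutant 1.0, the `!=` mutant 0.25
    assert AS().score(preds, actual) == 0.75
```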
---- ethicml/metrics/balanced_accuracy.py (13) ----

```diff
# mutant 6169
--- ethicml/metrics/balanced_accuracy.py
+++ ethicml/metrics/balanced_accuracy.py
@@ -10,7 +10,7 @@
 class BalancedAccuracy(Metric):
     """Accuracy that is balanced with respect to the class labels."""

-    _name: str = "Balanced Accuracy"
+    _name: str = "XXBalanced AccuracyXX"

     @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:

# mutant 6170
--- ethicml/metrics/balanced_accuracy.py
+++ ethicml/metrics/balanced_accuracy.py
@@ -10,7 +10,7 @@
 class BalancedAccuracy(Metric):
     """Accuracy that is balanced with respect to the class labels."""

-    _name: str = "Balanced Accuracy"
+    _name: str = None

     @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:

# mutant 6171
--- ethicml/metrics/balanced_accuracy.py
+++ ethicml/metrics/balanced_accuracy.py
@@ -12,7 +12,6 @@
     _name: str = "Balanced Accuracy"

-    @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:
         t_neg, f_pos, f_neg, t_pos = confusion_matrix(prediction, actual, self.positive_class)
         tpr = t_pos / (t_pos + f_neg)

# mutant 6172
--- ethicml/metrics/balanced_accuracy.py
+++ ethicml/metrics/balanced_accuracy.py
@@ -14,7 +14,7 @@
     @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:
-        t_neg, f_pos, f_neg, t_pos = confusion_matrix(prediction, actual, self.positive_class)
+        t_neg, f_pos, f_neg, t_pos = None
         tpr = t_pos / (t_pos + f_neg)
         tnr = t_neg / (t_neg + f_pos)
         return 0.5 * (tpr + tnr)

# mutant 6173
--- ethicml/metrics/balanced_accuracy.py
+++ ethicml/metrics/balanced_accuracy.py
@@ -15,7 +15,7 @@
     @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:
         t_neg, f_pos, f_neg, t_pos = confusion_matrix(prediction, actual, self.positive_class)
-        tpr = t_pos / (t_pos + f_neg)
+        tpr = t_pos * (t_pos + f_neg)
         tnr = t_neg / (t_neg + f_pos)
         return 0.5 * (tpr + tnr)

# mutant 6174
--- ethicml/metrics/balanced_accuracy.py
+++ ethicml/metrics/balanced_accuracy.py
@@ -15,7 +15,7 @@
     @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:
         t_neg, f_pos, f_neg, t_pos = confusion_matrix(prediction, actual, self.positive_class)
-        tpr = t_pos / (t_pos + f_neg)
+        tpr = t_pos / (t_pos - f_neg)
         tnr = t_neg / (t_neg + f_pos)
         return 0.5 * (tpr + tnr)

# mutant 6175
--- ethicml/metrics/balanced_accuracy.py
+++ ethicml/metrics/balanced_accuracy.py
@@ -15,7 +15,7 @@
     @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:
         t_neg, f_pos, f_neg, t_pos = confusion_matrix(prediction, actual, self.positive_class)
-        tpr = t_pos / (t_pos + f_neg)
+        tpr = None
         tnr = t_neg / (t_neg + f_pos)
         return 0.5 * (tpr + tnr)

# mutant 6176
--- ethicml/metrics/balanced_accuracy.py
+++ ethicml/metrics/balanced_accuracy.py
@@ -16,6 +16,6 @@
     def score(self, prediction: Prediction, actual: DataTuple) -> float:
         t_neg, f_pos, f_neg, t_pos = confusion_matrix(prediction, actual, self.positive_class)
         tpr = t_pos / (t_pos + f_neg)
-        tnr = t_neg / (t_neg + f_pos)
+        tnr = t_neg * (t_neg + f_pos)
         return 0.5 * (tpr + tnr)

# mutant 6177
--- ethicml/metrics/balanced_accuracy.py
+++ ethicml/metrics/balanced_accuracy.py
@@ -16,6 +16,6 @@
     def score(self, prediction: Prediction, actual: DataTuple) -> float:
         t_neg, f_pos, f_neg, t_pos = confusion_matrix(prediction, actual, self.positive_class)
         tpr = t_pos / (t_pos + f_neg)
-        tnr = t_neg / (t_neg + f_pos)
+        tnr = t_neg / (t_neg - f_pos)
         return 0.5 * (tpr + tnr)

# mutant 6178
--- ethicml/metrics/balanced_accuracy.py
+++ ethicml/metrics/balanced_accuracy.py
@@ -16,6 +16,6 @@
     def score(self, prediction: Prediction, actual: DataTuple) -> float:
         t_neg, f_pos, f_neg, t_pos = confusion_matrix(prediction, actual, self.positive_class)
         tpr = t_pos / (t_pos + f_neg)
-        tnr = t_neg / (t_neg + f_pos)
+        tnr = None
         return 0.5 * (tpr + tnr)

# mutant 6179
--- ethicml/metrics/balanced_accuracy.py
+++ ethicml/metrics/balanced_accuracy.py
@@ -17,5 +17,5 @@
         t_neg, f_pos, f_neg, t_pos = confusion_matrix(prediction, actual, self.positive_class)
         tpr = t_pos / (t_pos + f_neg)
         tnr = t_neg / (t_neg + f_pos)
-        return 0.5 * (tpr + tnr)
+        return 1.5 * (tpr + tnr)

# mutant 6180
--- ethicml/metrics/balanced_accuracy.py
+++ ethicml/metrics/balanced_accuracy.py
@@ -17,5 +17,5 @@
         t_neg, f_pos, f_neg, t_pos = confusion_matrix(prediction, actual, self.positive_class)
         tpr = t_pos / (t_pos + f_neg)
         tnr = t_neg / (t_neg + f_pos)
-        return 0.5 * (tpr + tnr)
+        return 0.5 / (tpr + tnr)

# mutant 6181
--- ethicml/metrics/balanced_accuracy.py
+++ ethicml/metrics/balanced_accuracy.py
@@ -17,5 +17,5 @@
         t_neg, f_pos, f_neg, t_pos = confusion_matrix(prediction, actual, self.positive_class)
         tpr = t_pos / (t_pos + f_neg)
         tnr = t_neg / (t_neg + f_pos)
-        return 0.5 * (tpr + tnr)
+        return 0.5 * (tpr - tnr)
```
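A hand-checked confusion matrix (tp=2, fn=1, fp=1, tn=1) gives a distinct value under every one of the arithmetic mutants above; sketch (same assumed constructors as before):

```python
import pandas as pd
import pytest

from ethicml.metrics import BalancedAccuracy
from ethicml.utility import DataTuple, Prediction  # import path assumed


def test_balanced_accuracy_score() -> None:
    actual = DataTuple(
        x=pd.DataFrame({"x": [0.0] * 5}),
        s=pd.DataFrame({"s": [0, 1, 0, 1, 0]}),
        y=pd.DataFrame({"y": [1, 1, 1, 0, 0]}),
        name="toy",
    )
    preds = Prediction(hard=pd.Series([1, 0, 1, 1, 0]))
    # tp=2, fn=1, fp=1, tn=1  =>  0.5 * (2/3 + 1/2) = 7/12
    assert BalancedAccuracy().score(preds, actual) == pytest.approx(7 / 12)
```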
---- ethicml/metrics/bcr.py (10) ----

```diff
# mutant 6182
--- ethicml/metrics/bcr.py
+++ ethicml/metrics/bcr.py
@@ -11,7 +11,7 @@
 class BCR(Metric):
     """Balanced Classification Rate."""

-    _name: str = "BCR"
+    _name: str = "XXBCRXX"

     @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:

# mutant 6183
--- ethicml/metrics/bcr.py
+++ ethicml/metrics/bcr.py
@@ -11,7 +11,7 @@
 class BCR(Metric):
     """Balanced Classification Rate."""

-    _name: str = "BCR"
+    _name: str = None

     @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:

# mutant 6184
--- ethicml/metrics/bcr.py
+++ ethicml/metrics/bcr.py
@@ -13,7 +13,6 @@
     _name: str = "BCR"

-    @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:
         tpr_metric = TPR()
         tpr = tpr_metric.score(prediction, actual)

# mutant 6185
--- ethicml/metrics/bcr.py
+++ ethicml/metrics/bcr.py
@@ -15,7 +15,7 @@
     @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:
-        tpr_metric = TPR()
+        tpr_metric = None
         tpr = tpr_metric.score(prediction, actual)

         tnr_metric = TNR()

# mutant 6186
--- ethicml/metrics/bcr.py
+++ ethicml/metrics/bcr.py
@@ -16,7 +16,7 @@
     @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:
         tpr_metric = TPR()
-        tpr = tpr_metric.score(prediction, actual)
+        tpr = None

         tnr_metric = TNR()
         tnr = tnr_metric.score(prediction, actual)

# mutant 6187
--- ethicml/metrics/bcr.py
+++ ethicml/metrics/bcr.py
@@ -18,7 +18,7 @@
         tpr_metric = TPR()
         tpr = tpr_metric.score(prediction, actual)

-        tnr_metric = TNR()
+        tnr_metric = None
         tnr = tnr_metric.score(prediction, actual)

         return (tpr + tnr) / 2

# mutant 6188
--- ethicml/metrics/bcr.py
+++ ethicml/metrics/bcr.py
@@ -19,7 +19,7 @@
         tpr = tpr_metric.score(prediction, actual)

         tnr_metric = TNR()
-        tnr = tnr_metric.score(prediction, actual)
+        tnr = None

         return (tpr + tnr) / 2

# mutant 6189
--- ethicml/metrics/bcr.py
+++ ethicml/metrics/bcr.py
@@ -21,5 +21,5 @@
         tnr_metric = TNR()
         tnr = tnr_metric.score(prediction, actual)

-        return (tpr + tnr) / 2
+        return (tpr - tnr) / 2

# mutant 6190
--- ethicml/metrics/bcr.py
+++ ethicml/metrics/bcr.py
@@ -21,5 +21,5 @@
         tnr_metric = TNR()
         tnr = tnr_metric.score(prediction, actual)

-        return (tpr + tnr) / 2
+        return (tpr + tnr) * 2

# mutant 6191
--- ethicml/metrics/bcr.py
+++ ethicml/metrics/bcr.py
@@ -21,5 +21,5 @@
         tnr_metric = TNR()
         tnr = tnr_metric.score(prediction, actual)

-        return (tpr + tnr) / 2
+        return (tpr + tnr) / 3
```
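`BCR` computes the same quantity via the `TPR`/`TNR` metrics, so the same fixture works; the `-`, `*` and `/3` return mutants all give values other than 7/12:

```python
import pandas as pd
import pytest

from ethicml.metrics import BCR
from ethicml.utility import DataTuple, Prediction  # import path assumed


def test_bcr_score() -> None:
    actual = DataTuple(
        x=pd.DataFrame({"x": [0.0] * 5}),
        s=pd.DataFrame({"s": [0, 1, 0, 1, 0]}),
        y=pd.DataFrame({"y": [1, 1, 1, 0, 0]}),
        name="toy",
    )
    preds = Prediction(hard=pd.Series([1, 0, 1, 1, 0]))
    # (tpr + tnr) / 2 = (2/3 + 1/2) / 2 = 7/12
    assert BCR().score(preds, actual) == pytest.approx(7 / 12)
```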
---- ethicml/metrics/confusion_matrix.py (23) ----

```diff
# mutant 6347
--- ethicml/metrics/confusion_matrix.py
+++ ethicml/metrics/confusion_matrix.py
@@ -16,7 +16,7 @@
     prediction: Prediction, actual: DataTuple, pos_cls: int
 ) -> Tuple[int, int, int, int]:
     """Apply sci-kit learn's confusion matrix."""
-    actual_y: np.ndarray[np.int32] = actual.y.to_numpy(dtype=np.int32)
+    actual_y: np.ndarray[np.int32] = None
     labels: np.ndarray[np.int32] = np.unique(actual_y)
     if labels.size == 1:
         labels = np.array([0, 1], dtype=np.int32)

# mutant 6348
--- ethicml/metrics/confusion_matrix.py
+++ ethicml/metrics/confusion_matrix.py
@@ -17,7 +17,7 @@
 ) -> Tuple[int, int, int, int]:
     """Apply sci-kit learn's confusion matrix."""
     actual_y: np.ndarray[np.int32] = actual.y.to_numpy(dtype=np.int32)
-    labels: np.ndarray[np.int32] = np.unique(actual_y)
+    labels: np.ndarray[np.int32] = None
     if labels.size == 1:
         labels = np.array([0, 1], dtype=np.int32)
     conf_matr: np.ndarray = conf_mtx(y_true=actual_y, y_pred=prediction.hard, labels=labels)

# mutant 6349
--- ethicml/metrics/confusion_matrix.py
+++ ethicml/metrics/confusion_matrix.py
@@ -18,7 +18,7 @@
     """Apply sci-kit learn's confusion matrix."""
     actual_y: np.ndarray[np.int32] = actual.y.to_numpy(dtype=np.int32)
     labels: np.ndarray[np.int32] = np.unique(actual_y)
-    if labels.size == 1:
+    if labels.size != 1:
         labels = np.array([0, 1], dtype=np.int32)
     conf_matr: np.ndarray = conf_mtx(y_true=actual_y, y_pred=prediction.hard, labels=labels)

# mutant 6350
--- ethicml/metrics/confusion_matrix.py
+++ ethicml/metrics/confusion_matrix.py
@@ -18,7 +18,7 @@
     """Apply sci-kit learn's confusion matrix."""
     actual_y: np.ndarray[np.int32] = actual.y.to_numpy(dtype=np.int32)
     labels: np.ndarray[np.int32] = np.unique(actual_y)
-    if labels.size == 1:
+    if labels.size == 2:
         labels = np.array([0, 1], dtype=np.int32)
     conf_matr: np.ndarray = conf_mtx(y_true=actual_y, y_pred=prediction.hard, labels=labels)

# mutant 6351
--- ethicml/metrics/confusion_matrix.py
+++ ethicml/metrics/confusion_matrix.py
@@ -19,7 +19,7 @@
     actual_y: np.ndarray[np.int32] = actual.y.to_numpy(dtype=np.int32)
     labels: np.ndarray[np.int32] = np.unique(actual_y)
     if labels.size == 1:
-        labels = np.array([0, 1], dtype=np.int32)
+        labels = np.array([1, 1], dtype=np.int32)
     conf_matr: np.ndarray = conf_mtx(y_true=actual_y, y_pred=prediction.hard, labels=labels)

     if pos_cls not in labels:

# mutant 6352
--- ethicml/metrics/confusion_matrix.py
+++ ethicml/metrics/confusion_matrix.py
@@ -19,7 +19,7 @@
     actual_y: np.ndarray[np.int32] = actual.y.to_numpy(dtype=np.int32)
     labels: np.ndarray[np.int32] = np.unique(actual_y)
     if labels.size == 1:
-        labels = np.array([0, 1], dtype=np.int32)
+        labels = np.array([0, 2], dtype=np.int32)
     conf_matr: np.ndarray = conf_mtx(y_true=actual_y, y_pred=prediction.hard, labels=labels)

     if pos_cls not in labels:

# mutant 6353
--- ethicml/metrics/confusion_matrix.py
+++ ethicml/metrics/confusion_matrix.py
@@ -19,7 +19,7 @@
     actual_y: np.ndarray[np.int32] = actual.y.to_numpy(dtype=np.int32)
     labels: np.ndarray[np.int32] = np.unique(actual_y)
     if labels.size == 1:
-        labels = np.array([0, 1], dtype=np.int32)
+        labels = None
     conf_matr: np.ndarray = conf_mtx(y_true=actual_y, y_pred=prediction.hard, labels=labels)

     if pos_cls not in labels:

# mutant 6354
--- ethicml/metrics/confusion_matrix.py
+++ ethicml/metrics/confusion_matrix.py
@@ -20,7 +20,7 @@
     labels: np.ndarray[np.int32] = np.unique(actual_y)
     if labels.size == 1:
         labels = np.array([0, 1], dtype=np.int32)
-    conf_matr: np.ndarray = conf_mtx(y_true=actual_y, y_pred=prediction.hard, labels=labels)
+    conf_matr: np.ndarray = None

     if pos_cls not in labels:
         raise LabelOutOfBounds("Positive class specified must exist in the test set")

# mutant 6355
--- ethicml/metrics/confusion_matrix.py
+++ ethicml/metrics/confusion_matrix.py
@@ -22,7 +22,7 @@
         labels = np.array([0, 1], dtype=np.int32)
     conf_matr: np.ndarray = conf_mtx(y_true=actual_y, y_pred=prediction.hard, labels=labels)

-    if pos_cls not in labels:
+    if pos_cls in labels:
         raise LabelOutOfBounds("Positive class specified must exist in the test set")

     tp_idx: np.int64 = (labels == pos_cls).nonzero()[0].item()

# mutant 6356
--- ethicml/metrics/confusion_matrix.py
+++ ethicml/metrics/confusion_matrix.py
@@ -23,7 +23,7 @@
     conf_matr: np.ndarray = conf_mtx(y_true=actual_y, y_pred=prediction.hard, labels=labels)

     if pos_cls not in labels:
-        raise LabelOutOfBounds("Positive class specified must exist in the test set")
+        raise LabelOutOfBounds("XXPositive class specified must exist in the test setXX")

     tp_idx: np.int64 = (labels == pos_cls).nonzero()[0].item()
     tp_idx = int(tp_idx)

# mutant 6357
--- ethicml/metrics/confusion_matrix.py
+++ ethicml/metrics/confusion_matrix.py
@@ -25,7 +25,7 @@
     if pos_cls not in labels:
         raise LabelOutOfBounds("Positive class specified must exist in the test set")

-    tp_idx: np.int64 = (labels == pos_cls).nonzero()[0].item()
+    tp_idx: np.int64 = (labels != pos_cls).nonzero()[0].item()
     tp_idx = int(tp_idx)

     true_pos = conf_matr[tp_idx, tp_idx]

# mutant 6358
--- ethicml/metrics/confusion_matrix.py
+++ ethicml/metrics/confusion_matrix.py
@@ -25,7 +25,7 @@
     if pos_cls not in labels:
         raise LabelOutOfBounds("Positive class specified must exist in the test set")

-    tp_idx: np.int64 = (labels == pos_cls).nonzero()[0].item()
+    tp_idx: np.int64 = (labels == pos_cls).nonzero()[1].item()
     tp_idx = int(tp_idx)

     true_pos = conf_matr[tp_idx, tp_idx]

# mutant 6359
--- ethicml/metrics/confusion_matrix.py
+++ ethicml/metrics/confusion_matrix.py
@@ -25,7 +25,7 @@
     if pos_cls not in labels:
         raise LabelOutOfBounds("Positive class specified must exist in the test set")

-    tp_idx: np.int64 = (labels == pos_cls).nonzero()[0].item()
+    tp_idx: np.int64 = None
     tp_idx = int(tp_idx)

     true_pos = conf_matr[tp_idx, tp_idx]

# mutant 6360
--- ethicml/metrics/confusion_matrix.py
+++ ethicml/metrics/confusion_matrix.py
@@ -26,7 +26,7 @@
         raise LabelOutOfBounds("Positive class specified must exist in the test set")

     tp_idx: np.int64 = (labels == pos_cls).nonzero()[0].item()
-    tp_idx = int(tp_idx)
+    tp_idx = None

     true_pos = conf_matr[tp_idx, tp_idx]
     false_pos = conf_matr[:, tp_idx].sum() - true_pos

# mutant 6361
--- ethicml/metrics/confusion_matrix.py
+++ ethicml/metrics/confusion_matrix.py
@@ -28,7 +28,7 @@
     tp_idx: np.int64 = (labels == pos_cls).nonzero()[0].item()
     tp_idx = int(tp_idx)

-    true_pos = conf_matr[tp_idx, tp_idx]
+    true_pos = None
     false_pos = conf_matr[:, tp_idx].sum() - true_pos
     false_neg = conf_matr[tp_idx, :].sum() - true_pos
     true_neg = conf_matr.sum() - true_pos - false_pos - false_neg

# mutant 6362
--- ethicml/metrics/confusion_matrix.py
+++ ethicml/metrics/confusion_matrix.py
@@ -29,7 +29,7 @@
     tp_idx = int(tp_idx)

     true_pos = conf_matr[tp_idx, tp_idx]
-    false_pos = conf_matr[:, tp_idx].sum() - true_pos
+    false_pos = conf_matr[:, tp_idx].sum() + true_pos
     false_neg = conf_matr[tp_idx, :].sum() - true_pos
     true_neg = conf_matr.sum() - true_pos - false_pos - false_neg
     return true_neg, false_pos, false_neg, true_pos

# mutant 6363
--- ethicml/metrics/confusion_matrix.py
+++ ethicml/metrics/confusion_matrix.py
@@ -29,7 +29,7 @@
     tp_idx = int(tp_idx)

     true_pos = conf_matr[tp_idx, tp_idx]
-    false_pos = conf_matr[:, tp_idx].sum() - true_pos
+    false_pos = None
     false_neg = conf_matr[tp_idx, :].sum() - true_pos
     true_neg = conf_matr.sum() - true_pos - false_pos - false_neg
     return true_neg, false_pos, false_neg, true_pos

# mutant 6364
--- ethicml/metrics/confusion_matrix.py
+++ ethicml/metrics/confusion_matrix.py
@@ -30,7 +30,7 @@
     true_pos = conf_matr[tp_idx, tp_idx]
     false_pos = conf_matr[:, tp_idx].sum() - true_pos
-    false_neg = conf_matr[tp_idx, :].sum() - true_pos
+    false_neg = conf_matr[tp_idx, :].sum() + true_pos
     true_neg = conf_matr.sum() - true_pos - false_pos - false_neg
     return true_neg, false_pos, false_neg, true_pos

# mutant 6365
--- ethicml/metrics/confusion_matrix.py
+++ ethicml/metrics/confusion_matrix.py
@@ -30,7 +30,7 @@
     true_pos = conf_matr[tp_idx, tp_idx]
     false_pos = conf_matr[:, tp_idx].sum() - true_pos
-    false_neg = conf_matr[tp_idx, :].sum() - true_pos
+    false_neg = None
     true_neg = conf_matr.sum() - true_pos - false_pos - false_neg
     return true_neg, false_pos, false_neg, true_pos

# mutant 6366
--- ethicml/metrics/confusion_matrix.py
+++ ethicml/metrics/confusion_matrix.py
@@ -31,6 +31,6 @@
     true_pos = conf_matr[tp_idx, tp_idx]
     false_pos = conf_matr[:, tp_idx].sum() - true_pos
     false_neg = conf_matr[tp_idx, :].sum() - true_pos
-    true_neg = conf_matr.sum() - true_pos - false_pos - false_neg
+    true_neg = conf_matr.sum() + true_pos - false_pos - false_neg
     return true_neg, false_pos, false_neg, true_pos

# mutant 6367
--- ethicml/metrics/confusion_matrix.py
+++ ethicml/metrics/confusion_matrix.py
@@ -31,6 +31,6 @@
     true_pos = conf_matr[tp_idx, tp_idx]
     false_pos = conf_matr[:, tp_idx].sum() - true_pos
     false_neg = conf_matr[tp_idx, :].sum() - true_pos
-    true_neg = conf_matr.sum() - true_pos - false_pos - false_neg
+    true_neg = conf_matr.sum() - true_pos + false_pos - false_neg
     return true_neg, false_pos, false_neg, true_pos

# mutant 6368
--- ethicml/metrics/confusion_matrix.py
+++ ethicml/metrics/confusion_matrix.py
@@ -31,6 +31,6 @@
     true_pos = conf_matr[tp_idx, tp_idx]
     false_pos = conf_matr[:, tp_idx].sum() - true_pos
     false_neg = conf_matr[tp_idx, :].sum() - true_pos
-    true_neg = conf_matr.sum() - true_pos - false_pos - false_neg
+    true_neg = conf_matr.sum() - true_pos - false_pos + false_neg
     return true_neg, false_pos, false_neg, true_pos

# mutant 6369
--- ethicml/metrics/confusion_matrix.py
+++ ethicml/metrics/confusion_matrix.py
@@ -31,6 +31,6 @@
    true_pos = conf_matr[tp_idx, tp_idx]
     false_pos = conf_matr[:, tp_idx].sum() - true_pos
     false_neg = conf_matr[tp_idx, :].sum() - true_pos
-    true_neg = conf_matr.sum() - true_pos - false_pos - false_neg
+    true_neg = None
     return true_neg, false_pos, false_neg, true_pos
```
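Here it seems worth asserting the raw counts tuple and the `LabelOutOfBounds` branch directly (where `LabelOutOfBounds` is defined is a guess on my part; the `(tn, fp, fn, tp)` ordering and the function name are taken from the diffs and from the call in balanced_accuracy.py):

```python
import pandas as pd
import pytest

# import locations assumed, not verified against the repo
from ethicml.metrics.confusion_matrix import LabelOutOfBounds, confusion_matrix
from ethicml.utility import DataTuple, Prediction


def test_confusion_matrix_counts_and_label_check() -> None:
    actual = DataTuple(
        x=pd.DataFrame({"x": [0.0] * 5}),
        s=pd.DataFrame({"s": [0, 1, 0, 1, 0]}),
        y=pd.DataFrame({"y": [1, 1, 1, 0, 0]}),
        name="toy",
    )
    preds = Prediction(hard=pd.Series([1, 0, 1, 1, 0]))
    # ordering (tn, fp, fn, tp) per the return statement in the diffs
    assert confusion_matrix(preds, actual, pos_cls=1) == (1, 1, 1, 2)
    with pytest.raises(LabelOutOfBounds):
        confusion_matrix(preds, actual, pos_cls=2)  # 2 never occurs in y
```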
---- ethicml/metrics/cv.py (18) ----

```diff
# mutant 6376
--- ethicml/metrics/cv.py
+++ ethicml/metrics/cv.py
@@ -10,7 +10,7 @@
 class CV(Metric):
     """Calder-Verwer."""

-    _name: str = "CV"
+    _name: str = "XXCVXX"

     @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:

# mutant 6377
--- ethicml/metrics/cv.py
+++ ethicml/metrics/cv.py
@@ -10,7 +10,7 @@
 class CV(Metric):
     """Calder-Verwer."""

-    _name: str = "CV"
+    _name: str = None

     @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:

# mutant 6378
--- ethicml/metrics/cv.py
+++ ethicml/metrics/cv.py
@@ -12,7 +12,6 @@
     _name: str = "CV"

-    @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:
         # has to be imported on demand because otherwise we get circular imports
         from ethicml.evaluators.per_sensitive_attribute import (

# mutant 6379
--- ethicml/metrics/cv.py
+++ ethicml/metrics/cv.py
@@ -20,7 +20,7 @@
             diff_per_sensitive_attribute,
         )

-        per_sens = metric_per_sensitive_attribute(prediction, actual, ProbPos())
+        per_sens = None
        diffs = diff_per_sensitive_attribute(per_sens)

         return 1 - list(diffs.values())[0]

# mutant 6380
--- ethicml/metrics/cv.py
+++ ethicml/metrics/cv.py
@@ -21,7 +21,7 @@
         )

         per_sens = metric_per_sensitive_attribute(prediction, actual, ProbPos())
-        diffs = diff_per_sensitive_attribute(per_sens)
+        diffs = None

         return 1 - list(diffs.values())[0]

# mutant 6381
--- ethicml/metrics/cv.py
+++ ethicml/metrics/cv.py
@@ -23,7 +23,7 @@
         per_sens = metric_per_sensitive_attribute(prediction, actual, ProbPos())
         diffs = diff_per_sensitive_attribute(per_sens)

-        return 1 - list(diffs.values())[0]
+        return 2 - list(diffs.values())[0]

     @property
     def apply_per_sensitive(self) -> bool:

# mutant 6382
--- ethicml/metrics/cv.py
+++ ethicml/metrics/cv.py
@@ -23,7 +23,7 @@
         per_sens = metric_per_sensitive_attribute(prediction, actual, ProbPos())
         diffs = diff_per_sensitive_attribute(per_sens)

-        return 1 - list(diffs.values())[0]
+        return 1 + list(diffs.values())[0]

     @property
     def apply_per_sensitive(self) -> bool:

# mutant 6383
--- ethicml/metrics/cv.py
+++ ethicml/metrics/cv.py
@@ -23,7 +23,7 @@
         per_sens = metric_per_sensitive_attribute(prediction, actual, ProbPos())
         diffs = diff_per_sensitive_attribute(per_sens)

-        return 1 - list(diffs.values())[0]
+        return 1 - list(diffs.values())[1]

     @property
     def apply_per_sensitive(self) -> bool:

# mutant 6384
--- ethicml/metrics/cv.py
+++ ethicml/metrics/cv.py
@@ -25,7 +25,6 @@
         return 1 - list(diffs.values())[0]

-    @property
     def apply_per_sensitive(self) -> bool:
         """Can this metric be applied per sensitive attribute group?"""
         return False

# mutant 6385
--- ethicml/metrics/cv.py
+++ ethicml/metrics/cv.py
@@ -28,7 +28,7 @@
     @property
     def apply_per_sensitive(self) -> bool:
         """Can this metric be applied per sensitive attribute group?"""
-        return False
+        return True


 class AbsCV(CV):

# mutant 6386
--- ethicml/metrics/cv.py
+++ ethicml/metrics/cv.py
@@ -37,7 +37,7 @@
     This metric is supposed to make it easier to compare results.
     """

-    _name: str = "CV absolute"
+    _name: str = "XXCV absoluteXX"

     @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:

# mutant 6387
--- ethicml/metrics/cv.py
+++ ethicml/metrics/cv.py
@@ -37,7 +37,7 @@
     This metric is supposed to make it easier to compare results.
     """

-    _name: str = "CV absolute"
+    _name: str = None

     @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:

# mutant 6388
--- ethicml/metrics/cv.py
+++ ethicml/metrics/cv.py
@@ -39,7 +39,6 @@
     _name: str = "CV absolute"

-    @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:
         cv_score = super().score(prediction, actual)
         # the following is equivalent to 1 - abs(diff)

# mutant 6389
--- ethicml/metrics/cv.py
+++ ethicml/metrics/cv.py
@@ -41,7 +41,7 @@
     @implements(Metric)
     def score(self, prediction: Prediction, actual: DataTuple) -> float:
-        cv_score = super().score(prediction, actual)
+        cv_score = None
         # the following is equivalent to 1 - abs(diff)
         if cv_score > 1:
             return 2 - cv_score

# mutant 6390
--- ethicml/metrics/cv.py
+++ ethicml/metrics/cv.py
@@ -43,7 +43,7 @@
     def score(self, prediction: Prediction, actual: DataTuple) -> float:
         cv_score = super().score(prediction, actual)
         # the following is equivalent to 1 - abs(diff)
-        if cv_score > 1:
+        if cv_score >= 1:
             return 2 - cv_score
         return cv_score

# mutant 6391
--- ethicml/metrics/cv.py
+++ ethicml/metrics/cv.py
@@ -43,7 +43,7 @@
     def score(self, prediction: Prediction, actual: DataTuple) -> float:
         cv_score = super().score(prediction, actual)
         # the following is equivalent to 1 - abs(diff)
-        if cv_score > 1:
+        if cv_score > 2:
             return 2 - cv_score
         return cv_score

# mutant 6392
--- ethicml/metrics/cv.py
+++ ethicml/metrics/cv.py
@@ -44,6 +44,6 @@
         cv_score = super().score(prediction, actual)
         # the following is equivalent to 1 - abs(diff)
         if cv_score > 1:
-            return 2 - cv_score
+            return 3 - cv_score
         return cv_score

# mutant 6393
--- ethicml/metrics/cv.py
+++ ethicml/metrics/cv.py
@@ -44,6 +44,6 @@
         cv_score = super().score(prediction, actual)
         # the following is equivalent to 1 - abs(diff)
         if cv_score > 1:
-            return 2 - cv_score
+            return 2 + cv_score
         return cv_score
```