From 4f0bd4c09b224ab8ce1f7fe3f760add14be0ba5d Mon Sep 17 00:00:00 2001
From: VNMabus
Date: Fri, 13 Oct 2023 20:55:16 +0200
Subject: [PATCH] Remove some warnings in tests.

---
 skfda/inference/anova/_anova_oneway.py        |  8 ++-
 skfda/misc/metrics/_utils.py                  |  4 +-
 .../ml/classification/_logistic_regression.py |  4 +-
 skfda/ml/clustering/_hierarchical.py          |  2 +-
 .../dim_reduction/variable_selection/_rkvs.py |  2 +-
 .../dim_reduction/variable_selection/mrmr.py  |  2 +-
 .../recursive_maxima_hunting.py               | 49 ++-----------------
 7 files changed, 16 insertions(+), 55 deletions(-)

diff --git a/skfda/inference/anova/_anova_oneway.py b/skfda/inference/anova/_anova_oneway.py
index 9b4752c20..d95430b71 100644
--- a/skfda/inference/anova/_anova_oneway.py
+++ b/skfda/inference/anova/_anova_oneway.py
@@ -182,7 +182,13 @@ def v_asymptotic_stat(
     .. footbibliography::
 
     """
-    return float(_v_asymptotic_stat_with_reps(*fd, weights=weights, p=p))
+    return float(
+        _v_asymptotic_stat_with_reps(
+            *fd,
+            weights=weights,
+            p=p,
+        ).reshape(())
+    )
 
 
 def _anova_bootstrap(
diff --git a/skfda/misc/metrics/_utils.py b/skfda/misc/metrics/_utils.py
index 9850057a0..30d1fae7e 100644
--- a/skfda/misc/metrics/_utils.py
+++ b/skfda/misc/metrics/_utils.py
@@ -120,8 +120,8 @@ class NormInducedMetric(Metric[VectorType]):
 
         >>> l2_distance = NormInducedMetric(l2_norm)
         >>> d = l2_distance(fd, fd2)
-        >>> float('%.3f'% d)
-        0.289
+        >>> float(d[0])
+        0.288...
 
     """
 
diff --git a/skfda/ml/classification/_logistic_regression.py b/skfda/ml/classification/_logistic_regression.py
index ae95a1380..cd0fe6093 100644
--- a/skfda/ml/classification/_logistic_regression.py
+++ b/skfda/ml/classification/_logistic_regression.py
@@ -132,11 +132,9 @@ def fit(  # noqa: D102, WPS210
             (self.max_features, n_features),
         )
 
-        penalty = 'none' if self.penalty is None else self.penalty
-
         # multivariate logistic regression
         mvlr = mvLogisticRegression(
-            penalty=penalty,
+            penalty=self.penalty,
             C=self.C,
             solver=self.solver,
             max_iter=self.max_iter,
diff --git a/skfda/ml/clustering/_hierarchical.py b/skfda/ml/clustering/_hierarchical.py
index c8f63cf81..75e06e8f3 100644
--- a/skfda/ml/clustering/_hierarchical.py
+++ b/skfda/ml/clustering/_hierarchical.py
@@ -173,7 +173,7 @@ def _init_estimator(self) -> None:
 
         self._estimator = sklearn.cluster.AgglomerativeClustering(
             n_clusters=self.n_clusters,
-            affinity='precomputed',
+            metric='precomputed',
             memory=self.memory,
             connectivity=self.connectivity,
             compute_full_tree=self.compute_full_tree,
diff --git a/skfda/preprocessing/dim_reduction/variable_selection/_rkvs.py b/skfda/preprocessing/dim_reduction/variable_selection/_rkvs.py
index 4da5810b7..298a68fea 100644
--- a/skfda/preprocessing/dim_reduction/variable_selection/_rkvs.py
+++ b/skfda/preprocessing/dim_reduction/variable_selection/_rkvs.py
@@ -83,7 +83,7 @@ def _rkhs_vs(
             [indexes[j]],
         ])
 
-        new_means = np.atleast_2d(means[new_selection])
+        new_means = means[new_selection]
 
         lstsq_solution = linalg.lstsq(
             variances[new_selection[:, np.newaxis], new_selection],
diff --git a/skfda/preprocessing/dim_reduction/variable_selection/mrmr.py b/skfda/preprocessing/dim_reduction/variable_selection/mrmr.py
index 638d93bf3..036663ccf 100644
--- a/skfda/preprocessing/dim_reduction/variable_selection/mrmr.py
+++ b/skfda/preprocessing/dim_reduction/variable_selection/mrmr.py
@@ -160,7 +160,7 @@ def _mrmr(
                 redundancies[last_selected, j] = redundancy_dependence_measure(
                     X[:, last_selected, np.newaxis],
                     X[:, j, np.newaxis],
-                )
+                ).item()
                 redundancies[j, last_selected] = redundancies[last_selected, j]
 
         W = np.mean(
diff --git a/skfda/preprocessing/dim_reduction/variable_selection/recursive_maxima_hunting.py b/skfda/preprocessing/dim_reduction/variable_selection/recursive_maxima_hunting.py
index ca7b377b8..432b65bb8 100644
--- a/skfda/preprocessing/dim_reduction/variable_selection/recursive_maxima_hunting.py
+++ b/skfda/preprocessing/dim_reduction/variable_selection/recursive_maxima_hunting.py
@@ -17,6 +17,7 @@
     overload,
 )
 
+import dcor
 import numpy as np
 import numpy.linalg as linalg
 import numpy.ma as ma
@@ -25,7 +26,6 @@
 from sklearn.base import clone
 from typing_extensions import Literal
 
-import dcor
 from skfda.exploratory.stats.covariance import (
     CovarianceEstimator,
     EmpiricalCovariance,
@@ -41,7 +41,6 @@
 
 if TYPE_CHECKING:
     from ....misc.covariances import CovarianceLike
-    import GPy
 
 
 def _transform_to_2d(t: ArrayLike) -> NDArrayFloat:
@@ -56,48 +55,6 @@ def _transform_to_2d(t: ArrayLike) -> NDArrayFloat:
     return t
 
 
-class _PicklableKernel():
-    """Class used to pickle GPy kernels."""
-
-    def __init__(self, kernel: GPy.kern.Kern) -> None:
-        super().__setattr__('_PicklableKernel__kernel', kernel)
-
-    def __getattr__(self, name: str) -> Any:
-        if name != '__deepcopy__':
-            return getattr(self.__kernel, name)
-
-    def __setattr__(self, name: str, value: Any) -> None:
-        setattr(self.__kernel, name, value)
-
-    def __getstate__(self) -> Mapping[str, Any]:
-        return {
-            'class': self.__kernel.__class__,
-            'input_dim': self.__kernel.input_dim,
-            'values': self.__kernel.param_array,
-        }
-
-    def __setstate__(self, state: Mapping[str, Any]) -> None:
-        super().__setattr__('_PicklableKernel__kernel', state['class'](
-            input_dim=state['input_dim']),
-        )
-        self.__kernel.param_array[...] = state['values']
-
-    def __call__(self, *args: Any, **kwargs: Any) -> NDArrayFloat:
-        return self.__kernel.K(*args, **kwargs)  # type: ignore[no-any-return]
-
-
-def make_kernel(k: CovarianceLike) -> CovarianceLike:
-    try:
-        import GPy
-    except ImportError:
-        return k
-
-    if isinstance(k, GPy.kern.Kern):
-        return _PicklableKernel(k)
-
-    return k
-
-
 def _absolute_argmax(
     function: FDataGrid,
     *,
@@ -255,7 +212,7 @@ def __init__(
 
         super().__init__()
        self.mean = mean
-        self.cov = make_kernel(cov)
+        self.cov = cov
 
     def _evaluate_mean(self, t: NDArrayFloat) -> NDArrayFloat:
 
@@ -625,7 +582,7 @@ def __call__(
         **kwargs: Any,
     ) -> bool:
 
-        score = float(dependences.data_matrix[0, selected_index, 0])
+        score = float(dependences.data_matrix[(0,) + selected_index + (0,)])
 
         return score < self.threshold
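
Note on the scikit-learn changes above (not part of the commit itself): in
scikit-learn 1.2 the `affinity` parameter of `AgglomerativeClustering` was
renamed to `metric`, and `LogisticRegression` started accepting
`penalty=None` in place of the deprecated `'none'` string, so the old
spellings emit FutureWarning during the test suite. A minimal sketch to
check locally that the new spellings are warning-free, assuming
scikit-learn >= 1.2 (the toy arrays here are made up for illustration):

    import warnings

    import numpy as np
    from sklearn.cluster import AgglomerativeClustering
    from sklearn.linear_model import LogisticRegression

    with warnings.catch_warnings():
        # Escalate deprecation-style warnings so the old spellings
        # (affinity=..., penalty='none') would fail loudly here.
        warnings.simplefilter('error', FutureWarning)

        # 'metric' replaces the deprecated 'affinity' parameter.
        distances = np.array([
            [0.0, 1.0, 2.0],
            [1.0, 0.0, 1.0],
            [2.0, 1.0, 0.0],
        ])
        labels = AgglomerativeClustering(
            n_clusters=2,
            metric='precomputed',
            linkage='average',
        ).fit_predict(distances)

        # penalty=None replaces the deprecated penalty='none' string.
        X = np.array([[0.0], [0.1], [0.9], [1.0]])
        y = np.array([0, 0, 1, 1])
        LogisticRegression(penalty=None).fit(X, y)

    print(labels)  # cluster labels for the three points

If either estimator were still constructed with the old arguments, the
simplefilter('error', ...) line would turn the deprecation warning into an
exception, which is the same failure mode the patched tests avoid.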