From be996d8a3c4aff03b3ba494430a9a2d9390c985f Mon Sep 17 00:00:00 2001
From: phawkins
Date: Tue, 6 Aug 2024 15:34:57 -0700
Subject: [PATCH] [numpy] Fix users of NumPy APIs that are removed in NumPy
 2.0.

This change migrates users of APIs removed in NumPy 2.0 to their
recommended replacements
(https://numpy.org/doc/stable/numpy_2_0_migration_guide.html).

This change replaces uses of `np.math`, which is a deprecated alias for
the builtin `math` module, with the builtin `math` module. `np.math` is
removed in NumPy 2.0.

PiperOrigin-RevId: 660122558
---
 .../python/distributions/gumbel_test.py  | 21 +++++++++++-------
 .../python/distributions/weibull_test.py | 22 +++++++++++++------
 2 files changed, 28 insertions(+), 15 deletions(-)

diff --git a/tensorflow_probability/python/distributions/gumbel_test.py b/tensorflow_probability/python/distributions/gumbel_test.py
index b140a1549e..f1482664ba 100644
--- a/tensorflow_probability/python/distributions/gumbel_test.py
+++ b/tensorflow_probability/python/distributions/gumbel_test.py
@@ -14,15 +14,16 @@
 # ============================================================================
 """Tests for Gumbel."""
 
+import math
+
 # Dependency imports
+
 import numpy as np
 from scipy import stats
-
 import tensorflow.compat.v1 as tf1
 import tensorflow.compat.v2 as tf
 from tensorflow_probability.python.distributions import gumbel
 from tensorflow_probability.python.distributions import kullback_leibler
-
 from tensorflow_probability.python.internal import test_util
 
 
@@ -258,12 +259,16 @@ def testGumbelGumbelKL(self):
     a = gumbel.Gumbel(loc=a_loc, scale=a_scale, validate_args=True)
     b = gumbel.Gumbel(loc=b_loc, scale=b_scale, validate_args=True)
 
-    true_kl = (np.log(b_scale) - np.log(a_scale)
-               + np.euler_gamma * (a_scale / b_scale - 1.)
-               + np.expm1((b_loc - a_loc) / b_scale
-                          + np.vectorize(np.math.lgamma)(a_scale / b_scale
-                                                         + 1.))
-               + (a_loc - b_loc) / b_scale)
+    true_kl = (
+        np.log(b_scale)
+        - np.log(a_scale)
+        + np.euler_gamma * (a_scale / b_scale - 1.0)
+        + np.expm1(
+            (b_loc - a_loc) / b_scale
+            + np.vectorize(math.lgamma)(a_scale / b_scale + 1.0)
+        )
+        + (a_loc - b_loc) / b_scale
+    )
 
     kl = kullback_leibler.kl_divergence(a, b)
 
diff --git a/tensorflow_probability/python/distributions/weibull_test.py b/tensorflow_probability/python/distributions/weibull_test.py
index 1391420764..3559639c49 100644
--- a/tensorflow_probability/python/distributions/weibull_test.py
+++ b/tensorflow_probability/python/distributions/weibull_test.py
@@ -13,10 +13,12 @@
 # limitations under the License.
 # ============================================================================
 """Tests for Weibull distribution."""
+import math
+
 # Dependency imports
+
 import numpy as np
 from scipy import stats
-
 import tensorflow.compat.v2 as tf
 from tensorflow_probability.python.distributions import gamma
 from tensorflow_probability.python.distributions import kullback_leibler
@@ -274,12 +276,18 @@ def testWeibullWeibullKL(self):
     kl = kullback_leibler.kl_divergence(a, b)
 
     expected_kl = (
-        np.log(a_concentration / a_scale**a_concentration) -
-        np.log(b_concentration / b_scale**b_concentration) +
-        ((a_concentration - b_concentration) *
-         (np.log(a_scale) - np.euler_gamma / a_concentration)) +
-        ((a_scale / b_scale)**b_concentration *
-         np.exp(np.math.lgamma(b_concentration / a_concentration + 1.))) - 1.)
+        np.log(a_concentration / a_scale**a_concentration)
+        - np.log(b_concentration / b_scale**b_concentration)
+        + (
+            (a_concentration - b_concentration)
+            * (np.log(a_scale) - np.euler_gamma / a_concentration)
+        )
+        + (
+            (a_scale / b_scale) ** b_concentration
+            * np.exp(math.lgamma(b_concentration / a_concentration + 1.0))
+        )
+        - 1.0
+    )
 
     x = a.sample(int(1e5), seed=test_util.test_seed())
     kl_samples = a.log_prob(x) - b.log_prob(x)
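
Note (not part of the patch): a minimal sketch of the migration the commit message describes, for applying the same `np.math` -> `math` fix elsewhere. It assumes only NumPy and the Python standard library; the array values are illustrative, not taken from the tests above.

import math

import numpy as np

# NumPy < 2.0 exposed `np.math` as an alias for the builtin `math` module,
# so calls like `np.math.lgamma(x)` used to work. NumPy 2.0 removes the
# alias; call the builtin module directly instead.
x = np.array([0.5, 1.5, 2.5])

# math.lgamma operates on scalars, so np.vectorize maps it over an array,
# mirroring the np.vectorize(math.lgamma)(...) pattern in gumbel_test.py.
lgamma_values = np.vectorize(math.lgamma)(x)

# For a single scalar argument the direct call suffices, as in
# weibull_test.py's math.lgamma(b_concentration / a_concentration + 1.0).
scalar_value = math.lgamma(2.5)

print(lgamma_values, scalar_value)

An array-native alternative would be scipy.special.gammaln, which computes the same log-gamma values without np.vectorize; the patch instead uses the builtin math module, which is the migration guide's direct replacement for np.math.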