[numpy] Fix users of NumPy APIs that are removed in NumPy 2.0.
This change migrates users of APIs removed in NumPy 2.0 to their recommended replacements (https://numpy.org/doc/stable/numpy_2_0_migration_guide.html).

Specifically, it replaces uses of `np.math`, a deprecated alias for the builtin `math` module that is removed in NumPy 2.0, with direct use of the builtin `math` module.
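For readers unfamiliar with the alias, here is a minimal sketch of the migration pattern (the array values and variable names are illustrative, not taken from the diff):

```python
import math

import numpy as np

x = np.array([0.5, 1.0, 2.5])

# Before (NumPy < 2.0): np.math was an alias for the builtin math
# module, so code could write np.vectorize(np.math.lgamma)(x).
# np.math no longer exists in NumPy 2.0.

# After: import math directly. np.vectorize adapts the scalar-only
# math.lgamma so it applies elementwise over the array.
lgamma_vals = np.vectorize(math.lgamma)(x)
print(lgamma_vals)  # [0.57236494 0.         0.28468287]
```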

PiperOrigin-RevId: 660122558
hawkinsp authored and tensorflower-gardener committed Aug 6, 2024
1 parent 68f4dc0 commit be996d8
Showing 2 changed files with 28 additions and 15 deletions.
21 changes: 13 additions & 8 deletions tensorflow_probability/python/distributions/gumbel_test.py
@@ -14,15 +14,16 @@
# ============================================================================
"""Tests for Gumbel."""

+import math

# Dependency imports

import numpy as np
from scipy import stats

import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.distributions import gumbel
from tensorflow_probability.python.distributions import kullback_leibler

from tensorflow_probability.python.internal import test_util


@@ -258,12 +259,16 @@ def testGumbelGumbelKL(self):
    a = gumbel.Gumbel(loc=a_loc, scale=a_scale, validate_args=True)
    b = gumbel.Gumbel(loc=b_loc, scale=b_scale, validate_args=True)

-    true_kl = (np.log(b_scale) - np.log(a_scale)
-               + np.euler_gamma * (a_scale / b_scale - 1.)
-               + np.expm1((b_loc - a_loc) / b_scale
-                          + np.vectorize(np.math.lgamma)(a_scale / b_scale
-                                                         + 1.))
-               + (a_loc - b_loc) / b_scale)
+    true_kl = (
+        np.log(b_scale)
+        - np.log(a_scale)
+        + np.euler_gamma * (a_scale / b_scale - 1.0)
+        + np.expm1(
+            (b_loc - a_loc) / b_scale
+            + np.vectorize(math.lgamma)(a_scale / b_scale + 1.0)
+        )
+        + (a_loc - b_loc) / b_scale
+    )

    kl = kullback_leibler.kl_divergence(a, b)

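A side note on the hunk above: `math.lgamma` accepts only Python scalars, which is why the test keeps the `np.vectorize` wrapper when the argument is an array. An equivalent alternative (not what this commit does, just a sketch) would be `scipy.special.gammaln`, which is natively vectorized, and the tests already depend on SciPy:

```python
import math

import numpy as np
from scipy import special

x = np.array([0.5, 1.5, 2.5])

# math.lgamma is scalar-only, so np.vectorize applies it elementwise.
via_vectorize = np.vectorize(math.lgamma)(x)

# scipy.special.gammaln computes the same log-gamma values directly
# on arrays, with no Python-level loop.
via_gammaln = special.gammaln(x)

np.testing.assert_allclose(via_vectorize, via_gammaln)
```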
22 changes: 15 additions & 7 deletions tensorflow_probability/python/distributions/weibull_test.py
@@ -13,10 +13,12 @@
# limitations under the License.
# ============================================================================
"""Tests for Weibull distribution."""
+import math

# Dependency imports

import numpy as np
from scipy import stats

import tensorflow.compat.v2 as tf
from tensorflow_probability.python.distributions import gamma
from tensorflow_probability.python.distributions import kullback_leibler
@@ -274,12 +276,18 @@ def testWeibullWeibullKL(self):

    kl = kullback_leibler.kl_divergence(a, b)
    expected_kl = (
-        np.log(a_concentration / a_scale**a_concentration) -
-        np.log(b_concentration / b_scale**b_concentration) +
-        ((a_concentration - b_concentration) *
-         (np.log(a_scale) - np.euler_gamma / a_concentration)) +
-        ((a_scale / b_scale)**b_concentration *
-         np.exp(np.math.lgamma(b_concentration / a_concentration + 1.))) - 1.)
+        np.log(a_concentration / a_scale**a_concentration)
+        - np.log(b_concentration / b_scale**b_concentration)
+        + (
+            (a_concentration - b_concentration)
+            * (np.log(a_scale) - np.euler_gamma / a_concentration)
+        )
+        + (
+            (a_scale / b_scale) ** b_concentration
+            * np.exp(math.lgamma(b_concentration / a_concentration + 1.0))
+        )
+        - 1.0
+    )

    x = a.sample(int(1e5), seed=test_util.test_seed())
    kl_samples = a.log_prob(x) - b.log_prob(x)
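To sanity-check the closed-form Weibull-to-Weibull KL divergence used in `expected_kl` above, a small Monte Carlo cross-check with SciPy agrees with the formula; the parameter values below are arbitrary picks for illustration:

```python
import math

import numpy as np
from scipy import stats

# Arbitrary illustrative parameters (concentration, scale).
a_conc, a_scale = 2.0, 1.0
b_conc, b_scale = 3.0, 2.0

# Closed form, matching expected_kl in the test above.
closed_form = (
    np.log(a_conc / a_scale**a_conc)
    - np.log(b_conc / b_scale**b_conc)
    + (a_conc - b_conc) * (np.log(a_scale) - np.euler_gamma / a_conc)
    + (a_scale / b_scale) ** b_conc
    * np.exp(math.lgamma(b_conc / a_conc + 1.0))
    - 1.0
)

# Monte Carlo estimate: E_a[log p_a(x) - log p_b(x)].
dist_a = stats.weibull_min(c=a_conc, scale=a_scale)
dist_b = stats.weibull_min(c=b_conc, scale=b_scale)
x = dist_a.rvs(size=200_000, random_state=np.random.default_rng(0))
mc_estimate = np.mean(dist_a.logpdf(x) - dist_b.logpdf(x))

print(closed_form, mc_estimate)  # should agree to within ~1e-2
```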
