Skip to content

Commit

Permalink
fix code_style
Browse files Browse the repository at this point in the history
  • Loading branch information
a162837 committed Nov 3, 2024
1 parent 930ecc0 commit ae29dcc
Show file tree
Hide file tree
Showing 3 changed files with 175 additions and 92 deletions.
16 changes: 12 additions & 4 deletions python/paddle/tensor/math.py
Original file line number Diff line number Diff line change
Expand Up @@ -3815,10 +3815,14 @@ def clip(
max_n = check_clip_tensor(x, max, max_, value_dtype, 'max')

min_n = (
paddle.broadcast_to(min_n, x.shape) if min_n.shape != x.shape else min_n
paddle.broadcast_to(min_n, x.shape)
if min_n.shape != x.shape
else min_n
)
max_n = (
paddle.broadcast_to(max_n, x.shape) if max_n.shape != x.shape else max_n
paddle.broadcast_to(max_n, x.shape)
if max_n.shape != x.shape
else max_n
)

output_min = paddle.where(x < min_n, min_n, x)
Expand Down Expand Up @@ -3926,10 +3930,14 @@ def clip_(
min = check_clip_tensor(x, min, fmin, x.dtype, 'min')

max_expand = (
paddle.broadcast_to(max, x.shape) if max.shape != x.shape else max
paddle.broadcast_to(max, x.shape)
if max.shape != x.shape
else max
)
min_expand = (
paddle.broadcast_to(min, x.shape) if min.shape != x.shape else min
paddle.broadcast_to(min, x.shape)
if min.shape != x.shape
else min
)

paddle.where_(x > min_expand, x, min_expand)
Expand Down
2 changes: 1 addition & 1 deletion test/legacy_test/test_clip_op.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
Expand Down
249 changes: 162 additions & 87 deletions test/legacy_test/test_clip_tensor_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,93 +21,145 @@


class TestClipTensor(unittest.TestCase):
# def test_static_clip(self):
# data_shape = [1, 2, 3, 4]
# self.place = (
# base.CUDAPlace(0)
# if base.core.is_compiled_with_cuda()
# else base.CPUPlace()
# )
# data = np.random.random(data_shape).astype('float32')
# min_data = np.random.random(data_shape[-2:]).astype('float32')
# max_data = np.random.random(data_shape[-3:]).astype('float32')
# paddle.enable_static()
# with paddle.static.program_guard(paddle.static.Program()):
# x = paddle.static.data(name='x', shape=data_shape, dtype='float32')
# min = paddle.static.data(
# name='min', shape=data_shape[-2:], dtype='float32'
# )
# max = paddle.static.data(
# name='max', shape=data_shape[-3:], dtype='float32'
# )
# out = paddle.clip(x, min, max)
# exe = base.Executor(self.place)
# res = exe.run(
# feed={
# "x": data,
# 'min': min_data,
# 'max': max_data,
# },
# fetch_list=[out],
# )
# res_np = np.clip(data, min_data, max_data)
# np.testing.assert_allclose(res_np, res[0], rtol=1e-05)
# paddle.disable_static()

# data_shape = [1, 2, 3, 4]
# self.place = (
# base.CUDAPlace(0)
# if base.core.is_compiled_with_cuda()
# else base.CPUPlace()
# )
# data = np.random.random(data_shape).astype('float32')
# min_data = np.random.random(data_shape[-2:]).astype('float32')
# max_data = np.random.random(data_shape[-3:]).astype('float32')
# paddle.enable_static()
# with paddle.static.program_guard(paddle.static.Program()):
# x = paddle.static.data(name='x', shape=data_shape, dtype='float32')
# min = paddle.static.data(
# name='min', shape=data_shape[-2:], dtype='float32'
# )
# max = 5.0
# out = paddle.clip(x, min, max)
# exe = base.Executor(self.place)
# res = exe.run(
# feed={
# "x": data,
# 'min': min_data,
# 'max': 5.0,
# },
# fetch_list=[out],
# )
# res_np = np.clip(data, min_data, 5.0)
# np.testing.assert_allclose(res_np, res[0], rtol=1e-05)
# paddle.disable_static()

# data_shape = [1, 2, 3, 4]
# self.place = (
# base.CUDAPlace(0)
# if base.core.is_compiled_with_cuda()
# else base.CPUPlace()
# )
# data = np.random.random(data_shape).astype('float32')
# min_data = np.random.random(data_shape[-2:]).astype('float32')
# max_data = float(np.finfo(np.float32).max)
# paddle.enable_static()
# with paddle.static.program_guard(paddle.static.Program()):
# x = paddle.static.data(name='x', shape=data_shape, dtype='float32')
# min = paddle.static.data(
# name='min', shape=data_shape[-2:], dtype='float32'
# )
# out = paddle.clip(x, min)
# exe = base.Executor(self.place)
# res = exe.run(
# feed={"x": data, 'min': min_data},
# fetch_list=[out],
# )
# res_np = np.clip(data, min_data, max_data)
# np.testing.assert_allclose(res_np, res[0], rtol=1e-05)
# paddle.disable_static()
def test_static_clip(self):
    """Verify ``paddle.clip`` against ``np.clip`` in static-graph mode.

    Covers five bound configurations:
      1. both ``min`` and ``max`` are broadcastable tensors,
      2. tensor ``min`` with a Python-float ``max``,
      3. tensor ``min`` only (``max`` defaults to the float32 maximum),
      4. one-element ``min``/``max`` tensors,
      5. float16 inputs (exercised only on CUDA builds).
    """
    self.place = (
        base.CUDAPlace(0)
        if base.core.is_compiled_with_cuda()
        else base.CPUPlace()
    )
    data_shape = [1, 2, 3, 4]

    # Case 1: both bounds are tensors with broadcastable trailing shapes.
    data = np.random.random(data_shape).astype('float32')
    min_data = np.random.random(data_shape[-2:]).astype('float32')
    max_data = np.random.random(data_shape[-3:]).astype('float32')
    paddle.enable_static()
    with paddle.static.program_guard(paddle.static.Program()):
        x = paddle.static.data(name='x', shape=data_shape, dtype='float32')
        # min_t/max_t instead of min/max: do not shadow the builtins.
        min_t = paddle.static.data(
            name='min', shape=data_shape[-2:], dtype='float32'
        )
        max_t = paddle.static.data(
            name='max', shape=data_shape[-3:], dtype='float32'
        )
        out = paddle.clip(x, min_t, max_t)
        exe = base.Executor(self.place)
        res = exe.run(
            feed={'x': data, 'min': min_data, 'max': max_data},
            fetch_list=[out],
        )
        np.testing.assert_allclose(
            np.clip(data, min_data, max_data), res[0], rtol=1e-05
        )
    paddle.disable_static()

    # Case 2: tensor min, scalar (Python float) max.
    data = np.random.random(data_shape).astype('float32')
    min_data = np.random.random(data_shape[-2:]).astype('float32')
    paddle.enable_static()
    with paddle.static.program_guard(paddle.static.Program()):
        x = paddle.static.data(name='x', shape=data_shape, dtype='float32')
        min_t = paddle.static.data(
            name='min', shape=data_shape[-2:], dtype='float32'
        )
        out = paddle.clip(x, min_t, 5.0)
        exe = base.Executor(self.place)
        res = exe.run(
            feed={'x': data, 'min': min_data, 'max': 5.0},
            fetch_list=[out],
        )
        np.testing.assert_allclose(
            np.clip(data, min_data, 5.0), res[0], rtol=1e-05
        )
    paddle.disable_static()

    # Case 3: only min given; expected upper bound is the float32 maximum.
    data = np.random.random(data_shape).astype('float32')
    min_data = np.random.random(data_shape[-2:]).astype('float32')
    max_bound = float(np.finfo(np.float32).max)
    paddle.enable_static()
    with paddle.static.program_guard(paddle.static.Program()):
        x = paddle.static.data(name='x', shape=data_shape, dtype='float32')
        min_t = paddle.static.data(
            name='min', shape=data_shape[-2:], dtype='float32'
        )
        out = paddle.clip(x, min_t)
        exe = base.Executor(self.place)
        res = exe.run(
            feed={'x': data, 'min': min_data},
            fetch_list=[out],
        )
        np.testing.assert_allclose(
            np.clip(data, min_data, max_bound), res[0], rtol=1e-05
        )
    paddle.disable_static()

    # Case 4: one-element min/max tensors; max = min + 10 keeps the
    # interval non-empty so np.clip and paddle.clip agree.
    data = np.random.random(data_shape).astype('float32')
    min_data = np.random.random([1]).astype('float32')
    max_data = min_data + 10.0
    paddle.enable_static()
    with paddle.static.program_guard(paddle.static.Program()):
        x = paddle.static.data(name='x', shape=data_shape, dtype='float32')
        min_t = paddle.static.data(name='min', shape=[1], dtype='float32')
        max_t = paddle.static.data(name='max', shape=[1], dtype='float32')
        out = paddle.clip(x, min_t, max_t)
        exe = base.Executor(self.place)
        res = exe.run(
            feed={'x': data, 'min': min_data, 'max': max_data},
            fetch_list=[out],
        )
        np.testing.assert_allclose(
            np.clip(data, min_data, max_data), res[0], rtol=1e-05
        )
    paddle.disable_static()

    # Case 5: float16 inputs, CUDA only. This is a smoke test — the run
    # itself is the check; the original bound the result to an unused
    # variable, so no numeric comparison is performed here either.
    if base.core.is_compiled_with_cuda():
        paddle.enable_static()
        fp16_shape = [1, 9, 9, 4]
        fp16_data = np.random.random(fp16_shape).astype('float16')
        with paddle.static.program_guard(paddle.static.Program()):
            images = paddle.static.data(
                name='image1', shape=fp16_shape, dtype='float16'
            )
            min_t = paddle.static.data(
                name='min1', shape=[1], dtype='float16'
            )
            max_t = paddle.static.data(
                name='max1', shape=[1], dtype='float16'
            )
            out = paddle.clip(images, min_t, max_t)
            exe = paddle.static.Executor(paddle.CUDAPlace(0))
            exe.run(
                feed={
                    'image1': fp16_data,
                    'min1': np.array([0.2]).astype('float16'),
                    'max1': np.array([0.8]).astype('float16'),
                },
                fetch_list=[out],
            )
        paddle.disable_static()

def test_dygraph_clip(self):
self.place = (
Expand Down Expand Up @@ -169,6 +221,29 @@ def test_dygraph_clip(self):
out = paddle.clip(data, min_data, max_data)
np.testing.assert_allclose(out.numpy(), out_np, rtol=1e-05)

if base.core.is_compiled_with_cuda():
data_shape = [1, 2, 3, 4]
data = np.random.random(data_shape).astype('float16')
min_data = np.random.random(data_shape[-2:]).astype('float16')
max_data = np.random.random(data_shape[-1:]).astype('float16')
out_np = np.clip(data, min_data, max_data)
data = paddle.to_tensor(data)
min_data = paddle.to_tensor(min_data)
max_data = paddle.to_tensor(max_data)
out = paddle.clip(data, min_data, max_data)
np.testing.assert_allclose(out.numpy(), out_np, rtol=1e-05)

data_shape = [1, 2, 3, 4]
data = np.random.random(data_shape).astype('float16')
min_data = -10.0
max_data = 10.0
out_np = np.clip(data, min_data, max_data)
data = paddle.to_tensor(data)
min_data = paddle.to_tensor(min_data)
max_data = paddle.to_tensor(max_data)
out = paddle.clip(data, min_data, max_data)
np.testing.assert_allclose(out.numpy(), out_np, rtol=1e-05)

paddle.enable_static()

def test_shapeerror_clip(self):
Expand Down

0 comments on commit ae29dcc

Please sign in to comment.