Skip to content
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions paddle/phi/ops/yaml/python_api_info.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -390,6 +390,11 @@
args_alias:
use_default_mapping : True

# Register paddle.lgamma / Tensor.lgamma for the Python API compatibility layer;
# default mapping aliases `x` -> `input` (PyTorch-style keyword).
- op : lgamma
  name : [paddle.lgamma, paddle.Tensor.lgamma]
  args_alias :
    use_default_mapping : True

- op : log
name : [paddle.log, paddle.Tensor.log]
args_alias :
Expand Down
42 changes: 42 additions & 0 deletions python/paddle/_paddle_docs.py
Original file line number Diff line number Diff line change
Expand Up @@ -2479,6 +2479,48 @@ def sign(
""",
)

# Doc + signature registration for paddle.lgamma.
# Fix: `out` was documented twice (once under Args:, once under Keyword args:);
# it is keyword-only in the signature, so it belongs only under Keyword args:.
add_doc_and_signature(
    "lgamma",
    r"""
    Calculates the lgamma of the given input tensor, element-wise.

    This operator performs elementwise lgamma for input $X$.
    :math:`out = log\Gamma(x)`

    Args:
        x (Tensor): Input Tensor. Must be one of the following types: bfloat16, float16, float32, float64,
            uint8, int8, int16, int32, int64. Alias: ``input``.
        name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Keyword args:
        out (Tensor, optional): The output Tensor. If set, the result will be stored in this Tensor. Default: None.

    Returns:
        Tensor, the lgamma of the input Tensor, the shape and data type is the same with input
        (integer types are autocasted into float32).

    Examples:
        .. code-block:: pycon

            >>> import paddle

            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> out = paddle.lgamma(x)
            >>> out
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [1.31452465, 1.76149750, 2.25271273, 1.09579802])
    """,
    """
def lgamma(
    x: Tensor,
    name: str | None = None,
    *,
    out: Tensor | None = None,
) -> Tensor
""",
)

add_doc_and_signature(
"log",
r"""
Expand Down
54 changes: 1 addition & 53 deletions python/paddle/tensor/math.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@
isfinite,
isinf,
isnan,
lgamma,
log,
log1p,
log2,
Expand Down Expand Up @@ -4137,59 +4138,6 @@ def gammainc_(x: Tensor, y: Tensor, name: str | None = None) -> Tensor:
)


def lgamma(x: Tensor, name: str | None = None) -> Tensor:
    r"""
    Compute the log-gamma function of ``x`` element-wise.

    :math:`out = log\Gamma(x)`

    Args:
        x (Tensor): Input Tensor. Must be one of the following types: bfloat16, float16, float32, float64,
            uint8, int8, int16, int32, int64.
        name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the lgamma of the input Tensor, the shape and data type is the same with input
        (integer types are autocasted into float32).

    Examples:
        .. code-block:: pycon

            >>> import paddle

            >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            >>> out = paddle.lgamma(x)
            >>> out
            Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
            [1.31452465, 1.76149750, 2.25271273, 1.09579802])
    """
    # Fast path: dynamic graph / PIR dispatches straight to the C++ op.
    if in_dynamic_or_pir_mode():
        return _C_ops.lgamma(x)

    # Legacy static graph: validate dtype, then append the op manually.
    check_variable_and_dtype(
        x,
        'x',
        [
            'float16',
            'float32',
            'float64',
            'uint16',
            'uint8',
            'int8',
            'int16',
            'int32',
            'int64',
        ],
        'lgamma',
    )
    helper = LayerHelper('lgamma', **locals())
    result = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='lgamma', inputs={'X': x}, outputs={'Out': result})
    return result


@inplace_apis_in_dygraph_only
def lgamma_(x: Tensor, name: str | None = None) -> Tensor:
r"""
Expand Down
97 changes: 97 additions & 0 deletions test/legacy_test/test_api_compatibility.py
Original file line number Diff line number Diff line change
Expand Up @@ -3003,6 +3003,103 @@ def test_dygraph_inplace_Compatibility(self):
paddle.enable_static()


class TestLgammaAPI(unittest.TestCase):
    """Compatibility tests for ``paddle.lgamma``: the ``input`` alias, the
    keyword-only ``out`` parameter, and the Tensor method form.

    Per review feedback: ``name=`` variants, exception cases, and the explicit
    ``CPUPlace`` are not tested here to keep the suite minimal.
    """

    def setUp(self):
        np.random.seed(2025)
        paddle.enable_static()
        self.shape = [5, 6]
        self.dtype = 'float32'
        self.init_data()

    def init_data(self):
        # Shift samples into (0.1, 5.1) so lgamma stays finite
        # (poles are at non-positive integers).
        self.np_x = (np.random.rand(*self.shape) * 5.0 + 0.1).astype(self.dtype)

    def test_dygraph_Compatibility(self):
        paddle.disable_static()
        x = paddle.to_tensor(self.np_x)
        paddle_dygraph_out = []

        # Paddle positional arguments
        out1 = paddle.lgamma(x)
        paddle_dygraph_out.append(out1)

        # Paddle keyword arguments
        out2 = paddle.lgamma(x=x)
        paddle_dygraph_out.append(out2)

        # PyTorch keyword arguments (alias)
        out3 = paddle.lgamma(input=x)
        paddle_dygraph_out.append(out3)

        # out parameter (keyword-only); the result is written into out4
        # and also returned, so both must match the reference.
        out4 = paddle.empty_like(x)
        out5 = paddle.lgamma(x, out=out4)
        paddle_dygraph_out.append(out4)
        paddle_dygraph_out.append(out5)

        # Tensor method
        out6 = x.lgamma()
        paddle_dygraph_out.append(out6)

        ref_out = out1.numpy()
        for out in paddle_dygraph_out:
            np.testing.assert_allclose(
                ref_out, out.numpy(), rtol=1e-6, atol=1e-6
            )

        paddle.enable_static()

    def test_static_Compatibility(self):
        paddle.enable_static()
        main = paddle.static.Program()
        startup = paddle.static.Program()
        with paddle.static.program_guard(main, startup):
            x = paddle.static.data(name="x", shape=self.shape, dtype=self.dtype)

            # Paddle positional arguments
            out1 = paddle.lgamma(x)
            # Paddle keyword arguments
            out2 = paddle.lgamma(x=x)
            # PyTorch keyword arguments (alias)
            out3 = paddle.lgamma(input=x)
            # Tensor method
            out4 = x.lgamma()

            # Default executor place (no explicit CPUPlace, per review).
            exe = paddle.static.Executor()
            fetches = exe.run(
                main,
                feed={"x": self.np_x},
                fetch_list=[out1, out2, out3, out4],
            )

        for out in fetches[1:]:
            np.testing.assert_allclose(
                fetches[0], out, rtol=1e-6, atol=1e-6
            )


# Test unique compatibility
class TestUniqueAPI_Compatibility(unittest.TestCase):
def setUp(self):
Expand Down
Loading