diff --git a/paddle/phi/ops/yaml/python_api_info.yaml b/paddle/phi/ops/yaml/python_api_info.yaml index c6619d0fb96e41..080dbb7a36512f 100644 --- a/paddle/phi/ops/yaml/python_api_info.yaml +++ b/paddle/phi/ops/yaml/python_api_info.yaml @@ -390,6 +390,11 @@ args_alias: use_default_mapping : True +- op : lgamma + name : [paddle.lgamma, paddle.Tensor.lgamma] + args_alias : + use_default_mapping : True + - op : log name : [paddle.log, paddle.Tensor.log] args_alias : use_default_mapping : True diff --git a/python/paddle/_paddle_docs.py b/python/paddle/_paddle_docs.py index e38e435ffae222..e8a1665d2727a1 100644 --- a/python/paddle/_paddle_docs.py +++ b/python/paddle/_paddle_docs.py @@ -2479,6 +2479,47 @@ def sign( """, ) +add_doc_and_signature( + "lgamma", + r""" + Calculates the lgamma of the given input tensor, element-wise. + + This operator performs elementwise lgamma for input $X$. + :math:`out = log\Gamma(x)` + + Args: + x (Tensor): Input Tensor. Must be one of the following types: bfloat16, float16, float32, float64, + uint8, int8, int16, int32, int64. Alias: ``input``. + name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. + + Keyword args: + out (Tensor, optional): The output tensor. + + Returns: + Tensor, the lgamma of the input Tensor, the shape and data type is the same with input + (integer types are autocasted into float32). + + Examples: + .. 
code-block:: pycon + + >>> import paddle + + >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) + >>> out = paddle.lgamma(x) + >>> out + Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, + [1.31452465, 1.76149750, 2.25271273, 1.09579802]) +""", + """ +def lgamma( + x: Tensor, + name: str | None = None, + *, + out: Tensor | None = None, +) -> Tensor +""", +) + add_doc_and_signature( "log", r""" diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index 119ff9d112cb4e..e28d028a08e855 100644 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -51,6 +51,7 @@ isfinite, isinf, isnan, + lgamma, log, log1p, log2, @@ -4137,59 +4138,6 @@ def gammainc_(x: Tensor, y: Tensor, name: str | None = None) -> Tensor: ) -def lgamma(x: Tensor, name: str | None = None) -> Tensor: - r""" - Calculates the lgamma of the given input tensor, element-wise. - - This operator performs elementwise lgamma for input $X$. - :math:`out = log\Gamma(x)` - - - Args: - x (Tensor): Input Tensor. Must be one of the following types: bfloat16, float16, float32, float64, - uint8, int8, int16, int32, int64. - name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. - - Returns: - Tensor, the lgamma of the input Tensor, the shape and data type is the same with input - (integer types are autocasted into float32). - - Examples: - .. 
code-block:: pycon - - >>> import paddle - - >>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3]) - >>> out = paddle.lgamma(x) - >>> out - Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True, - [1.31452465, 1.76149750, 2.25271273, 1.09579802]) - """ - if in_dynamic_or_pir_mode(): - return _C_ops.lgamma(x) - else: - check_variable_and_dtype( - x, - 'x', - [ - 'float16', - 'float32', - 'float64', - 'uint16', - 'uint8', - 'int8', - 'int16', - 'int32', - 'int64', - ], - 'lgamma', - ) - helper = LayerHelper('lgamma', **locals()) - out = helper.create_variable_for_type_inference(x.dtype) - helper.append_op(type='lgamma', inputs={'X': x}, outputs={'Out': out}) - return out - - @inplace_apis_in_dygraph_only def lgamma_(x: Tensor, name: str | None = None) -> Tensor: r""" diff --git a/test/legacy_test/test_api_compatibility.py b/test/legacy_test/test_api_compatibility.py index d47bdb886bdfe3..787626f06eabab 100644 --- a/test/legacy_test/test_api_compatibility.py +++ b/test/legacy_test/test_api_compatibility.py @@ -3003,6 +3003,79 @@ def test_dygraph_inplace_Compatibility(self): paddle.enable_static() +class TestLgammaAPI(unittest.TestCase): + def setUp(self): + np.random.seed(2025) + paddle.enable_static() + self.shape = [5, 6] + self.dtype = 'float32' + self.init_data() + + def init_data(self): + self.np_x = (np.random.rand(*self.shape) * 5.0 + 0.1).astype(self.dtype) + + def test_dygraph_Compatibility(self): + paddle.disable_static() + x = paddle.to_tensor(self.np_x) + paddle_dygraph_out = [] + + # Paddle positional arguments + out1 = paddle.lgamma(x) + paddle_dygraph_out.append(out1) + + # Paddle keyword arguments + out2 = paddle.lgamma(x=x) + paddle_dygraph_out.append(out2) + + # PyTorch keyword arguments (alias) + out3 = paddle.lgamma(input=x) + paddle_dygraph_out.append(out3) + + # out parameter + out4 = paddle.empty_like(x) + out5 = paddle.lgamma(x, out=out4) + paddle_dygraph_out.append(out4) + paddle_dygraph_out.append(out5) + + # Tensor method - args + 
out6 = x.lgamma() + paddle_dygraph_out.append(out6) + + ref_out = out1.numpy() + for out in paddle_dygraph_out: + np.testing.assert_allclose( + ref_out, out.numpy(), rtol=1e-6, atol=1e-6 + ) + + paddle.enable_static() + + def test_static_Compatibility(self): + paddle.enable_static() + main = paddle.static.Program() + startup = paddle.static.Program() + with paddle.static.program_guard(main, startup): + x = paddle.static.data(name="x", shape=self.shape, dtype=self.dtype) + + # Paddle positional arguments + out1 = paddle.lgamma(x) + # PyTorch keyword arguments (alias) + out2 = paddle.lgamma(input=x) + # Tensor method + out3 = x.lgamma() + + exe = paddle.static.Executor() + fetches = exe.run( + main, + feed={"x": self.np_x}, + fetch_list=[out1, out2, out3], + ) + + for out in fetches[1:]: + np.testing.assert_allclose( + fetches[0], out, rtol=1e-6, atol=1e-6 + ) + + # Test unique compatibility class TestUniqueAPI_Compatibility(unittest.TestCase): def setUp(self):