     'linear_chain_crf',
     'crf_decoding',
     'conv2d',
-    'softmax',
     'pool2d',
     'batch_norm',
     'dropout',
@@ -145,7 +144,7 @@ def _get_reduce_dim(dim, input):
     else:
         raise TypeError(
             "The type of dim must be int, list, tuple or range, but received {}".format(
-                type(axis)
+                type(dim)
             )
         )
     if dim is None:
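
The `_get_reduce_dim` change above is a genuine bug fix rather than a rename: the helper's parameter is `dim` (see the hunk header), so evaluating `type(axis)` in the error path would raise a `NameError` and mask the intended `TypeError`. A minimal standalone sketch of the fixed validation logic (a simplified stand-in, not Paddle's full helper):

    def _get_reduce_dim(dim, input):
        # Simplified sketch: normalize the reduce dimension(s) to a list,
        # rejecting unsupported types with the corrected message.
        if isinstance(dim, int):
            return [dim]
        elif isinstance(dim, (list, tuple, range)):
            return list(dim)
        else:
            raise TypeError(
                "The type of dim must be int, list, tuple or range, but received {}".format(
                    type(dim)  # `type(axis)` here was an unbound name before the fix
                )
            )

    print(_get_reduce_dim(1, None))       # [1]
    print(_get_reduce_dim((0, 2), None))  # [0, 2]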
@@ -679,7 +678,7 @@ def _pull_gpups_sparse(
         size(int|list of int): The embedding size parameter of each input, which indicates the size of
             each embedding vector respectively.
         dtype(str): The dtype refers to the data type of output tensor. Only supports
-        float32 now.
+            float32 now.
 
     Returns:
         Variable|list of Variable: The tensor variable storing the embeddings of the \
@@ -742,7 +741,7 @@ def _pull_box_sparse(
         size(int): The embedding size parameter, which indicates the size of
             each embedding vector respectively.
         dtype(str): The dtype refers to the data type of output tensor. Only supports
-        float32 now.
+            float32 now.
 
     Returns:
         Variable|list of Variable: The tensor variable storing the embeddings of the \
@@ -1123,147 +1122,6 @@ def get_attrs(prog, dropout_prob, is_test, seed):
     return out
 
 
-@deprecated(since="2.0.0", update_to="paddle.nn.functional.softmax")
-def softmax(input, use_cudnn=True, name=None, axis=-1):
-    r"""
-    This operator implements the softmax layer. The calculation process is as follows:
-
-    1. The dimension :attr:`axis` of the ``input`` will be permuted to the last.
-
-    2. Then the input tensor will be logically flattened to a 2-D matrix. The matrix's
-    second dimension(row length) is the same as the dimension :attr:`axis` of the input
-    tensor, and the first dimension(column length) is the product of all other
-    dimensions of the input tensor. For each row of the matrix, the softmax operator
-    squashes the K-dimensional(K is the width of the matrix, which is also the size
-    of the input tensor's dimension :attr:`axis`) vector of arbitrary real values to a
-    K-dimensional vector of real values in the range [0, 1] that add up to 1.
-
-    3. After the softmax operation is completed, the inverse operations of steps 1 and 2
-    are performed to restore the two-dimensional matrix to the same dimension as the ``input``.
-
-    It computes the exponential of the given dimension and the sum of exponential
-    values of all the other dimensions in the K-dimensional vector input.
-    Then the ratio of the exponential of the given dimension and the sum of
-    exponential values of all the other dimensions is the output of the softmax
-    operator.
-
-    For each row :math:`i` and each column :math:`j` in the matrix, we have:
-
-    .. math::
-
-        Out[i, j] = \\frac{\\exp(X[i, j])}{\\sum_j(exp(X[i, j])}
-
-    Example:
-
-    .. code-block:: text
-
-        Case 1:
-          Input:
-            X.shape = [2, 3, 4]
-            X.data = [[[2.0, 3.0, 4.0, 5.0],
-                       [3.0, 4.0, 5.0, 6.0],
-                       [7.0, 8.0, 8.0, 9.0]],
-                      [[1.0, 2.0, 3.0, 4.0],
-                       [5.0, 6.0, 7.0, 8.0],
-                       [6.0, 7.0, 8.0, 9.0]]]
-
-          Attrs:
-            axis = -1
-
-          Output:
-            Out.shape = [2, 3, 4]
-            Out.data = [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
-                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
-                         [0.07232949, 0.19661193, 0.19661193, 0.53444665]],
-                        [[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
-                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
-                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]
-
-        Case 2:
-          Input:
-            X.shape = [2, 3, 4]
-            X.data = [[[2.0, 3.0, 4.0, 5.0],
-                       [3.0, 4.0, 5.0, 6.0],
-                       [7.0, 8.0, 8.0, 9.0]],
-                      [[1.0, 2.0, 3.0, 4.0],
-                       [5.0, 6.0, 7.0, 8.0],
-                       [6.0, 7.0, 8.0, 9.0]]]
-          Attrs:
-            axis = 1
-
-          Output:
-            Out.shape = [2, 3, 4]
-            Out.data = [[[0.00657326, 0.00657326, 0.01714783, 0.01714783],
-                         [0.01786798, 0.01786798, 0.04661262, 0.04661262],
-                         [0.97555875, 0.97555875, 0.93623955, 0.93623955]],
-                        [[0.00490169, 0.00490169, 0.00490169, 0.00490169],
-                         [0.26762315, 0.26762315, 0.26762315, 0.26762315],
-                         [0.72747516, 0.72747516, 0.72747516, 0.72747516]]]
-
-    Args:
-        input (Tensor): The input tensor. A multi-dimension ``Tensor`` with type float32 or float64.
-        use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn \
-            library is installed. To improve performance, set use_cudnn to True by default.
-        name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` . Default: None.
-            will be named automatically. Default: None.
-        axis (int, optional): The index of dimension to perform softmax calculations, it should
-            be in range :math:`[-1, rank - 1]`, while :math:`rank` is the rank of
-            input tensor. Default: -1. -1 means the last dimension.
-
-    Returns:
-        Tensor: ``Tensor`` indicates the output of softmax. The data type and shape are the same as ``input`` .
-
-    Examples:
-
-        .. code-block:: python
-
-            import paddle
-            import paddle.nn.functional as F
-
-            x = paddle.to_tensor([[[2.0, 3.0, 4.0, 5.0],
-                                   [3.0, 4.0, 5.0, 6.0],
-                                   [7.0, 8.0, 8.0, 9.0]],
-                                  [[1.0, 2.0, 3.0, 4.0],
-                                   [5.0, 6.0, 7.0, 8.0],
-                                   [6.0, 7.0, 8.0, 9.0]]], dtype='float32')
-            y = F.softmax(x, axis=1)
-            print(y)
-            # [[[0.00657326, 0.00657326, 0.01714783, 0.01714783],
-            #   [0.01786798, 0.01786798, 0.04661262, 0.04661262],
-            #   [0.97555870, 0.97555870, 0.93623954, 0.93623954]],
-            #  [[0.00490169, 0.00490169, 0.00490169, 0.00490169],
-            #   [0.26762316, 0.26762316, 0.26762316, 0.26762316],
-            #   [0.72747517, 0.72747517, 0.72747517, 0.72747517]]]
-
-    """
-
-    if in_dygraph_mode():
-        return _C_ops.softmax(input, axis)
-
-    if _non_static_mode():
-        return _legacy_C_ops.softmax(
-            input, 'axis', axis, 'use_cudnn', use_cudnn
-        )
-
-    inputs = {"X": [input]}
-    attrs = {"axis": axis, "use_cudnn": use_cudnn}
-
-    helper = LayerHelper('softmax', **locals())
-    check_variable_and_dtype(
-        input, 'input/x', ['float16', 'float32', 'float64'], 'softmax'
-    )
-
-    dtype = helper.input_dtype()
-    softmax_out = helper.create_variable_for_type_inference(dtype)
-    helper.append_op(
-        type="softmax",
-        inputs={"X": input},
-        outputs={"Out": softmax_out},
-        attrs=attrs,
-    )
-    return softmax_out
-
-
 def conv2d(
     input,
     num_filters,
@@ -1788,7 +1646,7 @@ def is_list_or_tuple(ele):
     if pool_padding == "VALID":
         padding_algorithm = "VALID"
         pool_padding = [0, 0]
-        if ceil_mode != False:
+        if ceil_mode is not False:
             raise ValueError(
                 "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode) must be False. "
                 "Received ceil_mode: True."
@@ -6643,7 +6501,7 @@ def deformable_roi_pooling(
     )
 
     input_channels = input.shape[1]
-    if position_sensitive == False:
+    if position_sensitive is False:
        output_channels = input_channels
     else:
        output_channels = input_channels / pooled_height / pooled_width
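
The two boolean-comparison fixes in this commit (`ceil_mode != False` becoming `ceil_mode is not False` earlier, and `position_sensitive == False` becoming `position_sensitive is False` here) are more than style: `==`/`!=` use value equality, under which `0 == False` and `1 == True`, while `is`/`is not` test identity against the `False` singleton. A short standalone illustration:

    # Value equality treats 0 and False as equal, so the old checks could
    # silently accept non-bool arguments; identity checks do not.
    ceil_mode = 0
    print(ceil_mode != False)      # False - old check: 0 compares equal to False
    print(ceil_mode is not False)  # True  - new check: 0 is not the False object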
@@ -6841,11 +6699,11 @@ def mish(x, threshold=20, name=None):
 
     .. math::
 
-        out = \\begin{cases}
-                x \\ast \\tanh(x), \\text{if } x > \\text{threshold} \\\\
-                x \\ast \\tanh(e^{x}), \\text{if } x < -\\text{threshold} \\\\
-                x \\ast \\tanh(\\ln(1 + e^{x})), \\text{otherwise}
-              \\end{cases}
+            out = \\begin{cases}
+                    x \\ast \\tanh(x), \\text{if } x > \\text{threshold} \\\\
+                    x \\ast \\tanh(e^{x}), \\text{if } x < -\\text{threshold} \\\\
+                    x \\ast \\tanh(\\ln(1 + e^{x})), \\text{otherwise}
+                  \\end{cases}
 
     Args:
         x (Variable): Input feature, multi-dimensional Tensor. The data type
@@ -6867,9 +6725,11 @@ def mish(x, threshold=20, name=None):
 
     .. code-block:: python
 
+        import paddle
         import paddle.fluid as fluid
         import numpy as np
 
+        paddle.enable_static()
         DATATYPE='float32'
 
         x_data = np.array([i for i in range(1,5)]).reshape([1,1,4]).astype(DATATYPE)
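
With the deprecated wrapper deleted outright, any remaining callers of `fluid.layers.softmax` must switch to the replacement named in its `@deprecated` decorator, `paddle.nn.functional.softmax`. A minimal migration sketch, adapted from the example in the removed docstring:

    import paddle
    import paddle.nn.functional as F

    x = paddle.to_tensor([[[2.0, 3.0, 4.0, 5.0],
                           [3.0, 4.0, 5.0, 6.0],
                           [7.0, 8.0, 8.0, 9.0]],
                          [[1.0, 2.0, 3.0, 4.0],
                           [5.0, 6.0, 7.0, 8.0],
                           [6.0, 7.0, 8.0, 9.0]]], dtype='float32')

    # Before: fluid.layers.softmax(x, axis=1)  (removed in this commit)
    y = F.softmax(x, axis=1)  # softmax over axis 1; entries along that axis sum to 1
    print(y)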