diff --git a/.ai-rulez/config.yaml b/.ai-rulez/config.yaml new file mode 100644 index 000000000..a53d720d3 --- /dev/null +++ b/.ai-rulez/config.yaml @@ -0,0 +1,22 @@ +$schema: https://raw.githubusercontent.com/Goldziher/ai-rulez/main/schema/ai-rules-v3.schema.json +description: Convert PaddlePaddle models to ONNX format +gitignore: true +name: Paddle2ONNX + +builtins: + - python + - security + - git-workflow + - code-quality + - testing + - token-efficiency + - default-commands + +presets: + - claude + - gemini + - codex + +compression: + level: moderate +version: "3.0" diff --git a/.gitmodules b/.gitmodules index 1192f8f48..306072614 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,15 +1,15 @@ [submodule "third_party/onnx"] path = third_party/onnx url = https://github.com/onnx/onnx.git - branch = 990217f043af7222348ca8f0301e17fa7b841781 + branch = d3f6b795aedb48eaecc881bf5e8f5dd6efbe25b3 [submodule "third_party/optimizer"] path = third_party/optimizer url = https://github.com/onnx/optimizer.git - branch = b3a4611861734e0731bbcc2bed1f080139e4988b + branch = 94d238d96e3fb3a7ba34f03c284b9ad3516163be [submodule "third_party/pybind11"] path = third_party/pybind11 url = https://github.com/pybind/pybind11.git - branch = 3e9dfa2866941655c56877882565e7577de6fc7b + branch = 45fab4087eaaff234227a10cf7845e8b07f28a98 [submodule "third_party/glog"] path = third_party/glog url = https://github.com/google/glog.git diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 82d313a7d..bf9e594e0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,111 +1,54 @@ -# Exclude all third-party libraries -exclude: | - (?x)^( - patches/.+| - third_party/.+ - )$ +default_install_hook_types: + - pre-commit + - commit-msg +exclude: ^third_party/|vendor/|node_modules/|dist/|\.setuptools-cmake-build/|patches/ + repos: -# Common hooks -- repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 - hooks: - - id: check-added-large-files - - id: 
check-merge-conflict - - id: check-symlinks - - id: detect-private-key - - id: end-of-file-fixer - - id: trailing-whitespace -- repo: https://github.com/Lucas-C/pre-commit-hooks.git - rev: v1.5.1 - hooks: - - id: remove-crlf - - id: remove-tabs - name: Tabs remover (C++) - files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|xpu|kps)$ - args: [--whitespaces-count, '2'] - - id: remove-tabs - name: Tabs remover (Python) - files: (.*\.(py|bzl)|BUILD|.*\.BUILD|WORKSPACE)$ - args: [--whitespaces-count, '4'] - # Exclude some unit test files that require tabs. -- repo: local - hooks: - - id: copyright_checker - name: copyright_checker - entry: python ./tools/codestyle/copyright.py - language: system - files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|proto|xpu|kps|py|pyi|sh)$ -# For Python files -- repo: https://github.com/psf/black-pre-commit-mirror - rev: 24.8.0 - hooks: - - id: black -- repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.0 - hooks: - - id: ruff - args: [--fix, --exit-non-zero-on-fix, --no-cache] -# For C++ files -- repo: local - hooks: - - id: clang-format - name: clang-format - description: Format files with ClangFormat. - entry: bash ./tools/codestyle/clang_format.sh -i - language: system - files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|xpu|kps)$ -- repo: local - hooks: - - id: cpplint-cpp-source - name: cpplint - description: Check C++ code style using cpplint.py. - entry: bash ./tools/codestyle/cpplint_pre_commit.sh - language: system - files: \.(cc|cxx|cpp|cu|h|hpp|hxx)$ - args: - - --extensions=cc,cxx,cpp,cu,cuh,h,hpp,hxx,kps - - --filter=-readability/fn_size,-build/include_what_you_use,-build/c++11,-whitespace/parens - - --quiet -- repo: local - hooks: - - id: clang-tidy - name: clang-tidy - description: Parallel clang-tidy runner. 
- entry: python ./tools/codestyle/clang-tidy.py - language: system - files: \.(c|cc|cxx|cpp|h|hpp|hxx)$ - args: - - -p=build/ - - -extra-arg=-Wno-unknown-warning-option - - -extra-arg=-Wno-pessimizing-move - - -extra-arg=-Wno-braced-scalar-init - - -extra-arg=-Wno-dangling-gsl - - -extra-arg=-Wno-deprecated-copy - - -extra-arg=-Wno-final-dtor-non-final-class - - -extra-arg=-Wno-implicit-int-float-conversion - - -extra-arg=-Wno-inconsistent-missing-override - - -extra-arg=-Wno-infinite-recursion - - -extra-arg=-Wno-mismatched-tags - - -extra-arg=-Wno-self-assign - - -extra-arg=-Wno-sign-compare - - -extra-arg=-Wno-sometimes-uninitialized - - -extra-arg=-Wno-tautological-overlap-compare - - -extra-arg=-Wno-unused-const-variable - - -extra-arg=-Wno-unused-lambda-capture - - -extra-arg=-Wno-unused-private-field - - -extra-arg=-Wno-unused-value - - -extra-arg=-Wno-unused-variable - - -extra-arg=-Wno-overloaded-virtual - - -extra-arg=-Wno-defaulted-function-deleted - - -extra-arg=-Wno-delete-non-abstract-non-virtual-dtor - - -extra-arg=-Wno-return-type-c-linkage -# For CMake files -- repo: https://github.com/cheshirekow/cmake-format-precommit - rev: v0.6.13 - hooks: - - id: cmake-format -- repo: https://github.com/PFCCLab/cmake-lint-paddle - rev: v1.5.1 - hooks: - - id: cmakelint - args: [--config=./tools/codestyle/.cmakelintrc] + # Commit message linting + - repo: https://github.com/Goldziher/gitfluff + rev: v0.8.0 + hooks: + - id: gitfluff-lint + args: ["--write"] + stages: [commit-msg] + + # AI-Rulez: auto-generate AI assistant configuration files + - repo: https://github.com/Goldziher/ai-rulez + rev: v3.8.3 + hooks: + - id: ai-rulez-generate + + # General file checks + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v6.0.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-merge-conflict + - id: check-added-large-files + - id: detect-private-key + - id: check-json + - id: check-yaml + args: ["--allow-multiple-documents", 
"--unsafe"] + - id: check-toml + - id: check-case-conflict + + # TOML formatting + - repo: https://github.com/tox-dev/pyproject-fmt + rev: "v2.16.2" + hooks: + - id: pyproject-fmt + + - repo: https://github.com/ComPWA/taplo-pre-commit + rev: v0.9.3 + hooks: + - id: taplo-format + exclude: "pyproject.toml" + + # Python: ruff (linting + formatting) + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.15.5 + hooks: + - id: ruff + args: [--fix] + - id: ruff-format diff --git a/.python-version b/.python-version new file mode 100644 index 000000000..24ee5b1be --- /dev/null +++ b/.python-version @@ -0,0 +1 @@ +3.13 diff --git a/CMakeLists.txt b/CMakeLists.txt index 816d78da3..9f7ceb7b1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,6 +1,6 @@ cmake_minimum_required(VERSION 3.16) project(paddle2onnx C CXX) -# ONNX 1.16 requires C++ 17 +# ONNX 1.20 requires C++ 17 set(CMAKE_CXX_STANDARD 17) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake") # Build the libraries with - fPIC @@ -24,7 +24,7 @@ endif() # Set max opset version for onnx if you build from other version of onnx this # should be modified. 
-add_definitions(-DMAX_ONNX_OPSET_VERSION=23) +add_definitions(-DMAX_ONNX_OPSET_VERSION=25) add_definitions(-DPADDLE2ONNX_LIB) # Internal flags for convert.h.in @@ -57,10 +57,8 @@ include_directories(${CMAKE_CURRENT_BINARY_DIR}) include_directories(${PROJECT_SOURCE_DIR}/third_party/optimizer) file(GLOB_RECURSE ALL_SRCS ${PROJECT_SOURCE_DIR}/paddle2onnx/*.cc - ${PROJECT_SOURCE_DIR}/third_party/optimizer/onnxoptimizer/*.cc) + ${PROJECT_SOURCE_DIR}/third_party/optimizer/onnx/optimizer/*.cc) list(REMOVE_ITEM ALL_SRCS ${PROJECT_SOURCE_DIR}/paddle2onnx/cpp2py_export.cc) -list(REMOVE_ITEM ALL_SRCS - ${PROJECT_SOURCE_DIR}/third_party/optimizer/onnxoptimizer/cpp2py_export.cc) file(READ "${PROJECT_SOURCE_DIR}/VERSION_NUMBER" PADDLE2ONNX_VERSION) string(STRIP "${PADDLE2ONNX_VERSION}" PADDLE2ONNX_VERSION) diff --git a/debug/p2o_infer_debugger.py b/debug/p2o_infer_debugger.py index 292ec7fda..76e6c27b7 100644 --- a/debug/p2o_infer_debugger.py +++ b/debug/p2o_infer_debugger.py @@ -13,20 +13,22 @@ # limitations under the License. 
import argparse +import logging import os +import queue import re -import sys -import logging import shutil +import sys +import tempfile +import traceback +from contextlib import contextmanager + import numpy as np from onnxruntime import InferenceSession +from prune_onnx_model import prune_onnx_model + import paddle import paddle2onnx -from prune_onnx_model import prune_onnx_model -from contextlib import contextmanager -import traceback -import queue -import tempfile current_dir = os.path.dirname(os.path.abspath(__file__)) tests_dir = os.path.join(current_dir, "..", "tests") @@ -202,8 +204,8 @@ def check_operator_with_print( def _redirect_paddle_output_to_file( paddle_model_file, log_file, inputs_data: tuple ): - import subprocess import pickle + import subprocess import tempfile temp_filename = None @@ -266,7 +268,7 @@ def _compare_results(paddle_model_path, onnx_model_path, inputs_data: tuple): # TODO(wangmingkai02): adjust n according to the number of print op n = 8 shape_list, dtype, data_list = [], None, [] - with open(log_file, "r", encoding="utf-8") as f: + with open(log_file, encoding="utf-8") as f: lines = [] for _ in range(n): line = f.readline() @@ -291,7 +293,7 @@ def _compare_results(paddle_model_path, onnx_model_path, inputs_data: tuple): providers=["CPUExecutionProvider"], ) input_names = session.get_inputs() - input_feed = dict() + input_feed = {} for idx, input_name in enumerate(input_names): input_feed[input_name.name] = inputs_data[idx] result = session.run(output_names=None, input_feed=input_feed) @@ -359,10 +361,7 @@ def _binary_search(program, block): idx = (left + right) // 2 + offset op = block.ops[idx] op_name = op.name() - if idx < left: - left = idx - offset + 1 - offset = 0 - elif op_name in SKIP_FORWARD_OP_LIST: + if idx < left or op_name in SKIP_FORWARD_OP_LIST: left = idx - offset + 1 offset = 0 elif op_name in SKIP_BACKWARD_OP_LIST: @@ -533,7 +532,7 @@ def _compare_results( providers=["CPUExecutionProvider"], ) input_names = 
session.get_inputs() - input_feed = dict() + input_feed = {} for idx, input_name in enumerate(input_names): input_feed[input_name.name] = inputs_data[idx] result = session.run(output_names=None, input_feed=input_feed) @@ -606,10 +605,7 @@ def _check_operator(program, model_file, idx, input_shapes, input_dtypes): clone_program = program.clone() idx = (left + right) // 2 + offset op = clone_program.blocks[0].ops[idx] - if idx < left: - left = idx - offset + 1 - offset = 0 - elif op.name() in SKIP_FORWARD_OP_LIST: + if idx < left or op.name() in SKIP_FORWARD_OP_LIST: left = idx - offset + 1 offset = 0 elif op.name() in SKIP_BACKWARD_OP_LIST: @@ -654,7 +650,7 @@ def locate_issue( model_file, input_shapes, input_dtypes, - candidates: list[int] = None, + candidates: list[int] | None = None, has_cf=False, binary_search=False, output_num=1, @@ -705,7 +701,7 @@ def _get_mapping_and_uniq_set(program): index_mapping = {} ops = set() global_ops = set() - global_res = list() + global_res = [] shadow_output_op_num = 0 for block in program.blocks: ops |= _dfs(block, index_mapping) diff --git a/debug/prune_onnx_model.py b/debug/prune_onnx_model.py index ecbcfdd78..e4baca24e 100644 --- a/debug/prune_onnx_model.py +++ b/debug/prune_onnx_model.py @@ -12,11 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import onnx -from onnx import helper, TensorProto -import os -import math import logging +import math +import os + +import onnx +from onnx import TensorProto, helper logger = logging.getLogger("p2o-logger") @@ -24,9 +25,11 @@ def prune_onnx_model( onnx_model_file, target_node_name="p2o.print", - target_dims=[1], + target_dims=None, target_dtype="float32", ): + if target_dims is None: + target_dims = [1] dtype_map = { "bool": (TensorProto.BOOL, bool), "float32": (TensorProto.FLOAT, float), diff --git a/docs/tech_walkthrough/paddle2onnx_tech_walkthrough.ipynb b/docs/tech_walkthrough/paddle2onnx_tech_walkthrough.ipynb index 67acc1ec4..3e2a92ec4 100644 --- a/docs/tech_walkthrough/paddle2onnx_tech_walkthrough.ipynb +++ b/docs/tech_walkthrough/paddle2onnx_tech_walkthrough.ipynb @@ -33,6 +33,7 @@ "import paddle\n", "import paddle.nn.functional as F\n", "\n", + "\n", "class MyModel(paddle.nn.Layer):\n", " def __init__(self, input_size, hidden_size):\n", " super().__init__()\n", @@ -45,10 +46,11 @@ " x = self.linear2(x)\n", " return x\n", "\n", + "\n", "input_size, hidden_size = 8, 4\n", "model = MyModel(input_size, hidden_size)\n", "\n", - "x_input_spec = paddle.static.InputSpec([None, input_size], 'float32', 'x')\n", + "x_input_spec = paddle.static.InputSpec([None, input_size], \"float32\", \"x\")\n", "paddle.jit.save(model, \"./demo\", input_spec=[x_input_spec])" ] }, @@ -83,10 +85,8 @@ "import paddle2onnx\n", "\n", "paddle2onnx.export(\n", - " model_file='./demo.pdmodel',\n", - " params_file='./demo.pdiparams',\n", - " save_file='./demo.onnx'\n", - " )" + " model_file=\"./demo.pdmodel\", params_file=\"./demo.pdiparams\", save_file=\"./demo.onnx\"\n", + ")" ] }, { @@ -155,10 +155,10 @@ "import paddle.base.proto.framework_pb2 as pppb\n", "\n", "prog = pppb.ProgramDesc()\n", - "with open('./demo.pdmodel', \"rb\") as f:\n", + "with open(\"./demo.pdmodel\", \"rb\") as f:\n", " prog.ParseFromString(f.read())\n", - " \n", - "print(str(prog)[:100] + '\\n...\\n' + 
str(prog)[-100:])\n", + "\n", + "print(str(prog)[:100] + \"\\n...\\n\" + str(prog)[-100:])\n", "# print(prog)" ] }, @@ -214,8 +214,8 @@ "source": [ "import onnx\n", "\n", - "m = onnx.load('./demo.onnx')\n", - "print(str(m)[:100] + '\\n...\\n' + str(m)[-100:])\n", + "m = onnx.load(\"./demo.onnx\")\n", + "print(str(m)[:100] + \"\\n...\\n\" + str(m)[-100:])\n", "# print(m)" ] }, @@ -262,28 +262,31 @@ } ], "source": [ - "import struct \n", + "import struct\n", + "\n", "import numpy as np\n", "\n", - "with open('./demo.pdiparams', 'rb') as f:\n", + "with open(\"./demo.pdiparams\", \"rb\") as f:\n", " raw_content = f.read()\n", "\n", "idx = 0\n", - "while(idx < len(raw_content)):\n", - " magic_number1, lod_level, magic_number_2, tensor_desc_size = struct.unpack('=IQIi', raw_content[idx:idx+20])\n", + "while idx < len(raw_content):\n", + " magic_number1, lod_level, magic_number_2, tensor_desc_size = struct.unpack(\n", + " \"=IQIi\", raw_content[idx : idx + 20]\n", + " )\n", " print(f\"lod_level: {lod_level} tensor_desc_size {tensor_desc_size}\")\n", " idx = idx + 20\n", "\n", " tensor_desc = pppb.VarType.TensorDesc()\n", - " tensor_desc.ParseFromString(raw_content[idx:idx+tensor_desc_size])\n", - " idx = idx+tensor_desc_size\n", - " \n", + " tensor_desc.ParseFromString(raw_content[idx : idx + tensor_desc_size])\n", + " idx = idx + tensor_desc_size\n", + "\n", " numel = 1\n", " for ele in tensor_desc.dims:\n", " numel = numel * ele\n", - " \n", + "\n", " # 4 in next line because sizeof(float32)=4\n", - " weight = np.frombuffer(raw_content[idx:idx+numel*4], dtype=np.float32)\n", + " weight = np.frombuffer(raw_content[idx : idx + numel * 4], dtype=np.float32)\n", " print(f\"shape: {tensor_desc.dims}\")\n", " print(f\"weight: {weight}\")\n", " idx = idx + numel * 4" @@ -390,10 +393,10 @@ } ], "source": [ - "print('>>> all variables:')\n", - "print('\\n'.join([f\"{x.name}, {x.persistable}\" for x in prog.blocks[0].vars]))\n", - "print('>>> all ops:')\n", - 
"print('\\n'.join([op.type for op in prog.blocks[0].ops]))" + "print(\">>> all variables:\")\n", + "print(\"\\n\".join([f\"{x.name}, {x.persistable}\" for x in prog.blocks[0].vars]))\n", + "print(\">>> all ops:\")\n", + "print(\"\\n\".join([op.type for op in prog.blocks[0].ops]))" ] }, { @@ -456,43 +459,47 @@ ], "source": [ "from onnx import TensorProto\n", - "from onnx.helper import (\n", - " make_model, make_node, make_graph,\n", - " make_tensor_value_info)\n", "from onnx.checker import check_model\n", + "from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info\n", "\n", - "x = make_tensor_value_info('x', TensorProto.FLOAT, [None, input_size])\n", - "y = make_tensor_value_info('y', TensorProto.FLOAT, [None])\n", + "x = make_tensor_value_info(\"x\", TensorProto.FLOAT, [None, input_size])\n", + "y = make_tensor_value_info(\"y\", TensorProto.FLOAT, [None])\n", "\n", - "weights = {'A':model.linear1.weight,\n", - " 'A_bias':model.linear1.bias,\n", - " 'B':model.linear2.weight,\n", - " 'B_bias':model.linear2.bias}\n", + "weights = {\n", + " \"A\": model.linear1.weight,\n", + " \"A_bias\": model.linear1.bias,\n", + " \"B\": model.linear2.weight,\n", + " \"B_bias\": model.linear2.bias,\n", + "}\n", "\n", "weight_nodes = []\n", "\n", - "for name,weight in weights.items():\n", - " n = make_node(\n", - " \"Constant\", [], [name],\n", - " value=onnx.helper.make_tensor(\n", - " name=name,\n", - " data_type=onnx.TensorProto.FLOAT,\n", - " dims=weight.shape,\n", - " vals=weight.numpy().flatten(),\n", - " )\n", - " )\n", + "for name, weight in weights.items():\n", + " n = make_node(\n", + " \"Constant\",\n", + " [],\n", + " [name],\n", + " value=onnx.helper.make_tensor(\n", + " name=name,\n", + " data_type=onnx.TensorProto.FLOAT,\n", + " dims=weight.shape,\n", + " vals=weight.numpy().flatten(),\n", + " ),\n", + " )\n", " weight_nodes.append(n)\n", "\n", - "node1 = make_node('MatMul', ['x', 'A'], ['XA'])\n", - "node2 = make_node('Add', ['XA', 'A_bias'], 
['linear1_out'])\n", - "node3 = make_node('Sigmoid', ['linear1_out'], ['sigmoid'])\n", - "node4 = make_node('MatMul', ['sigmoid', 'B'], ['sigmoidB'])\n", - "node5 = make_node('Add', ['sigmoidB', 'B_bias'], ['y'])\n", + "node1 = make_node(\"MatMul\", [\"x\", \"A\"], [\"XA\"])\n", + "node2 = make_node(\"Add\", [\"XA\", \"A_bias\"], [\"linear1_out\"])\n", + "node3 = make_node(\"Sigmoid\", [\"linear1_out\"], [\"sigmoid\"])\n", + "node4 = make_node(\"MatMul\", [\"sigmoid\", \"B\"], [\"sigmoidB\"])\n", + "node5 = make_node(\"Add\", [\"sigmoidB\", \"B_bias\"], [\"y\"])\n", "\n", - "graph = make_graph(weight_nodes + [node1, node2, node3, node4, node5], # nodes\n", - " 'demograph', # a name\n", - " [x], # inputs\n", - " [y]) # outputs\n", + "graph = make_graph(\n", + " [*weight_nodes, node1, node2, node3, node4, node5], # nodes\n", + " \"demograph\", # a name\n", + " [x], # inputs\n", + " [y],\n", + ") # outputs\n", "\n", "onnx_model = make_model(graph)\n", "check_model(onnx_model)\n", @@ -537,7 +544,7 @@ } ], "source": [ - "print('>>> onnx graph converted using paddle2onnx :')\n", + "print(\">>> onnx graph converted using paddle2onnx :\")\n", "print(onnx.helper.printable_graph(m.graph))" ] }, diff --git a/paddle2onnx/__init__.py b/paddle2onnx/__init__.py index 594d9cedb..090664062 100755 --- a/paddle2onnx/__init__.py +++ b/paddle2onnx/__init__.py @@ -11,11 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import sys import importlib.metadata -import packaging.version as pv import warnings +import packaging.version as pv + try: err_msg = ( "Please install the latest paddle: python -m pip install --pre " @@ -30,30 +30,26 @@ paddle_version = importlib.metadata.version(lib_paddle_name) if paddle_version == "0.0.0": warnings.warn( - f"You are currently using the development version of {lib_paddle_name}. 
" - f"Please ensure that its commit ID is more recent than the 'fedc65a'." + f"You are currently using the development version of {lib_paddle_name}. ", + stacklevel=2, ) else: - min_version = "3.0.0.dev20250426" - if ( - sys.platform == "win32" - and ( - pv.parse(paddle_version) < pv.parse(min_version) - or paddle_version == "3.0.0" - ) - ) or pv.parse(paddle_version) < pv.parse(min_version): + min_version = "3.0.0" + if pv.parse(paddle_version) < pv.parse(min_version): raise ValueError( f"The paddlepaddle version should not be less than {min_version}. {err_msg}" ) -except ImportError: +except ImportError as exc: raise ImportError( f"Failed to import paddle. Please ensure paddle is installed. {err_msg}" - ) + ) from exc +from .convert import ( + dygraph2onnx, # noqa: F401 + export, # noqa: F401 + load_parameter, # noqa: F401 + save_program, # noqa: F401 +) from .version import version -from .convert import export # noqa: F401 -from .convert import dygraph2onnx # noqa: F401 -from .convert import load_parameter # noqa: F401 -from .convert import save_program # noqa: F401 __version__ = version diff --git a/paddle2onnx/command.py b/paddle2onnx/command.py index f1c57b005..2f5dc2cf4 100755 --- a/paddle2onnx/command.py +++ b/paddle2onnx/command.py @@ -14,8 +14,9 @@ import argparse import ast -import sys import os +import sys + import paddle2onnx from paddle2onnx.utils import logging @@ -144,18 +145,16 @@ def main(): if args.version: logging.info( - "paddle2onnx-{} with python>=3.8, paddlepaddle>=3.0.0".format( - paddle2onnx.__version__ - ) + f"paddle2onnx-{paddle2onnx.__version__} with python>=3.8, paddlepaddle>=3.0.0" ) return - assert ( - args.model_dir is not None - ), "--model_dir should be defined while translating paddle model to onnx" - assert ( - args.save_file is not None - ), "--save_file should be defined while translating paddle model to onnx" + assert args.model_dir is not None, ( + "--model_dir should be defined while translating paddle model to onnx" + ) + 
assert args.save_file is not None, ( + "--save_file should be defined while translating paddle model to onnx" + ) model_file = os.path.join(args.model_dir, args.model_filename) if args.params_filename is None: diff --git a/paddle2onnx/convert.py b/paddle2onnx/convert.py index e77becf8d..80e93c21a 100755 --- a/paddle2onnx/convert.py +++ b/paddle2onnx/convert.py @@ -13,15 +13,16 @@ # limitations under the License. import os -import paddle +import shutil import tempfile -import paddle2onnx.paddle2onnx_cpp2py_export as c_p2o -from paddle2onnx.utils import logging, paddle2onnx_export_configs +import traceback from contextlib import contextmanager -from paddle.decomposition import decomp + +import paddle +import paddle2onnx.paddle2onnx_cpp2py_export as c_p2o from paddle.base.executor import global_scope -import shutil -import traceback +from paddle.decomposition import decomp +from paddle2onnx.utils import logging, paddle2onnx_export_configs PADDLE2ONNX_EXPORT_TEMP_DIR = None @@ -89,7 +90,7 @@ def load_parameter(program): def decompose_program(model_filename): """Decomposes the given pir program.""" - model_file_path, new_model_file_path, new_model_file_name, new_params_file_name = ( + model_file_path, new_model_file_path, new_model_file_name, _new_params_file_name = ( get_tmp_dir_and_file(model_filename, "_decompose") ) model = paddle.jit.load(model_file_path) @@ -139,9 +140,9 @@ def export( ): global PADDLE2ONNX_EXPORT_TEMP_DIR # check model_filename - assert os.path.exists( - model_filename - ), f"Model file {model_filename} does not exist." + assert os.path.exists(model_filename), ( + f"Model file {model_filename} does not exist." 
+ ) if not os.path.exists(params_filename): logging.warning( f"Params file {params_filename} does not exist, " @@ -167,7 +168,7 @@ def export( place = paddle.CPUPlace() exe = paddle.static.Executor(place) with paddle.pir_utils.OldIrGuard(): - [inference_program, feed_target_names, fetch_targets] = ( + [inference_program, _feed_target_names, _fetch_targets] = ( paddle.static.load_inference_model(model_file_path, exe) ) if verbose: @@ -196,14 +197,17 @@ def export( if verbose: logging.info("Complete the conversion from .pdmodel to json file.") - if paddle.get_flags("FLAGS_enable_pir_api")["FLAGS_enable_pir_api"]: - if dist_prim_all and auto_upgrade_opset: - if verbose: - logging.info("Try to decompose program ...") - # TODO(wangmingkai02): Do we need to update params_filename here? - model_filename = decompose_program(model_filename) - if verbose: - logging.info("Complete the decomposition of combined operators.") + if ( + paddle.get_flags("FLAGS_enable_pir_api")["FLAGS_enable_pir_api"] + and dist_prim_all + and auto_upgrade_opset + ): + if verbose: + logging.info("Try to decompose program ...") + # TODO(wangmingkai02): Do we need to update params_filename here? + model_filename = decompose_program(model_filename) + if verbose: + logging.info("Complete the decomposition of combined operators.") if verbose and PADDLE2ONNX_EXPORT_TEMP_DIR is not None: logging.info( @@ -291,6 +295,7 @@ def export( "Try to perform optimization on the ONNX model with onnxoptimizer." 
) import io + import onnx import onnxoptimizer @@ -320,6 +325,7 @@ def export( ) os.environ["POLYGRAPHY_AUTOINSTALL_DEPS"] = "1" import io + import onnx from polygraphy.backend.onnx import fold_constants @@ -358,9 +364,10 @@ def export( else: with open(save_file, "wb") as f: f.write(onnx_model_str) - logging.info("ONNX model saved in {}.".format(save_file)) + logging.info(f"ONNX model saved in {save_file}.") else: return onnx_model_str + return None def dygraph2onnx(layer, save_file, input_spec=None, opset_version=9, **configs): @@ -376,7 +383,7 @@ def dygraph2onnx(layer, save_file, input_spec=None, opset_version=9, **configs): ) if not os.path.isfile(model_file): raise ValueError("Failed to save static PaddlePaddle model.") - logging.info("Static PaddlePaddle model saved in {}.".format(paddle_model_dir)) + logging.info(f"Static PaddlePaddle model saved in {paddle_model_dir}.") params_file = os.path.join(paddle_model_dir, "model.pdiparams") if not os.path.isfile(params_file): params_file = "" diff --git a/paddle2onnx/convert_to_fp16.py b/paddle2onnx/convert_to_fp16.py index e95f89019..5fce8c0f4 100755 --- a/paddle2onnx/convert_to_fp16.py +++ b/paddle2onnx/convert_to_fp16.py @@ -11,9 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from __future__ import absolute_import import argparse + from paddle2onnx.utils import logging @@ -35,4 +35,4 @@ def parse_arguments(): import paddle2onnx.paddle2onnx_cpp2py_export as c_p2o c_p2o.convert_to_fp16(args.input_model_path, args.output_model_path) - logging.info("FP16 model saved in {}.".format(args.output_model_path)) + logging.info(f"FP16 model saved in {args.output_model_path}.") diff --git a/paddle2onnx/mapper/exporter.cc b/paddle2onnx/mapper/exporter.cc index 10c63c18c..3cf15f939 100644 --- a/paddle2onnx/mapper/exporter.cc +++ b/paddle2onnx/mapper/exporter.cc @@ -16,7 +16,7 @@ #include #include #include -#include "onnxoptimizer/optimize.h" +#include "onnx/optimizer/optimize.h" #include "paddle/fluid/pir/dialect/operator/ir/control_flow_op.h" #include "paddle/phi/core/enforce.h" #include "paddle2onnx/mapper/quantize/ort_quantize_processor.h" diff --git a/paddle2onnx/optimize.py b/paddle2onnx/optimize.py index 1793981ab..d3673872d 100755 --- a/paddle2onnx/optimize.py +++ b/paddle2onnx/optimize.py @@ -11,9 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from __future__ import absolute_import import argparse + from paddle2onnx.utils import logging @@ -43,4 +43,4 @@ def parse_arguments(): if args.input_shape_dict != "": shape_dict = eval(args.input_shape_dict) c_p2o.optimize(args.input_model, args.output_model, shape_dict) - logging.info("Model optmized, saved in {}.".format(args.output_model)) + logging.info(f"Model optmized, saved in {args.output_model}.") diff --git a/paddle2onnx/optimizer/eliminate_non_transpose.h b/paddle2onnx/optimizer/eliminate_non_transpose.h index 1e43ce307..1a9f64f74 100644 --- a/paddle2onnx/optimizer/eliminate_non_transpose.h +++ b/paddle2onnx/optimizer/eliminate_non_transpose.h @@ -21,7 +21,7 @@ #pragma once -#include "onnxoptimizer/pass.h" +#include "onnx/optimizer/pass.h" namespace ONNX_NAMESPACE { namespace optimization { @@ -48,11 +48,7 @@ struct EliminateNonTranspose final : public PredicateBasedPass { } } } - const bool replacing_success = - tryReplacingAllUsesWith(node->output(), node->input()); - if (!replacing_success) { - return false; - } + node->output()->replaceAllUsesWith(node->input()); destroy_current = NodeDestroyType::DestroyOne; return true; } diff --git a/paddle2onnx/optimizer/fuse_constant_cast.h b/paddle2onnx/optimizer/fuse_constant_cast.h index 2ddb620a4..26c4cfca9 100644 --- a/paddle2onnx/optimizer/fuse_constant_cast.h +++ b/paddle2onnx/optimizer/fuse_constant_cast.h @@ -26,7 +26,7 @@ #include #include "onnx/defs/tensor_util.h" -#include "onnxoptimizer/pass.h" +#include "onnx/optimizer/pass.h" namespace ONNX_NAMESPACE { namespace optimization { @@ -57,9 +57,7 @@ struct FuseConstantCast final : public PredicateBasedPass { auto dtype = cast->i(kto); t.elem_type() = dtype; constant->t_(kvalue, std::move(t)); - if (!tryReplacingAllUsesWith(cast->output(), cast->inputs()[0])) { - return false; - } + cast->output()->replaceAllUsesWith(cast->inputs()[0]); destroy_current = NodeDestroyType::DestroyOne; return true; } diff --git 
a/paddle2onnx/optimizer/fuse_constant_reshape.h b/paddle2onnx/optimizer/fuse_constant_reshape.h index 4e0262861..1da50adf0 100644 --- a/paddle2onnx/optimizer/fuse_constant_reshape.h +++ b/paddle2onnx/optimizer/fuse_constant_reshape.h @@ -26,7 +26,7 @@ #include #include "onnx/defs/tensor_util.h" -#include "onnxoptimizer/pass.h" +#include "onnx/optimizer/pass.h" namespace ONNX_NAMESPACE { namespace optimization { @@ -127,11 +127,7 @@ struct FuseConstantReshape final : public PredicateBasedPass { // update constant node constant->output()->setSizes(reshape->output()->sizes()); constant->output()->setElemType(reshape->output()->elemType()); - const bool replacing_success = - tryReplacingAllUsesWith(reshape->output(), reshape->inputs()[0]); - if (!replacing_success) { - return false; - } + reshape->output()->replaceAllUsesWith(reshape->inputs()[0]); destroy_current = NodeDestroyType::DestroyOne; return true; } diff --git a/paddle2onnx/optimizer/fuse_constant_unsqueeze.h b/paddle2onnx/optimizer/fuse_constant_unsqueeze.h index ca51de8ce..2de25c995 100644 --- a/paddle2onnx/optimizer/fuse_constant_unsqueeze.h +++ b/paddle2onnx/optimizer/fuse_constant_unsqueeze.h @@ -26,7 +26,7 @@ #include #include "onnx/defs/tensor_util.h" -#include "onnxoptimizer/pass.h" +#include "onnx/optimizer/pass.h" namespace ONNX_NAMESPACE { namespace optimization { @@ -96,11 +96,7 @@ struct FuseConstantUnsqueeze final : public PredicateBasedPass { // update constant node constant->output()->setSizes(unsqueeze->output()->sizes()); constant->output()->setElemType(unsqueeze->output()->elemType()); - const bool replacing_success = - tryReplacingAllUsesWith(unsqueeze->output(), unsqueeze->inputs()[0]); - if (!replacing_success) { - return false; - } + unsqueeze->output()->replaceAllUsesWith(unsqueeze->inputs()[0]); destroy_current = NodeDestroyType::DestroyOne; return true; } diff --git a/paddle2onnx/optimizer/fuse_paddle_conv_bias.h b/paddle2onnx/optimizer/fuse_paddle_conv_bias.h index 
e976241dc..3da3633e0 100644 --- a/paddle2onnx/optimizer/fuse_paddle_conv_bias.h +++ b/paddle2onnx/optimizer/fuse_paddle_conv_bias.h @@ -23,7 +23,7 @@ #include #include "onnx/defs/tensor_util.h" -#include "onnxoptimizer/pass.h" +#include "onnx/optimizer/pass.h" namespace ONNX_NAMESPACE { namespace optimization { @@ -84,11 +84,7 @@ struct FusePaddleConvBias final : public PredicateBasedPass { conv->addInput(bias->outputs()[0]); conv->output()->setSizes(add->output()->sizes()); conv->output()->setElemType(add->output()->elemType()); - const bool replacing_success = - tryReplacingAllUsesWith(add->output(), add->inputs()[0]); - if (!replacing_success) { - return false; - } + add->output()->replaceAllUsesWith(add->inputs()[0]); destroy_current = NodeDestroyType::DestroyOne; return true; } diff --git a/paddle2onnx/optimizer/fuse_unsqueeze_conv2d_squeeze.h b/paddle2onnx/optimizer/fuse_unsqueeze_conv2d_squeeze.h index 6db5007b4..cd8fcb5c0 100644 --- a/paddle2onnx/optimizer/fuse_unsqueeze_conv2d_squeeze.h +++ b/paddle2onnx/optimizer/fuse_unsqueeze_conv2d_squeeze.h @@ -29,7 +29,7 @@ #include #include "onnx/defs/tensor_util.h" -#include "onnxoptimizer/pass.h" +#include "onnx/optimizer/pass.h" namespace ONNX_NAMESPACE { namespace optimization { @@ -153,14 +153,8 @@ struct FuseUnsqueezeConv2dSqueeze final : public PredicateBasedPass { } conv_node->replaceInput(0, unsqueeze_node->inputs()[0]); - if (!tryReplacingAllUsesWith(unsqueeze_node->output(), - unsqueeze_node->inputs()[0])) { - return false; - } - if (!tryReplacingAllUsesWith(squeeze_node->output(), - squeeze_node->inputs()[0])) { - return false; - } + unsqueeze_node->output()->replaceAllUsesWith(unsqueeze_node->inputs()[0]); + squeeze_node->output()->replaceAllUsesWith(squeeze_node->inputs()[0]); // unsqueeze_node->destroy(); // squeeze_node->destroy(); // destroy_current = NodeDestroyType::DestroyZero; diff --git a/paddle2onnx/optimizer/paddle2onnx_optimizer.cc b/paddle2onnx/optimizer/paddle2onnx_optimizer.cc index 
ca605e098..412e80ad6 100644 --- a/paddle2onnx/optimizer/paddle2onnx_optimizer.cc +++ b/paddle2onnx/optimizer/paddle2onnx_optimizer.cc @@ -15,7 +15,7 @@ #include "paddle2onnx/optimizer/paddle2onnx_optimizer.h" #include #include -#include "onnxoptimizer/optimize.h" +#include "onnx/optimizer/optimize.h" #include "paddle2onnx/converter.h" #include "paddle2onnx/optimizer/eliminate_non_transpose.h" #include "paddle2onnx/optimizer/fuse_constant_cast.h" diff --git a/paddle2onnx/optimizer/replace_add_to_identity.h b/paddle2onnx/optimizer/replace_add_to_identity.h index d7027c6c4..e532bf4aa 100644 --- a/paddle2onnx/optimizer/replace_add_to_identity.h +++ b/paddle2onnx/optimizer/replace_add_to_identity.h @@ -28,7 +28,7 @@ #include #include #include "onnx/defs/tensor_util.h" -#include "onnxoptimizer/pass.h" +#include "onnx/optimizer/pass.h" namespace ONNX_NAMESPACE { namespace optimization { @@ -82,9 +82,7 @@ struct ReplaceAddToIdentity final : public PredicateBasedPass { int32_data.size() == 0 && int64_data.size() == 0) { return false; } - if (!tryReplacingAllUsesWith(add_node->output(), add_node->inputs()[1])) { - return false; - } + add_node->output()->replaceAllUsesWith(add_node->inputs()[1]); } else { auto bias = add_ipt_1->t(kvalue); if (bias.sizes().size() == 1 && bias.sizes()[0] != 1) { @@ -113,9 +111,7 @@ struct ReplaceAddToIdentity final : public PredicateBasedPass { int32_data.size() == 0 && int64_data.size() == 0) { return false; } - if (!tryReplacingAllUsesWith(add_node->output(), add_node->inputs()[0])) { - return false; - } + add_node->output()->replaceAllUsesWith(add_node->inputs()[0]); } return true; } diff --git a/paddle2onnx/optimizer/replace_mul_to_identity.h b/paddle2onnx/optimizer/replace_mul_to_identity.h index 7e33f8f18..3bdbf6863 100644 --- a/paddle2onnx/optimizer/replace_mul_to_identity.h +++ b/paddle2onnx/optimizer/replace_mul_to_identity.h @@ -28,7 +28,7 @@ #include #include #include "onnx/defs/tensor_util.h" -#include "onnxoptimizer/pass.h" 
+#include "onnx/optimizer/pass.h" namespace ONNX_NAMESPACE { namespace optimization { @@ -82,9 +82,7 @@ struct ReplaceMulToIdentity final : public PredicateBasedPass { int32_data.size() == 0 && int64_data.size() == 0) { return false; } - if (!tryReplacingAllUsesWith(mul_node->output(), mul_node->inputs()[1])) { - return false; - } + mul_node->output()->replaceAllUsesWith(mul_node->inputs()[1]); } else { auto scale = mul_ipt_1->t(kvalue); if (scale.sizes().size() == 1 && scale.sizes()[0] != 1) { @@ -113,9 +111,7 @@ struct ReplaceMulToIdentity final : public PredicateBasedPass { int32_data.size() == 0 && int64_data.size() == 0) { return false; } - if (!tryReplacingAllUsesWith(mul_node->output(), mul_node->inputs()[0])) { - return false; - } + mul_node->output()->replaceAllUsesWith(mul_node->inputs()[0]); } return true; } diff --git a/paddle2onnx/utils.py b/paddle2onnx/utils.py index 6d08e867e..cbb93bc80 100644 --- a/paddle2onnx/utils.py +++ b/paddle2onnx/utils.py @@ -12,11 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import absolute_import import importlib -import time import sys +import time + import paddle2onnx.paddle2onnx_cpp2py_export as c_p2o @@ -32,23 +32,22 @@ def try_import(module_name): """Try importing a module, with an informative error message on failure.""" install_name = module_name try: - mod = importlib.import_module(module_name) - return mod - except ImportError: + return importlib.import_module(module_name) + except ImportError as exc: err_msg = ( - "Failed importing {}. This likely means that some modules " + f"Failed importing {module_name}. This likely means that some modules " "requires additional dependencies that have to be " - "manually installed (usually with `pip install {}`). " - ).format(module_name, install_name) - raise ImportError(err_msg) + f"manually installed (usually with `pip install {install_name}`). 
" + ) + raise ImportError(err_msg) from exc def check_model(onnx_model): onnx = try_import("onnx") try: onnx.checker.check_model(onnx_model) - except Exception: - raise Exception("ONNX model is not valid.") + except Exception as exc: + raise Exception("ONNX model is not valid.") from exc finally: logging.info("ONNX model generated is valid.") @@ -73,17 +72,15 @@ def log(level=2, message="", use_color=False): if logging.log_level >= level: if use_color: print( - "{}{} [{}]\t{}\033[0m".format( - level_color[level], current_time, levels[level], message + f"{level_color[level]}{current_time} [{levels[level]}]\t{message}\033[0m".encode().decode( + "latin1" ) - .encode("utf-8") - .decode("latin1") ) else: print( - "{} [{}]\t{}".format(current_time, levels[level], message) - .encode("utf-8") - .decode("latin1") + f"{current_time} [{levels[level]}]\t{message}".encode().decode( + "latin1" + ) ) sys.stdout.flush() @@ -108,59 +105,42 @@ def error(message="", use_color=True, exit=True): def compare_value(a, b, cond): if cond == "equal": - if a != b: - return False - return True + return a == b if cond == "greater_than": - if a <= b: - return False - return True + return not a <= b if cond == "greater_equal": - if a < b: - return False - return True + return not a < b if cond == "less_equal": - if a > b: - return False - return True + return not a > b if cond == "less_than": - if a >= b: - return False - return True + return not a >= b + return None def compare_attr(actual_value, target_value, attr_name, cond="equal"): if not compare_value(actual_value, target_value, cond): raise ValueError( - "Support {} {} {}, actually got {}=={}.".format( - attr_name, cond, target_value, attr_name, actual_value - ) + f"Support {attr_name} {cond} {target_value}, actually got {attr_name}=={actual_value}." 
) def compare_attr_between_dims(attr, dims, attr_name, cond="equal"): if not compare_value(attr[dims[0]], attr[dims[1]], cond): - expect_info = "Support {}[{}] {} {}[{}], ".format( - attr_name, dims[0], cond, attr_name, dims[1] - ) - actual_info = "actually got {}[{}]=={}, not {} {}[{}]=={}.".format( - attr_name, dims[0], attr[dims[0]], cond, attr_name, dims[1], attr[dims[1]] - ) + expect_info = f"Support {attr_name}[{dims[0]}] {cond} {attr_name}[{dims[1]}], " + actual_info = f"actually got {attr_name}[{dims[0]}]=={attr[dims[0]]}, not {cond} {attr_name}[{dims[1]}]=={attr[dims[1]]}." raise ValueError(expect_info + actual_info) def require_fixed_shape(op_name=None): logging.error( - "[{}]Fixed shape is required, refer this doc for more information: https://github.com/PaddlePaddle/Paddle2ONNX/blob/develop/docs/zh/FAQ.md".format( - op_name - ) + f"[{op_name}]Fixed shape is required, refer this doc for more information: https://github.com/PaddlePaddle/Paddle2ONNX/blob/develop/docs/zh/FAQ.md" ) def paddle2onnx_export_configs(configs): - assert isinstance( - configs, dict - ), "create jit.save and paddle2onnx conversion configs from input, but input data is not dict." + assert isinstance(configs, dict), ( + "create jit.save and paddle2onnx conversion configs from input, but input data is not dict." 
+ ) jit_save_configs = { "output_spec", "with_hook", diff --git a/pyproject.toml b/pyproject.toml index a34befaf2..ae44d2d73 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,53 +1,88 @@ [build-system] +build-backend = "setuptools.build_meta" requires = [ - "setuptools>=42", - "wheel", - "cmake>=3.16", - "setuptools-scm", - "paddlepaddle==3.0.0.dev20250426", + "cmake>=3.16", + "paddlepaddle>=3.3,<4", + "setuptools>=42", + "setuptools-scm", + "wheel", ] -build-backend = "setuptools.build_meta" [project] name = "paddle2onnx" -dynamic = ["version"] description = "Export PaddlePaddle to ONNX" readme = "README.md" +license = { text = "Apache License v2.0" } authors = [ - {name = "paddle-infer", email = "paddle-infer@baidu.com"}, + { name = "paddle-infer", email = "paddle-infer@baidu.com" }, ] +requires-python = ">=3.10" classifiers = [ - "Programming Language :: Python :: 3", - "License :: OSI Approved :: Apache Software License", - "Operating System :: OS Independent", + "License :: OSI Approved :: Apache Software License", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", ] -license = {text = "Apache License v2.0"} -requires-python = ">=3.8" +dynamic = [ "version" ] dependencies = [ - "onnx>=1.16.1,<=1.17.0", - "onnxoptimizer==0.3.13; python_version < '3.12'", - "polygraphy>=0.49.20", + "onnx>=1.20.1,<2", + "onnxoptimizer>=1.5,<2", + "paddlepaddle>=3.3,<4", + "polygraphy>=0.49.20,<1", ] +scripts.paddle2onnx = "paddle2onnx.command:main" -[project.scripts] -paddle2onnx = "paddle2onnx.command:main" - -[tool.setuptools.dynamic] -version = {file = "VERSION_NUMBER"} - -[tool.setuptools.packages.find] -include = ["paddle2onnx*"] +[dependency-groups] +dev = [ + "mypy>=1.15,<2", + "pytest>=8,<9", + "ruff>=0.11,<1", +] 
-[tool.setuptools.exclude-package-data] -"*" = ["*.h", "*.cc", "*.bak", "*.in"] +[tool.setuptools] +dynamic.version = { file = "VERSION_NUMBER" } +exclude-package-data."*" = [ "*.h", "*.cc", "*.bak", "*.in" ] +packages.find.include = [ "paddle2onnx*" ] [tool.setuptools_scm] write_to = "paddle2onnx/version.py" +[tool.ruff] +target-version = "py310" +lint.select = [ + "B", # flake8-bugbear + "C4", # flake8-comprehensions + "E", # pycodestyle errors + "F", # pyflakes + "FURB", # refurb + "I", # isort + "PERF", # perflint + "PIE", # flake8-pie + "RET", # flake8-return + "RUF", # ruff-specific rules + "SIM", # flake8-simplify + "TCH", # flake8-type-checking + "UP", # pyupgrade + "W", # pycodestyle warnings +] +lint.ignore = [ + "E501", # line too long (handled by formatter) + "E721", # type comparison - sometimes intentional + "PERF401", # manual list comprehension - often less readable + "RET504", # unnecessary assignment before return - often clearer with assignment + "SIM108", # ternary operator - often less readable + "SIM112", # capitalized env vars - PaddlePaddle uses lowercase FLAGS_* convention + "SIM115", # open context manager - not always applicable +] + [tool.mypy] files = "setup.py" -python_version = "3.8" +python_version = "3.10" strict = true show_error_codes = true -enable_error_code = ["ignore-without-code", "redundant-expr", "truthy-bool"] +enable_error_code = [ "ignore-without-code", "redundant-expr", "truthy-bool" ] warn_unreachable = true diff --git a/setup.py b/setup.py index c3b2794e1..33ae686b4 100644 --- a/setup.py +++ b/setup.py @@ -13,20 +13,23 @@ # limitations under the License. 
# This file referred to github.com/onnx/onnx.git -from distutils import sysconfig, log -import setuptools -import setuptools.command.build_py -import setuptools.command.develop -import setuptools.command.build_ext -from shutil import which +from __future__ import annotations -from contextlib import contextmanager +import multiprocessing import os +import platform import shlex import subprocess import sys -import platform -import multiprocessing +from contextlib import contextmanager +from distutils import log, sysconfig +from shutil import which +from typing import ClassVar + +import setuptools +import setuptools.command.build_ext +import setuptools.command.build_py +import setuptools.command.develop TOP_DIR = os.path.realpath(os.path.dirname(__file__)) SRC_DIR = os.path.join(TOP_DIR, "paddle2onnx") @@ -60,7 +63,7 @@ @contextmanager def cd(path): if not os.path.isabs(path): - raise RuntimeError("Can only cd to absolute path, got: {}".format(path)) + raise RuntimeError(f"Can only cd to absolute path, got: {path}") orig_path = os.getcwd() os.chdir(path) try: @@ -83,8 +86,8 @@ class cmake_build(setuptools.Command): to `setup.py build`. By default all CPUs are used. 
""" - user_options = [ - (str("jobs="), str("j"), str("Specifies the number of jobs to use with make")) + user_options: ClassVar[list[tuple[str, str, str]]] = [ + ("jobs=", "j", "Specifies the number of jobs to use with make") ] built = False @@ -93,8 +96,7 @@ def initialize_options(self): self.jobs = None def finalize_options(self): - if sys.version_info[0] >= 3: - self.set_undefined_options("build", ("parallel", "jobs")) + self.set_undefined_options("build", ("parallel", "jobs")) if self.jobs is None and os.getenv("MAX_JOBS") is not None: self.jobs = os.getenv("MAX_JOBS") self.jobs = multiprocessing.cpu_count() if self.jobs is None else int(self.jobs) @@ -107,11 +109,11 @@ def run(self): # configure cmake_args = [ CMAKE, - "-DPYTHON_INCLUDE_DIR={}".format(sysconfig.get_python_inc()), - "-DPYTHON_EXECUTABLE={}".format(sys.executable), + f"-DPYTHON_INCLUDE_DIR={sysconfig.get_python_inc()}", + f"-DPYTHON_EXECUTABLE={sys.executable}", "-DBUILD_PADDLE2ONNX_PYTHON=ON", "-DCMAKE_EXPORT_COMPILE_COMMANDS=ON", - "-DONNX_NAMESPACE={}".format(ONNX_NAMESPACE), + f"-DONNX_NAMESPACE={ONNX_NAMESPACE}", "-DPY_EXT_SUFFIX={}".format( sysconfig.get_config_var("EXT_SUFFIX") or "" ), @@ -119,7 +121,7 @@ def run(self): str(sys.version_info[0]) + "." 
+ str(sys.version_info[1]) ), ] - cmake_args.append("-DCMAKE_BUILD_TYPE=%s" % build_type) + cmake_args.append(f"-DCMAKE_BUILD_TYPE={build_type}") cmake_args.append("-DCMAKE_POLICY_VERSION_MINIMUM=3.5") if WINDOWS: cmake_args.extend( @@ -127,27 +129,22 @@ def run(self): # we need to link with libpython on windows, so # passing python version to window in order to # find python in cmake - "-DPY_VERSION={}".format( - "{0}.{1}".format(*sys.version_info[:2]) - ), + "-DPY_VERSION={}".format("{}.{}".format(*sys.version_info[:2])), ] ) if platform.architecture()[0] == "64bit": cmake_args.extend(["-A", "x64", "-T", "host=x64"]) else: cmake_args.extend(["-A", "Win32", "-T", "host=x86"]) - cmake_args.extend(["-G", "Visual Studio 16 2019"]) else: cmake_args.append( - "-DPYTHON_LIBRARY={}".format( - sysconfig.get_python_lib(standard_lib=True) - ) + f"-DPYTHON_LIBRARY={sysconfig.get_python_lib(standard_lib=True)}" ) if "CMAKE_ARGS" in os.environ: extra_cmake_args = shlex.split(os.environ["CMAKE_ARGS"]) # prevent crossfire with downstream scripts del os.environ["CMAKE_ARGS"] - log.info("Extra cmake args: {}".format(extra_cmake_args)) + log.info(f"Extra cmake args: {extra_cmake_args}") cmake_args.extend(extra_cmake_args) cmake_args.append(TOP_DIR) subprocess.check_call(cmake_args) @@ -155,7 +152,7 @@ def run(self): build_args = [CMAKE, "--build", os.curdir] if WINDOWS: build_args.extend(["--config", build_type]) - build_args.extend(["--", "/maxcpucount:{}".format(self.jobs)]) + build_args.extend(["--", f"/maxcpucount:{self.jobs}"]) else: build_args.extend(["--", "-j", str(self.jobs)]) subprocess.check_call(build_args) @@ -189,14 +186,10 @@ def build_extensions(self): os.path.realpath(self.build_lib), "paddle2onnx", filename ) if platform.system() == "Darwin": - command = "install_name_tool -change @loader_path/../libs/ @loader_path/../paddle/base/libpaddle.so {}".format( - src - ) + command = f"install_name_tool -change @loader_path/../libs/ 
@loader_path/../paddle/base/libpaddle.so {src}" if os.system(command) != 0: raise Exception( - "Failed to change library paths using command: '{}'".format( - command - ) + f"Failed to change library paths using command: '{command}'" ) self.copy_file(src, dst) @@ -211,7 +204,7 @@ def build_extensions(self): ################################################################################ ext_modules = [ - setuptools.Extension(name=str("paddle2onnx.paddle2onnx_cpp2py_export"), sources=[]) + setuptools.Extension(name="paddle2onnx.paddle2onnx_cpp2py_export", sources=[]) ] ################################################################################ diff --git a/tests/auto_scan_test.py b/tests/auto_scan_test.py index 4999903f3..af0857774 100755 --- a/tests/auto_scan_test.py +++ b/tests/auto_scan_test.py @@ -12,18 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. -import numpy as np -import unittest +import copy +import logging import os import time -import logging -import paddle -from hypothesis import given, settings, HealthCheck +import unittest +from inspect import isfunction +from itertools import product + import hypothesis.strategies as st +import numpy as np +from hypothesis import HealthCheck, given, settings from onnxbase import APIOnnx, randtool -from itertools import product -import copy -from inspect import isfunction + +import paddle paddle.set_device("cpu") @@ -62,7 +64,7 @@ class BaseNet(paddle.nn.Layer): """ def __init__(self, config): - super(BaseNet, self).__init__() + super().__init__() self.config = copy.copy(config) def forward(self, *args, **kwargs): @@ -71,20 +73,21 @@ def forward(self, *args, **kwargs): class OPConvertAutoScanTest(unittest.TestCase): def __init__(self, *args, **kwargs): - super(OPConvertAutoScanTest, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) np.random.seed(1024) - paddle.enable_static() self.num_ran_models = 0 # @_test_with_pir def 
run_and_statis( self, max_examples=100, - opset_version=[7, 9, 15], + opset_version=None, reproduce=None, min_success_num=25, max_duration=-1, ): + if opset_version is None: + opset_version = [7, 9, 15] self.num_ran_models = 0 if os.getenv("CE_STAGE", "OFF") == "ON": max_examples *= 10 @@ -113,49 +116,45 @@ def run_test(configs): loop_func = given(generator())(run_test) if reproduce is not None: loop_func = reproduce(loop_func) - logging.info("Start to running test of {}".format(type(self))) + logging.info(f"Start to running test of {type(self)}") paddle.disable_static() loop_func() logging.info("===================Statistical Information===================") - logging.info("Number of Generated Programs: {}".format(self.num_ran_models)) + logging.info(f"Number of Generated Programs: {self.num_ran_models}") successful_ran_programs = int(self.num_ran_models) if successful_ran_programs < min_success_num: logging.warning("satisfied_programs = ran_programs") logging.error( - "At least {} programs need to ran successfully, but now only about {} programs satisfied.".format( - min_success_num, successful_ran_programs - ) + f"At least {min_success_num} programs need to ran successfully, but now only about {successful_ran_programs} programs satisfied." ) - assert False + raise AssertionError() used_time = time.time() - start_time - logging.info("Used time: {} s".format(round(used_time, 2))) + logging.info(f"Used time: {round(used_time, 2)} s") if max_duration > 0 and used_time > max_duration: logging.error( - "The duration exceeds {} seconds, if this is neccessary, try to set a larger number for parameter `max_duration`.".format( - max_duration - ) + f"The duration exceeds {max_duration} seconds, if this is neccessary, try to set a larger number for parameter `max_duration`." 
) - assert False + raise AssertionError() def run_test(self, configs): config, models = configs - logging.info("Run configs: {}".format(config)) - - assert "op_names" in config.keys(), "config must include op_names in dict keys" - assert ( - "test_data_shapes" in config.keys() - ), "config must include test_data_shapes in dict keys" - assert ( - "test_data_types" in config.keys() - ), "config must include test_data_types in dict keys" - assert ( - "opset_version" in config.keys() - ), "config must include opset_version in dict keys" - assert ( - "input_spec_shape" in config.keys() - ), "config must include input_spec_shape in dict keys" + logging.info(f"Run configs: {config}") + + assert "op_names" in config, "config must include op_names in dict keys" + assert "test_data_shapes" in config, ( + "config must include test_data_shapes in dict keys" + ) + assert "test_data_types" in config, ( + "config must include test_data_types in dict keys" + ) + assert "opset_version" in config, ( + "config must include opset_version in dict keys" + ) + assert "input_spec_shape" in config, ( + "config must include input_spec_shape in dict keys" + ) op_names = config["op_names"] test_data_shapes = config["test_data_shapes"] @@ -164,7 +163,7 @@ def run_test(self, configs): input_specs = config["input_spec_shape"] use_gpu = False - if "use_gpu" in config.keys(): + if "use_gpu" in config: use_gpu = config["use_gpu"] self.num_ran_models += 1 @@ -178,9 +177,9 @@ def run_test(self, configs): if len(opset_version) == 1 and len(models) != len(opset_version): opset_version = opset_version * len(models) - assert len(models) == len( - op_names - ), "Length of models should be equal to length of op_names" + assert len(models) == len(op_names), ( + "Length of models should be equal to length of op_names" + ) input_type_list = None if len(test_data_types) > 1: @@ -195,9 +194,9 @@ def run_test(self, configs): delta = 1e-5 rtol = 1e-5 - if "delta" in config.keys(): + if "delta" in config: delta = 
config["delta"] - if "rtol" in config.keys(): + if "rtol" in config: rtol = config["rtol"] for i, model in enumerate(models): @@ -213,7 +212,7 @@ def run_test(self, configs): use_gpu, ) for input_type in input_type_list: - input_tensors = list() + input_tensors = [] for j, shape in enumerate(test_data_shapes): # Determine whether it is a user-defined data generation function if isfunction(shape): @@ -240,9 +239,7 @@ def run_test(self, configs): ) ) obj.set_input_data("input_data", tuple(input_tensors)) - logging.info( - "Now Run >>> dtype: {}, op_name: {}".format(input_type, op_names[i]) - ) + logging.info(f"Now Run >>> dtype: {input_type}, op_name: {op_names[i]}") obj.run() if len(input_type_list) == 0: obj.run() diff --git a/tests/detection_ops/nms.py b/tests/detection_ops/nms.py index 9a1509d36..e4de96114 100644 --- a/tests/detection_ops/nms.py +++ b/tests/detection_ops/nms.py @@ -13,8 +13,8 @@ # limitations under the License. import paddle -from paddle.base.framework import in_dygraph_mode from paddle.base import core +from paddle.base.framework import in_dygraph_mode from paddle.base.layer_helper import LayerHelper @@ -149,42 +149,41 @@ class number index = None return output, nms_rois_num, index - else: - output = helper.create_variable_for_type_inference(dtype=bboxes.dtype) - index = helper.create_variable_for_type_inference(dtype="int32") + output = helper.create_variable_for_type_inference(dtype=bboxes.dtype) + index = helper.create_variable_for_type_inference(dtype="int32") - inputs = {"BBoxes": bboxes, "Scores": scores} - outputs = {"Out": output, "Index": index} + inputs = {"BBoxes": bboxes, "Scores": scores} + outputs = {"Out": output, "Index": index} - if rois_num is not None: - inputs["RoisNum"] = rois_num + if rois_num is not None: + inputs["RoisNum"] = rois_num - if return_rois_num: - nms_rois_num = helper.create_variable_for_type_inference(dtype="int32") - outputs["NmsRoisNum"] = nms_rois_num + if return_rois_num: + nms_rois_num = 
helper.create_variable_for_type_inference(dtype="int32") + outputs["NmsRoisNum"] = nms_rois_num - helper.append_op( - type="multiclass_nms3", - inputs=inputs, - attrs={ - "background_label": background_label, - "score_threshold": score_threshold, - "nms_top_k": nms_top_k, - "nms_threshold": nms_threshold, - "keep_top_k": keep_top_k, - "nms_eta": nms_eta, - "normalized": normalized, - }, - outputs=outputs, - ) - output.stop_gradient = True - index.stop_gradient = True - if not return_index: - index = None - if not return_rois_num: - nms_rois_num = None + helper.append_op( + type="multiclass_nms3", + inputs=inputs, + attrs={ + "background_label": background_label, + "score_threshold": score_threshold, + "nms_top_k": nms_top_k, + "nms_threshold": nms_threshold, + "keep_top_k": keep_top_k, + "nms_eta": nms_eta, + "normalized": normalized, + }, + outputs=outputs, + ) + output.stop_gradient = True + index.stop_gradient = True + if not return_index: + index = None + if not return_rois_num: + nms_rois_num = None - return ( - output, - nms_rois_num, - ) + return ( + output, + nms_rois_num, + ) diff --git a/tests/fake_quant.py b/tests/fake_quant.py index 0d83b3c08..9723f5bf3 100755 --- a/tests/fake_quant.py +++ b/tests/fake_quant.py @@ -14,18 +14,19 @@ # come from: https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/auto_compression/utils/fake_ptq.py import os + import paddle from paddle.fluid.framework import IrGraph from paddle.framework import core from paddle.static.quantization import ( - QuantizationTransformPass, - QuantizationTransformPassV2, AddQuantDequantPass, AddQuantDequantPassV2, QuantizationFreezePass, + QuantizationTransformPass, + QuantizationTransformPassV2, QuantWeightPass, + utils, ) -from paddle.static.quantization import utils try: from paddle.static.quantization import quant_config @@ -49,7 +50,7 @@ def post_quant_fake( model_filename=None, params_filename=None, save_model_path=None, - quantizable_op_type=["conv2d", "depthwise_conv2d", 
"mul"], + quantizable_op_type=None, is_full_quantize=False, activation_bits=8, weight_bits=8, @@ -68,6 +69,8 @@ def post_quant_fake( params_filename='params', save_model_path='fake_quant') """ + if quantizable_op_type is None: + quantizable_op_type = ["conv2d", "depthwise_conv2d", "mul"] activation_quantize_type = "range_abs_max" weight_quantize_type = "channel_wise_abs_max" _dynamic_quantize_op_type = ["lstm"] diff --git a/tests/onnx/base_expect.py b/tests/onnx/base_expect.py index 8137ab876..08afed8d9 100644 --- a/tests/onnx/base_expect.py +++ b/tests/onnx/base_expect.py @@ -18,9 +18,7 @@ from __future__ import annotations from copy import deepcopy -from typing import Any, Callable, Sequence - -import numpy as np +from typing import TYPE_CHECKING, Any import onnx from onnx.onnx_pb import ( @@ -33,6 +31,11 @@ TypeProto, ) +if TYPE_CHECKING: + from collections.abc import Callable, Sequence + + import numpy as np + _NodeTestCases = [] _TargetOpType = None _DiffOpTypes = None @@ -76,11 +79,11 @@ def _rename_edges_helper( for sparse_init_desc in new_graph.sparse_initializer: sg_rename[sparse_init_desc.values.name] = ( sparse_init_desc.values.name - ) = (prefix + sparse_init_desc.values.name) + ) = prefix + sparse_init_desc.values.name for sparse_init_desc in new_graph.sparse_initializer: sg_rename[sparse_init_desc.indices.name] = ( sparse_init_desc.indices.name - ) = (prefix + sparse_init_desc.indices.name) + ) = prefix + sparse_init_desc.indices.name def subgraph_rename_helper(name: str) -> Any: if name in sg_rename: # noqa: B023 @@ -124,15 +127,14 @@ def function_expand_helper( def rename_helper(internal_name: str) -> Any: if internal_name in io_names_map: return io_names_map[internal_name] - elif internal_name == "": + if internal_name == "": return "" return op_prefix + internal_name - new_node_list = [ + return [ _rename_edges_helper(internal_node, rename_helper, attribute_map, op_prefix) for internal_node in function_proto.node ] - return new_node_list def 
function_testcase_helper( @@ -186,7 +188,7 @@ def _extract_value_info( raise NotImplementedError( "_extract_value_info: both input and type_proto arguments cannot be None." ) - elif isinstance(input, list): + if isinstance(input, list): elem_type = onnx.helper.np_dtype_to_tensor_dtype(input[0].dtype) shape = None tensor_type_proto = onnx.helper.make_tensor_type_proto(elem_type, shape) @@ -208,9 +210,7 @@ def _make_test_model_gen_version(graph: GraphProto, **kwargs: Any) -> ModelProto latest_onnx_version, latest_ml_version, latest_training_version, - ) = onnx.helper.VERSION_TABLE[-1][ - 2:5 - ] # type: ignore + ) = onnx.helper.VERSION_TABLE[-1][2:5] # type: ignore if "opset_imports" in kwargs: for opset in kwargs["opset_imports"]: # If the test model uses an unreleased opset version (latest_version+1), @@ -280,12 +280,14 @@ def expect( del kwargs["output_type_protos"] inputs_vi = [ _extract_value_info(arr, arr_name, input_type) - for arr, arr_name, input_type in zip(inputs, present_inputs, input_type_protos) + for arr, arr_name, input_type in zip( + inputs, present_inputs, input_type_protos, strict=False + ) ] outputs_vi = [ _extract_value_info(arr, arr_name, output_type) for arr, arr_name, output_type in zip( - outputs, present_outputs, output_type_protos + outputs, present_outputs, output_type_protos, strict=False ) ] graph = onnx.helper.make_graph( diff --git a/tests/onnx/export_if.py b/tests/onnx/export_if.py index 508b5a7a4..383506cd7 100644 --- a/tests/onnx/export_if.py +++ b/tests/onnx/export_if.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import onnx import numpy as np +import onnx from base_expect import expect if __name__ == "__main__": diff --git a/tests/onnxbase.py b/tests/onnxbase.py index 46baf3aa0..3598f1c80 100644 --- a/tests/onnxbase.py +++ b/tests/onnxbase.py @@ -12,18 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. -import onnx -from inspect import isfunction import logging -from onnxruntime import InferenceSession import os +import shutil +from functools import wraps +from inspect import isfunction + import numpy as np +import onnx import paddle -import paddle2onnx import paddle.static as static +from onnxruntime import InferenceSession + +import paddle2onnx from paddle2onnx.convert import dygraph2onnx -import shutil -from functools import wraps def _test_with_pir(func): @@ -57,9 +59,7 @@ def compare_data(result_data, expect_data, delta, rtol): # 输出数据类型错误 if result_data.dtype != result_data.dtype: logging.error( - "Different output data types! res type is: {}, and expect type is: {}".format( - result_data.dtype, expect_data.dtype - ) + f"Different output data types! res type is: {result_data.dtype}, and expect type is: {expect_data.dtype}" ) return False @@ -68,7 +68,7 @@ def compare_data(result_data, expect_data, delta, rtol): diff = abs(result_data.astype("int32") - expect_data.astype("int32")) else: diff = abs(result_data - expect_data) - logging.error("Output has diff! max diff: {}".format(np.amax(diff))) + logging.error(f"Output has diff! max diff: {np.amax(diff)}") return False @@ -104,13 +104,13 @@ def compare(result, expect, delta=1e-10, rtol=1e-10): # Compare the actual shape with the expected shape and determine if the output results are correct. 
res_shape = compare_shape(result, expect) - assert res_data, "result: {} != expect: {}".format(result, expect) - assert res_shape, "result.shape: {} != expect.shape: {}".format( - result.shape, expect.shape + assert res_data, f"result: {result} != expect: {expect}" + assert res_shape, ( + f"result.shape: {result.shape} != expect.shape: {expect.shape}" + ) + assert result.dtype == expect.dtype, ( + f"result.dtype: {result.dtype} != expect.dtype: {expect.dtype}" ) - assert ( - result.dtype == expect.dtype - ), "result.dtype: {} != expect.dtype: {}".format(result.dtype, expect.dtype) elif isinstance(result, list) and len(result) > 1: for i in range(len(result)): if isinstance(result[i], (np.generic, np.ndarray)): @@ -128,11 +128,12 @@ def randtool(dtype, low, high, shape): if dtype == "int": return np.random.randint(low, high, shape) - elif dtype == "float": + if dtype == "float": return low + (high - low) * np.random.random(shape) - elif dtype == "bool": + if dtype == "bool": return np.random.randint(low, high, shape).astype("bool") + return None class BuildFunc(paddle.nn.Layer): @@ -141,7 +142,7 @@ class BuildFunc(paddle.nn.Layer): """ def __init__(self, inner_func, **super_param): - super(BuildFunc, self).__init__() + super().__init__() self.inner_func = inner_func self._super_param = super_param @@ -149,8 +150,7 @@ def forward(self, inputs): """ forward """ - x = self.inner_func(inputs, **self._super_param) - return x + return self.inner_func(inputs, **self._super_param) class BuildClass(paddle.nn.Layer): @@ -159,15 +159,14 @@ class BuildClass(paddle.nn.Layer): """ def __init__(self, inner_class, **super_param): - super(BuildClass, self).__init__() + super().__init__() self.inner_class = inner_class(**super_param) def forward(self, inputs): """ forward """ - x = self.inner_class(inputs) - return x + return self.inner_class(inputs) dtype_map = { @@ -190,7 +189,7 @@ def forward(self, inputs): } -class APIOnnx(object): +class APIOnnx: """ paddle API transfer to onnx 
""" @@ -200,13 +199,17 @@ def __init__( func, file_name, ver_list, - ops=[], - input_spec_shape=[], + ops=None, + input_spec_shape=None, delta=1e-5, rtol=1e-5, use_gpu=False, **sup_params, ): + if input_spec_shape is None: + input_spec_shape = [] + if ops is None: + ops = [] self.ops = ops if isinstance(self.ops, str): self.ops = [self.ops] @@ -295,14 +298,12 @@ def set_input_spec(self): if len(self.input_spec_shape) == 0: return self.input_spec.clear() - i = 0 - for shape in self.input_spec_shape: + for i, shape in enumerate(self.input_spec_shape): self.input_spec.append( paddle.static.InputSpec( shape=shape, dtype=self.input_dtype[i], name=str(i) ) ) - i += 1 def _mkdir(self): """ @@ -363,8 +364,7 @@ def _mk_onnx_res(self, ver): input_feed = {} if len(model.graph.input) == 0: return sess.run(output_names=None, input_feed=input_feed) - ort_outs = sess.run(output_names=None, input_feed=self.input_feed) - return ort_outs + return sess.run(output_names=None, input_feed=self.input_feed) def add_kwargs_to_dict(self, group_name, **kwargs): """ @@ -387,7 +387,7 @@ def check_ops(self, version): included = False paddle_op_list = [] assert len(self.ops) == 1, "You have to set one op name" - for key, node in paddle_graph.node_map.items(): + for node in paddle_graph.node_map.values(): op_type = node.type op_type = op_type.replace("depthwise_", "") if op_type == self.ops[0]: @@ -396,8 +396,8 @@ def check_ops(self, version): if len(paddle_graph.node_map.keys()) == 0 and self.ops[0] == "": included = True - assert included is True, "{} op in not in convert OPs, all OPs :{}".format( - self.ops, paddle_op_list + assert included is True, ( + f"{self.ops} op in not in convert OPs, all OPs :{paddle_op_list}" ) # TODO: PaddlePaddle 2.6 has modified the ParseFromString API, and it cannot be simply replaced with @@ -451,9 +451,9 @@ def run(self): for place in self.places: paddle.set_device(place) exp = self._mk_dygraph_exp(self._func) - assert ( - len(self.ops) <= 1 - ), "Need to make 
sure the number of ops in config is 1." + assert len(self.ops) <= 1, ( + "Need to make sure the number of ops in config is 1." + ) # Save Paddle Inference model if os.path.exists(self.name): diff --git a/tests/quantize_ops.py b/tests/quantize_ops.py index 7cfb9054a..716bc97b7 100755 --- a/tests/quantize_ops.py +++ b/tests/quantize_ops.py @@ -13,9 +13,9 @@ # limitations under the License. import paddle +from paddle import _legacy_C_ops from paddle.base.framework import in_dygraph_mode from paddle.base.layer_helper import LayerHelper -from paddle import _legacy_C_ops @paddle.jit.not_to_static @@ -25,20 +25,19 @@ def quantize_linear(x, scale, zero_point, bit_length=8, quant_axis=-1, name=None attrs = ("bit_length", bit_length, "quant_axis", quant_axis) if in_dygraph_mode(): return _legacy_C_ops.quantize_linear(x, scale, zero_point, *attrs) - else: - output = helper.create_variable_for_type_inference(dtype=x.dtype) + output = helper.create_variable_for_type_inference(dtype=x.dtype) - inputs = {"X": x, "Scale": scale, "ZeroPoint": zero_point} - outputs = {"Y": output} + inputs = {"X": x, "Scale": scale, "ZeroPoint": zero_point} + outputs = {"Y": output} - helper.append_op( - type="quantize_linear", - inputs=inputs, - attrs={"bit_length": bit_length, "quant_axis": quant_axis}, - outputs=outputs, - ) - output.stop_gradient = True - return output + helper.append_op( + type="quantize_linear", + inputs=inputs, + attrs={"bit_length": bit_length, "quant_axis": quant_axis}, + outputs=outputs, + ) + output.stop_gradient = True + return output @paddle.jit.not_to_static @@ -48,17 +47,16 @@ def dequantize_linear(x, scale, zero_point, bit_length=8, quant_axis=-1, name=No attrs = ("bit_length", bit_length, "quant_axis", quant_axis) if in_dygraph_mode(): return _legacy_C_ops.dequantize_linear(x, scale, zero_point, *attrs) - else: - output = helper.create_variable_for_type_inference(dtype=x.dtype) - - inputs = {"X": x, "Scale": scale, "ZeroPoint": zero_point} - outputs = {"Y": output} 
- - helper.append_op( - type="dequantize_linear", - inputs=inputs, - attrs={"bit_length": bit_length, "quant_axis": quant_axis}, - outputs=outputs, - ) - output.stop_gradient = True - return output + output = helper.create_variable_for_type_inference(dtype=x.dtype) + + inputs = {"X": x, "Scale": scale, "ZeroPoint": zero_point} + outputs = {"Y": output} + + helper.append_op( + type="dequantize_linear", + inputs=inputs, + attrs={"bit_length": bit_length, "quant_axis": quant_axis}, + outputs=outputs, + ) + output.stop_gradient = True + return output diff --git a/tests/test_Conv2D_Dropout.py b/tests/test_Conv2D_Dropout.py index 0731a0d24..b9c28e4b0 100644 --- a/tests/test_Conv2D_Dropout.py +++ b/tests/test_Conv2D_Dropout.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -34,7 +33,7 @@ def __init__( bias_attr=None, data_format="NCHW", ): - super(Net, self).__init__() + super().__init__() self._bn = paddle.nn.Conv2D( in_channels=1, out_channels=2, @@ -55,8 +54,7 @@ def forward(self, inputs): forward """ x = self._bn(inputs) - x = self._drop(x) - return x + return self._drop(x) @_test_with_pir diff --git a/tests/test_abs.py b/tests/test_abs.py index 0ac9eedc7..68bf2a1c9 100644 --- a/tests/test_abs.py +++ b/tests/test_abs.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.abs(inputs) - return x + return paddle.abs(inputs) @_test_with_pir diff --git a/tests/test_acos.py b/tests/test_acos.py index 81dad3295..7bb536612 100644 --- a/tests/test_acos.py +++ b/tests/test_acos.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.acos(inputs) - return x + return paddle.acos(inputs) def test_acos_7(): diff --git a/tests/test_add.py b/tests/test_add.py index f0ee1fb4e..936f6e621 100644 --- a/tests/test_add.py +++ b/tests/test_add.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, inputs_): """ forward """ - x = paddle.add(inputs, inputs_) - return x + return paddle.add(inputs, inputs_) @_test_with_pir diff --git a/tests/test_argmax.py b/tests/test_argmax.py index d3655b158..63e34e9eb 100755 --- a/tests/test_argmax.py +++ b/tests/test_argmax.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,15 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self, dtype="int64"): - super(Net, self).__init__() + super().__init__() self.dtype = dtype def forward(self, inputs): """ forward """ - x = paddle.argmax(inputs, axis=0, dtype=self.dtype) - return x + return paddle.argmax(inputs, axis=0, dtype=self.dtype) @_test_with_pir diff --git a/tests/test_argmin.py b/tests/test_argmin.py index 722232e2c..e2c7779cf 100644 --- a/tests/test_argmin.py +++ b/tests/test_argmin.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,7 +23,7 @@ class Net(paddle.nn.Layer): """ def __init__(self, axis=None, keepdim=False): - super(Net, self).__init__() + super().__init__() self.axis = axis self.keepdim = keepdim @@ -31,8 +31,7 @@ def forward(self, inputs): """ forward """ - x = paddle.argmin(inputs, axis=self.axis, keepdim=self.keepdim) - return x + return paddle.argmin(inputs, axis=self.axis, keepdim=self.keepdim) def test_argmin_9(): diff --git a/tests/test_argsort.py b/tests/test_argsort.py index 8260d6338..91ad04930 100644 --- a/tests/test_argsort.py +++ b/tests/test_argsort.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool, _test_with_pir class Net(paddle.nn.Layer): @@ -23,7 +23,7 @@ class Net(paddle.nn.Layer): """ def __init__(self, axis=-1, descending=False): - super(Net, self).__init__() + super().__init__() self.axis = axis self.descending = descending @@ -31,8 +31,7 @@ def forward(self, inputs): """ forward """ - x = paddle.argsort(inputs, axis=self.axis, descending=self.descending) - return x + return paddle.argsort(inputs, axis=self.axis, descending=self.descending) @_test_with_pir diff --git a/tests/test_array_to_tensor.py b/tests/test_array_to_tensor.py index 445bab6e4..4e9772610 100644 --- a/tests/test_array_to_tensor.py +++ b/tests/test_array_to_tensor.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_only_pir + import paddle -from onnxbase import APIOnnx -from onnxbase import _test_only_pir global_config = { "axis": 0, @@ -24,7 +24,7 @@ class BaseNet(paddle.nn.Layer): def __init__(self, axis, use_stack): - super(BaseNet, self).__init__() + super().__init__() self.axis = axis self.use_stack = use_stack diff --git a/tests/test_asin.py b/tests/test_asin.py index fe25dc74c..d09d98e23 100644 --- a/tests/test_asin.py +++ b/tests/test_asin.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.asin(inputs) - return x + return paddle.asin(inputs) def test_asin_9(): diff --git a/tests/test_assign.py b/tests/test_assign.py index ad425ff5b..79d410c94 100644 --- a/tests/test_assign.py +++ b/tests/test_assign.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.assign(inputs) - return x + return paddle.assign(inputs) @_test_with_pir diff --git a/tests/test_atan.py b/tests/test_atan.py index 5099b351a..007744b91 100644 --- a/tests/test_atan.py +++ b/tests/test_atan.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.atan(inputs) - return x + return paddle.atan(inputs) def test_atan_9(): diff --git a/tests/test_auto_scan_argminmax.py b/tests/test_auto_scan_argminmax.py index 26e6b3e5e..817294b8a 100755 --- a/tests/test_auto_scan_argminmax.py +++ b/tests/test_auto_scan_argminmax.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_only_pir +import paddle + op_api_map = { "arg_min": paddle.argmin, "arg_max": paddle.argmax, @@ -42,13 +44,12 @@ def forward(self, inputs): axis = paddle.assign(self.config["axis"]) else: axis = self.config["axis"] - x = op_api_map[self.config["op_names"]]( + return op_api_map[self.config["op_names"]]( inputs, axis=axis, keepdim=self.config["keep_dim"], dtype=self.config["out_dtype"], ) - return x class TestArgMinMaxConvert(OPConvertAutoScanTest): @@ -88,10 +89,10 @@ def sample_convert_config(self, draw): "rtol": 1e-4, } - models = list() - op_names = list() - opset_versions = list() - for op_name, i in op_api_map.items(): + models = [] + op_names = [] + opset_versions = [] + for op_name in op_api_map: config["op_names"] = op_name models.append(Net(config)) op_names.append(op_name) diff --git a/tests/test_auto_scan_argsort.py b/tests/test_auto_scan_argsort.py index 8858f859c..9dd9f2bf3 100644 --- a/tests/test_auto_scan_argsort.py +++ b/tests/test_auto_scan_argsort.py @@ -12,13 +12,15 @@ # See the License for the 
specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st import numpy as np -import unittest -import paddle +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -30,10 +32,9 @@ def forward(self, input): forward """ - x = paddle.argsort( + return paddle.argsort( input, axis=self.config["axis"], descending=self.config["descending"] ) - return x class TestArgsortConvert(OPConvertAutoScanTest): @@ -61,13 +62,9 @@ def generator_data(): for i in range(len(input_shape)): t = t * input_shape[i] input_data = np.array(random.sample(range(-5000, 5000), t)) - input_data = input_data.reshape(input_shape) - return input_data + return input_data.reshape(input_shape) - if descending: - opset_version = [7, 10, 11, 15] - else: - opset_version = [11, 15] + opset_version = [7, 10, 11, 15] if descending else [11, 15] config = { "op_names": ["argsort"], "test_data_shapes": [generator_data], diff --git a/tests/test_auto_scan_assign.py b/tests/test_auto_scan_assign.py index 2117df32c..e78eb7e54 100644 --- a/tests/test_auto_scan_assign.py +++ b/tests/test_auto_scan_assign.py @@ -12,13 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st import numpy as np -import unittest -import paddle +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_only_pir +import paddle + class Net(BaseNet): """ diff --git a/tests/test_auto_scan_atan2.py b/tests/test_auto_scan_atan2.py index 1b00b10af..2f8c2fd9d 100755 --- a/tests/test_auto_scan_atan2.py +++ b/tests/test_auto_scan_atan2.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle @@ -27,8 +29,7 @@ def forward(self, input1, input2): """ forward """ - x = paddle.atan2(input1, input2) - return x + return paddle.atan2(input1, input2) class TestUnsqueezeConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_avgpool.py b/tests/test_auto_scan_avgpool.py index d2509278d..59d6f30ff 100755 --- a/tests/test_auto_scan_avgpool.py +++ b/tests/test_auto_scan_avgpool.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st import numpy as np -import unittest +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle @@ -33,7 +35,7 @@ def forward(self, inputs): padding = self.config["padding"] ceil_mode = self.config["ceil_mode"] data_format = self.config["data_format"] - x = paddle.nn.functional.avg_pool2d( + return paddle.nn.functional.avg_pool2d( inputs, kernel_size=kernel_size, stride=stride, @@ -41,7 +43,6 @@ def forward(self, inputs): ceil_mode=ceil_mode, data_format=data_format, ) - return x class TestMaxpool2dConvert(OPConvertAutoScanTest): @@ -122,28 +123,22 @@ def sample_convert_config(self, draw): axis=0, ).tolist() if data_format == "NCHW": - padding = [[0, 0]] + [[0, 0]] + padding1 + padding2 + padding = [[0, 0], [0, 0], *padding1, *padding2] else: - padding = [[0, 0]] + padding1 + padding2 + [[0, 0]] + padding = [[0, 0], *padding1, *padding2, [0, 0]] else: padding = 0 if return_mask and padding_type in ["list2", "list4", "list8"]: padding = draw(st.integers(min_value=1, max_value=5)) - if return_mask: - opset_version = [[9, 15]] - else: - opset_version = [[7, 9, 15]] + opset_version = [[9, 15]] if return_mask else [[7, 9, 15]] if ceil_mode: opset_version = [10, 15] if padding == "VALID": ceil_mode = False - if return_mask: - op_names = "max_pool2d_with_index" - else: - op_names = "pool2d" + op_names = "max_pool2d_with_index" if return_mask else "pool2d" config = { "op_names": [op_names], "test_data_shapes": [input_shape], diff --git a/tests/test_auto_scan_batch_norm.py b/tests/test_auto_scan_batch_norm.py index 79531b5bd..d21d033ef 100755 --- a/tests/test_auto_scan_batch_norm.py +++ b/tests/test_auto_scan_batch_norm.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest +from onnxbase import _test_with_pir + import paddle from paddle import ParamAttr -from onnxbase import _test_with_pir class Net(BaseNet): @@ -26,7 +28,7 @@ class Net(BaseNet): """ def __init__(self, config=None): - super(Net, self).__init__(config) + super().__init__(config) if self.config["data_format"] in ["NC", "NCL", "NCHW", "NCDHW", "NCHW"]: param_shape = [self.config["input_shape"][1]] else: @@ -65,7 +67,7 @@ def forward(self, inputs): """ forward """ - x = paddle.nn.functional.batch_norm( + return paddle.nn.functional.batch_norm( inputs, running_mean=self.mean, running_var=self.variance, @@ -76,7 +78,6 @@ def forward(self, inputs): data_format=self.config["data_format"], use_global_stats=self.config["use_global_stats"], ) - return x class TestBatchNormConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_bmm.py b/tests/test_auto_scan_bmm.py index 60a77a088..bd1d6aa11 100755 --- a/tests/test_auto_scan_bmm.py +++ b/tests/test_auto_scan_bmm.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -28,8 +30,7 @@ def forward(self, inputs1, inputs2): """ forward """ - x = paddle.bmm(inputs1, inputs2) - return x + return paddle.bmm(inputs1, inputs2) class TestBmmConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_cast.py b/tests/test_auto_scan_cast.py index efa490878..af84d5c5e 100755 --- a/tests/test_auto_scan_cast.py +++ b/tests/test_auto_scan_cast.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -28,8 +30,7 @@ def forward(self, inputs): """ forward """ - x = paddle.cast(inputs, dtype=self.config["dtype"]) - return x + return paddle.cast(inputs, dtype=self.config["dtype"]) class TestCastConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_clip.py b/tests/test_auto_scan_clip.py index e713892ef..df991b630 100755 --- a/tests/test_auto_scan_clip.py +++ b/tests/test_auto_scan_clip.py @@ -12,12 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st -from onnxbase import randtool import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest +from onnxbase import _test_with_pir, randtool + import paddle -from onnxbase import _test_with_pir class Net0(BaseNet): @@ -29,8 +30,7 @@ def forward(self, inputs): """ forward """ - x = paddle.clip(inputs, min=self.config["min"], max=self.config["max"]) - return x + return paddle.clip(inputs, min=self.config["min"], max=self.config["max"]) class Net1(BaseNet): @@ -42,8 +42,7 @@ def forward(self, inputs, max_value): """ forward """ - x = paddle.clip(inputs, min=self.config["min"], max=max_value) - return x + return paddle.clip(inputs, min=self.config["min"], max=max_value) class Net2(BaseNet): @@ -55,8 +54,7 @@ def forward(self, inputs, min_value): """ forward """ - x = paddle.clip(inputs, min=min_value, max=self.config["max"]) - return x + return paddle.clip(inputs, min=min_value, max=self.config["max"]) class Net3(BaseNet): @@ -68,8 +66,7 @@ def forward(self, inputs, min_value, max_value): """ forward """ - x = paddle.clip(inputs, min=min_value, max=max_value) - return x + return paddle.clip(inputs, min=min_value, max=max_value) class Net4(BaseNet): @@ -81,8 +78,7 @@ def forward(self, inputs): """ forward """ - x = paddle.clip(inputs) - return x + return paddle.clip(inputs) class TestClipConvert0(OPConvertAutoScanTest): @@ -101,7 +97,7 @@ def sample_convert_config(self, draw): min_num = draw(st.integers(min_value=-4.0, max_value=-1.0)) max_num = draw(st.floats(min_value=0, max_value=4.0)) - models = list() + models = [] config0 = { "op_names": ["clip"], "test_data_shapes": [input_shape], @@ -137,10 +133,9 @@ def sample_convert_config(self, draw): min_num = draw(st.floats(min_value=-4.0, max_value=-2.0)) def generator_max(): - input_data = randtool("int", 0, 10, [1]) - return input_data + return randtool("int", 0, 10, [1]) - 
models = list() + models = [] config1 = { "op_names": ["clip"], "test_data_shapes": [input_shape, generator_max], @@ -173,7 +168,7 @@ def sample_convert_config(self, draw): max_num = draw(st.floats(min_value=2.0, max_value=4.0)) - models = list() + models = [] config2 = { "op_names": ["clip"], "test_data_shapes": [input_shape, [1]], @@ -205,14 +200,12 @@ def sample_convert_config(self, draw): dtype = draw(st.sampled_from(["float32", "float64"])) def generator_min(): - input_data = randtool("float", -10, -1, [1]) - return input_data + return randtool("float", -10, -1, [1]) def generator_max(): - input_data = randtool("int", 0, 10, [1]) - return input_data + return randtool("int", 0, 10, [1]) - models = list() + models = [] config3 = { "op_names": ["clip"], "test_data_shapes": [input_shape, generator_min, generator_max], @@ -242,7 +235,7 @@ def sample_convert_config(self, draw): dtype = draw(st.sampled_from(["float32", "float64"])) - models = list() + models = [] config0 = { "op_names": ["clip"], "test_data_shapes": [input_shape], diff --git a/tests/test_auto_scan_concat.py b/tests/test_auto_scan_concat.py index c28d79f22..da6a8260e 100644 --- a/tests/test_auto_scan_concat.py +++ b/tests/test_auto_scan_concat.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_only_pir +import paddle + class Net(BaseNet): """ @@ -31,8 +33,7 @@ def forward(self, inputs1, inputs2): axis = self.config["axis"] if self.config["isTensor"]: axis = paddle.to_tensor(axis, dtype=self.config["axis_dtype"]) - x = paddle.concat([inputs1, inputs2], axis=axis) - return x + return paddle.concat([inputs1, inputs2], axis=axis) class TestConcatConvert(OPConvertAutoScanTest): @@ -45,7 +46,7 @@ def sample_convert_config(self, draw): input_shape = draw( st.lists(st.integers(min_value=4, max_value=8), min_size=2, max_size=5) ) - axis_dtype = "int64" # 只能设置为INT64,设置为INT32时会在axis_tensor后增加cast导致取不到constant数值 + axis_dtype = "int64" # 只能设置为INT64,设置为INT32时会在axis_tensor后增加cast导致取不到constant数值 dtype = draw( st.sampled_from(["float16", "float32", "float64", "int32", "int64"]) ) diff --git a/tests/test_auto_scan_conv2d.py b/tests/test_auto_scan_conv2d.py index e5a55f827..95de7aab7 100755 --- a/tests/test_auto_scan_conv2d.py +++ b/tests/test_auto_scan_conv2d.py @@ -12,13 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st import numpy as np -import unittest -import paddle +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -29,7 +31,7 @@ def forward(self, inputs, weight): """ forward """ - x = paddle.nn.functional.conv2d( + return paddle.nn.functional.conv2d( inputs, weight, stride=self.config["stride"], @@ -38,7 +40,6 @@ def forward(self, inputs, weight): groups=self.config["groups"], data_format=self.config["data_format"], ) - return x class TestConv2dConvert(OPConvertAutoScanTest): @@ -112,9 +113,9 @@ def sample_convert_config(self, draw): axis=0, ).tolist() if data_format == "NCHW": - padding = [[0, 0]] + [[0, 0]] + padding1 + padding2 + padding = [[0, 0], [0, 0], *padding1, *padding2] else: - padding = [[0, 0]] + padding1 + padding2 + [[0, 0]] + padding = [[0, 0], *padding1, *padding2, [0, 0]] elif padding_type == "list": if draw(st.booleans()): padding = draw( diff --git a/tests/test_auto_scan_conv2d_transpose.py b/tests/test_auto_scan_conv2d_transpose.py index e1b87ee1c..5ed359d29 100755 --- a/tests/test_auto_scan_conv2d_transpose.py +++ b/tests/test_auto_scan_conv2d_transpose.py @@ -12,13 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st import numpy as np -import unittest -import paddle +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -33,7 +35,7 @@ def forward(self, inputs, weight): output_size = self.config["output_size"] else: output_size = self.config["output_size"] - x = paddle.nn.functional.conv2d_transpose( + return paddle.nn.functional.conv2d_transpose( inputs, weight, stride=self.config["stride"], @@ -43,7 +45,6 @@ def forward(self, inputs, weight): output_size=output_size, data_format=self.config["data_format"], ) - return x class TestConv2dTransposeConvert(OPConvertAutoScanTest): @@ -133,9 +134,9 @@ def sample_convert_config(self, draw): axis=0, ).tolist() if data_format == "NCHW": - padding = [[0, 0]] + [[0, 0]] + padding1 + padding2 + padding = [[0, 0], [0, 0], *padding1, *padding2] else: - padding = [[0, 0]] + padding1 + padding2 + [[0, 0]] + padding = [[0, 0], *padding1, *padding2, [0, 0]] padding_1_1 = padding[2][0] padding_1_2 = padding[2][1] padding_2_1 = padding[3][0] diff --git a/tests/test_auto_scan_conv3d.py b/tests/test_auto_scan_conv3d.py index 572f78abc..cce8ef01b 100755 --- a/tests/test_auto_scan_conv3d.py +++ b/tests/test_auto_scan_conv3d.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st import numpy as np -import unittest +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle @@ -28,7 +30,7 @@ def forward(self, inputs, weight): """ forward """ - x = paddle.nn.functional.conv3d( + return paddle.nn.functional.conv3d( inputs, weight, stride=self.config["stride"], @@ -37,7 +39,6 @@ def forward(self, inputs, weight): groups=self.config["groups"], data_format=self.config["data_format"], ) - return x class TestConv3dConvert(OPConvertAutoScanTest): @@ -129,7 +130,7 @@ def sample_convert_config(self, draw): ), axis=0, ).tolist() - padding = [[0, 0]] + [[0, 0]] + padding1 + padding2 + padding3 + padding = [[0, 0], [0, 0], *padding1, *padding2, *padding3] elif padding_type == "list": if draw(st.booleans()): padding = draw( diff --git a/tests/test_auto_scan_cumsum.py b/tests/test_auto_scan_cumsum.py index 8d8214c3e..b2e82a417 100755 --- a/tests/test_auto_scan_cumsum.py +++ b/tests/test_auto_scan_cumsum.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_only_pir +import paddle + class Net(BaseNet): """ @@ -36,8 +38,7 @@ def forward(self, inputs): ) else: axis = self.config["axis"] - x = paddle.cumsum(inputs, axis=axis, dtype=self.config["dtype"]) - return x + return paddle.cumsum(inputs, axis=axis, dtype=self.config["dtype"]) class TestCumsumConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_dist.py b/tests/test_auto_scan_dist.py index ade64f939..44ed45f55 100644 --- a/tests/test_auto_scan_dist.py +++ b/tests/test_auto_scan_dist.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -28,8 +30,7 @@ def forward(self, x, y): """ forward """ - x = paddle.dist(x, y, p=self.config["p"]) - return x + return paddle.dist(x, y, p=self.config["p"]) class TestDistConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_dot.py b/tests/test_auto_scan_dot.py index db16f6fd8..8f0c0e6de 100644 --- a/tests/test_auto_scan_dot.py +++ b/tests/test_auto_scan_dot.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle @@ -27,8 +29,7 @@ def forward(self, x, y): """ forward """ - x = paddle.dot(x, y) - return x + return paddle.dot(x, y) class TestDotConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_dropout.py b/tests/test_auto_scan_dropout.py index 292c1e101..65c6b1a97 100755 --- a/tests/test_auto_scan_dropout.py +++ b/tests/test_auto_scan_dropout.py @@ -12,13 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st -import unittest -import paddle import random +import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_only_pir +import paddle + class Net(BaseNet): """ @@ -34,10 +36,9 @@ def forward(self, x): else: p = self.config["p"] # when training is true, has diff - x = paddle.nn.functional.dropout( + return paddle.nn.functional.dropout( x, training=False, p=p, axis=self.config["axis"], mode=self.config["mode"] ) - return x class TestDropoutConvert(OPConvertAutoScanTest): @@ -81,7 +82,7 @@ def sample_convert_config(self, draw): "tensor_attr": tensor_attr, } if axis is not None: - if mode in ["upscale_in_train"]: + if mode == "upscale_in_train": config["op_names"] = [""] else: config["op_names"] = ["scale"] diff --git a/tests/test_auto_scan_elementwise_ops.py b/tests/test_auto_scan_elementwise_ops.py index 3134734f9..eb8cd1bfb 100755 --- a/tests/test_auto_scan_elementwise_ops.py +++ b/tests/test_auto_scan_elementwise_ops.py @@ -12,12 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st -from onnxbase import randtool import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest +from onnxbase import _test_only_pir, randtool + import paddle -from onnxbase import _test_only_pir op_api_map = { "elementwise_add": paddle.add, @@ -38,8 +39,7 @@ class Net(BaseNet): def forward(self, inputs1, inputs2): - x = op_api_map[self.config["op_names"]](inputs1, inputs2) - return x + return op_api_map[self.config["op_names"]](inputs1, inputs2) class TestElementwiseopsConvert(OPConvertAutoScanTest): @@ -90,14 +90,14 @@ def generator_data(): "input_spec_shape": [], } - models = list() - op_names = list() - opset_versions = list() - for op_name, i in op_api_map.items(): + models = [] + op_names = [] + opset_versions = [] + for op_name in op_api_map: config["op_names"] = op_name models.append(Net(config)) op_names.append(op_name) - for op_name, i in op_api_map.items(): + for op_name in op_api_map: opset_versions.append(opset_version_map[op_name]) config["op_names"] = op_names config["opset_version"] = opset_versions @@ -124,8 +124,7 @@ def test(self): class Net_2(BaseNet): def forward(self, inputs1, inputs2): - x = op_api_map_2[self.config["op_names"]](inputs1, inputs2) - return x + return op_api_map_2[self.config["op_names"]](inputs1, inputs2) class TestElementwiseopsConvert_2(OPConvertAutoScanTest): @@ -171,14 +170,14 @@ def sample_convert_config(self, draw): "input_spec_shape": [], } - models = list() - op_names = list() - opset_versions = list() - for op_name, i in op_api_map_2.items(): + models = [] + op_names = [] + opset_versions = [] + for op_name in op_api_map_2: config["op_names"] = op_name models.append(Net_2(config)) op_names.append(op_name) - for op_name, i in op_api_map_2.items(): + for op_name in op_api_map_2: opset_versions.append(opset_version_map_2[op_name]) config["op_names"] = op_names 
config["opset_version"] = opset_versions diff --git a/tests/test_auto_scan_elu.py b/tests/test_auto_scan_elu.py index 0e0c2f604..99f95bcdf 100755 --- a/tests/test_auto_scan_elu.py +++ b/tests/test_auto_scan_elu.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle @@ -27,8 +29,7 @@ def forward(self, inputs): """ forward """ - x = paddle.nn.functional.elu(inputs, alpha=self.config["alpha"]) - return x + return paddle.nn.functional.elu(inputs, alpha=self.config["alpha"]) class TestEluConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_expand.py b/tests/test_auto_scan_expand.py index dd0635f04..1f987f166 100755 --- a/tests/test_auto_scan_expand.py +++ b/tests/test_auto_scan_expand.py @@ -12,14 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet +import random +import unittest + import hypothesis.strategies as st import numpy as np -import unittest -import paddle -import random +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_only_pir +import paddle + class Net(BaseNet): """ @@ -35,8 +37,7 @@ def forward(self, inputs): shape = paddle.to_tensor(np.array(shape).astype(self.config["shape_dtype"])) x = paddle.expand(inputs, shape=shape) # TODO there's bug with expand operator - x = paddle.reshape(x, shape=paddle.to_tensor(np.array([-1]).astype("int32"))) - return x + return paddle.reshape(x, shape=paddle.to_tensor(np.array([-1]).astype("int32"))) class TestExpandConvert(OPConvertAutoScanTest): @@ -90,8 +91,7 @@ def forward(self, inputs): # shape = [paddle.to_tensor(2), paddle.to_tensor(np.array(1).astype("int64")), paddle.to_tensor(2), paddle.to_tensor(3), paddle.to_tensor(2), paddle.to_tensor(2)] x = paddle.expand(inputs, shape=shape) # TODO there's bug with expand operator - x = paddle.reshape(x, shape=paddle.to_tensor(np.array([-1]).astype("int32"))) - return x + return paddle.reshape(x, shape=paddle.to_tensor(np.array([-1]).astype("int32"))) class TestExpandConvert1(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_expand_as.py b/tests/test_auto_scan_expand_as.py index 973397998..761946f09 100755 --- a/tests/test_auto_scan_expand_as.py +++ b/tests/test_auto_scan_expand_as.py @@ -12,11 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st +import random import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle -import random class Net(BaseNet): diff --git a/tests/test_auto_scan_eye.py b/tests/test_auto_scan_eye.py index 173f74ad4..9fae3d7af 100755 --- a/tests/test_auto_scan_eye.py +++ b/tests/test_auto_scan_eye.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_only_pir +import paddle + class Net(BaseNet): """ @@ -35,8 +37,7 @@ def forward(self): if self.config["num_columns"] is not None: num_columns = paddle.assign(self.config["num_columns"]) dtype = self.config["dtype"] - x = paddle.eye(num_rows, num_columns=num_columns, dtype=dtype) - return x + return paddle.eye(num_rows, num_columns=num_columns, dtype=dtype) class TestEyeConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_fill_constant.py b/tests/test_auto_scan_fill_constant.py index 39635b6d6..cc818408e 100755 --- a/tests/test_auto_scan_fill_constant.py +++ b/tests/test_auto_scan_fill_constant.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir -import unittest + import paddle @@ -29,8 +31,7 @@ def forward(self): if self.config["is_shape_tensor"]: shape = paddle.to_tensor(shape).astype(self.config["shape_dtype"]) dtype = self.config["dtype"] - x = paddle.full(shape=shape, fill_value=fill_value, dtype=dtype) - return x + return paddle.full(shape=shape, fill_value=fill_value, dtype=dtype) class TestFullConvert(OPConvertAutoScanTest): @@ -50,10 +51,7 @@ def sample_convert_config(self, draw): # todo tensor is not supported is_tensor = False # draw(st.booleans()) is_shape_tensor = draw(st.booleans()) - if is_shape_tensor: - opset_version = [9, 11, 15] - else: - opset_version = [7, 9, 15] + opset_version = [9, 11, 15] if is_shape_tensor else [7, 9, 15] config = { "op_names": ["fill_constant"], @@ -85,8 +83,7 @@ def forward(self): # TODO not supported # shape = [paddle.to_tensor(2), paddle.to_tensor(np.array(1).astype("int64")), 2, 3, 2, 2] dtype = self.config["dtype"] - x = paddle.full(shape=shape, fill_value=fill_value, dtype=dtype) - return x + return paddle.full(shape=shape, fill_value=fill_value, dtype=dtype) class TestFullConvert1(OPConvertAutoScanTest): @@ -105,10 +102,7 @@ def sample_convert_config(self, draw): fill_value = draw(st.integers(min_value=1, max_value=5)) # todo tensor is not supported is_shape_tensor = True # draw(st.booleans()) - if is_shape_tensor: - opset_version = [9, 15] - else: - opset_version = [7, 9, 15] + opset_version = [9, 15] if is_shape_tensor else [7, 9, 15] config = { "op_names": ["fill_constant"], "test_data_shapes": [], @@ -139,8 +133,7 @@ def forward(self): # TODO not supported # shape = [paddle.to_tensor(2), paddle.to_tensor(np.array(1).astype("int64")), 2, 3, 2, 2] dtype = self.config["dtype"] - x = paddle.full(shape=shape, fill_value=fill_value, dtype=dtype) - return 
x + return paddle.full(shape=shape, fill_value=fill_value, dtype=dtype) class TestFullConvert2(OPConvertAutoScanTest): @@ -160,10 +153,7 @@ def sample_convert_config(self, draw): # todo tensor is not supported is_tensor = draw(st.booleans()) is_shape_tensor = True # draw(st.booleans()) - if is_shape_tensor: - opset_version = [9, 11, 15] - else: - opset_version = [7, 9, 15] + opset_version = [9, 11, 15] if is_shape_tensor else [7, 9, 15] config = { "op_names": ["fill_constant"], "test_data_shapes": [], diff --git a/tests/test_auto_scan_fill_like.py b/tests/test_auto_scan_fill_like.py index 5a6630210..c0e7ae554 100755 --- a/tests/test_auto_scan_fill_like.py +++ b/tests/test_auto_scan_fill_like.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle op_api_map = { @@ -33,8 +35,7 @@ def forward(self, x): forward """ x = op_api_map[self.config["op_names"]](x) - x = x.astype("int32") - return x + return x.astype("int32") class TestFillLikeConvert(OPConvertAutoScanTest): @@ -58,9 +59,9 @@ def sample_convert_config(self, draw): "input_spec_shape": [], } - models = list() - op_names = list() - for op_name, i in op_api_map.items(): + models = [] + op_names = [] + for op_name in op_api_map: config["op_names"] = op_name models.append(Net(config)) op_names.append(op_name) diff --git a/tests/test_auto_scan_flatten.py b/tests/test_auto_scan_flatten.py index 2b69d6bee..9a8a91956 100755 --- a/tests/test_auto_scan_flatten.py +++ b/tests/test_auto_scan_flatten.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -28,10 +30,9 @@ def forward(self, x): """ forward """ - x = paddle.flatten( + return paddle.flatten( x, start_axis=self.config["start_axis"], stop_axis=self.config["stop_axis"] ) - return x class TestFlattenConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_flip.py b/tests/test_auto_scan_flip.py index 22da09857..3fb7e4601 100755 --- a/tests/test_auto_scan_flip.py +++ b/tests/test_auto_scan_flip.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_only_pir +import paddle + class Net(BaseNet): """ @@ -28,8 +30,7 @@ def forward(self, x): """ forward """ - x = paddle.flip(x, axis=self.config["axis"]) - return x + return paddle.flip(x, axis=self.config["axis"]) class TestFlattenConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_floordiv.py b/tests/test_auto_scan_floordiv.py index 2bbd12ede..4b6235afd 100755 --- a/tests/test_auto_scan_floordiv.py +++ b/tests/test_auto_scan_floordiv.py @@ -12,12 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st -from onnxbase import randtool import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest +from onnxbase import _test_with_pir, randtool + import paddle -from onnxbase import _test_with_pir op_api_map = {"elementwise_floordiv": paddle.floor_divide} @@ -28,8 +29,7 @@ class Net(BaseNet): def forward(self, inputs1, inputs2): - x = op_api_map[self.config["op_names"]](inputs1, inputs2) - return x + return op_api_map[self.config["op_names"]](inputs1, inputs2) class TestfloordivConvert(OPConvertAutoScanTest): @@ -68,8 +68,7 @@ def sample_convert_config(self, draw): dtype = draw(st.sampled_from(["int32", "int64"])) def generator_data(): - input_data = randtool("int", 1.0, 20.0, input2_shape) - return input_data + return randtool("int", 1.0, 20.0, input2_shape) config = { "op_names": ["elementwise_floordiv"], @@ -79,14 +78,14 @@ def generator_data(): "input_spec_shape": [], } - models = list() - op_names = list() - opset_versions = list() - for op_name, i in op_api_map.items(): + models = [] + op_names = [] + opset_versions = [] + for op_name in op_api_map: config["op_names"] = op_name models.append(Net(config)) op_names.append(op_name) - for op_name, i in op_api_map.items(): + for op_name in op_api_map: opset_versions.append(opset_version_map[op_name]) config["op_names"] = op_names config["opset_version"] = opset_versions diff --git a/tests/test_auto_scan_full_like.py b/tests/test_auto_scan_full_like.py index 3af481ac0..3edc08c65 100755 --- a/tests/test_auto_scan_full_like.py +++ b/tests/test_auto_scan_full_like.py @@ -12,11 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest +from onnxbase import _test_only_pir, randtool + import paddle -from onnxbase import randtool, _test_only_pir op_api_map = { "fill_any_like": paddle.ones_like, @@ -34,8 +36,7 @@ def forward(self, x): forward """ x = op_api_map[self.config["op_names"]](x) - x = x.astype("int32") - return x + return x.astype("int32") class TestFullLikeConvert(OPConvertAutoScanTest): @@ -59,9 +60,9 @@ def sample_convert_config(self, draw): "input_spec_shape": [], } - models = list() - op_names = list() - for op_name, i in op_api_map.items(): + models = [] + op_names = [] + for op_name in op_api_map: config["op_names"] = op_name models.append(Net(config)) op_names.append(op_name) @@ -84,8 +85,7 @@ def forward(self, x, fill_value): forward """ x = paddle.full_like(x, fill_value) - x = x.astype("int32") - return x + return x.astype("int32") class TestFullLikeConvert2(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_gather.py b/tests/test_auto_scan_gather.py index e1274eb69..273d01cc1 100644 --- a/tests/test_auto_scan_gather.py +++ b/tests/test_auto_scan_gather.py @@ -12,14 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st -import numpy as np import unittest -import paddle from random import sample + +import hypothesis.strategies as st +import numpy as np +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_only_pir +import paddle + class Net0(BaseNet): """ @@ -30,8 +32,7 @@ def forward(self, x, index): """ forward """ - x = paddle.gather(x, index, axis=self.config["axis"]) - return x + return paddle.gather(x, index, axis=self.config["axis"]) class Net1(BaseNet): @@ -47,8 +48,7 @@ def forward(self, x, index): axis = paddle.to_tensor(axis, dtype="int64") x = paddle.gather(x, index, axis=axis) shape = paddle.shape(x) - x = paddle.reshape(x, shape) - return x + return paddle.reshape(x, shape) class TestGatherConvert0(OPConvertAutoScanTest): @@ -68,7 +68,7 @@ def sample_convert_config(self, draw): axis = draw(st.integers(min_value=0, max_value=len(input_shape) - 1)) def generator_index(): - index_list = [i for i in range(input_shape[axis])] + index_list = list(range(input_shape[axis])) index_select = sample(index_list, 2) return np.array(index_select) @@ -107,7 +107,7 @@ def sample_convert_config(self, draw): axis = draw(st.integers(min_value=0, max_value=len(input_shape) - 1)) def generator_index(): - index_list = [i for i in range(input_shape[axis])] + index_list = list(range(input_shape[axis])) index_select = sample(index_list, 2) return np.array(index_select) diff --git a/tests/test_auto_scan_gather_nd.py b/tests/test_auto_scan_gather_nd.py index a71535e6b..af1ab039f 100755 --- a/tests/test_auto_scan_gather_nd.py +++ b/tests/test_auto_scan_gather_nd.py @@ -12,12 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -from onnxbase import randtool -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest +from onnxbase import _test_with_pir, randtool + import paddle -from onnxbase import _test_with_pir class Net(BaseNet): @@ -29,8 +30,7 @@ def forward(self, input1, input2): """ forward """ - x = paddle.gather_nd(input1, input2) - return x + return paddle.gather_nd(input1, input2) class TestGatherNDConvert(OPConvertAutoScanTest): @@ -60,8 +60,7 @@ def sample_convert_config(self, draw): input2_shape = [len(input_shape)] def generator_data(): - input_data = randtool("int", 0, 10, input2_shape) - return input_data + return randtool("int", 0, 10, input2_shape) config = { "op_names": ["gather_nd"], diff --git a/tests/test_auto_scan_gaussian_random.py b/tests/test_auto_scan_gaussian_random.py index 66e6f4cae..9add10912 100755 --- a/tests/test_auto_scan_gaussian_random.py +++ b/tests/test_auto_scan_gaussian_random.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -from onnxbase import randtool, _test_with_pir -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest +from onnxbase import _test_with_pir, randtool + import paddle @@ -29,13 +31,12 @@ def forward(self, input_1, input_2, input_3): forward """ inputs = [input_1, input_2, input_3] - x = paddle.tensor.random.gaussian( + return paddle.tensor.random.gaussian( inputs, mean=self.config["mean"], std=self.config["std"], dtype=self.config["out_dtype"], ) - return x class TestGaussianRandomConvert_tensorlist(OPConvertAutoScanTest): @@ -56,16 +57,13 @@ def sample_convert_config(self, draw): out_dtype = draw(st.sampled_from(["float32", "float64"])) def generator1_data(): - input_data1 = randtool("int", 1, 10, input_shape[0]) - return input_data1 + return randtool("int", 1, 10, input_shape[0]) def generator2_data(): - input_data2 = randtool("int", 1, 10, input_shape[1]) - return input_data2 + return randtool("int", 1, 10, input_shape[1]) def generator3_data(): - input_data3 = randtool("int", 1, 10, input_shape[2]) - return input_data3 + return randtool("int", 1, 10, input_shape[2]) dtype = draw(st.sampled_from(["int32", "int64"])) @@ -101,13 +99,12 @@ def forward(self, inputs): """ forward """ - x = paddle.tensor.random.gaussian( + return paddle.tensor.random.gaussian( inputs, mean=self.config["mean"], std=self.config["std"], dtype=self.config["out_dtype"], ) - return x class TestGaussianRandomConvert(OPConvertAutoScanTest): @@ -128,8 +125,7 @@ def sample_convert_config(self, draw): out_dtype = draw(st.sampled_from(["float32", "float64"])) def generator_data(): - input_data = randtool("int", 1, 10, input_shape) - return input_data + return randtool("int", 1, 10, input_shape) dtype = draw(st.sampled_from(["int32", "int64"])) @@ -165,13 +161,12 @@ def forward(self): """ forward """ - x = paddle.tensor.random.gaussian( + return 
paddle.tensor.random.gaussian( shape=self.config["shape"], mean=self.config["mean"], std=self.config["std"], dtype=self.config["out_dtype"], ) - return x class TestGaussianRandomConvert_list(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_gelu.py b/tests/test_auto_scan_gelu.py index a2bd11e8a..dd9bcb2d5 100644 --- a/tests/test_auto_scan_gelu.py +++ b/tests/test_auto_scan_gelu.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -28,8 +30,7 @@ def forward(self, inputs): """ forward """ - x = paddle.nn.functional.gelu(inputs, approximate=False) - return x + return paddle.nn.functional.gelu(inputs, approximate=False) class TestGeluConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_grid_sampler.py b/tests/test_auto_scan_grid_sampler.py index 5fafcf3bb..6bdc1aa2a 100755 --- a/tests/test_auto_scan_grid_sampler.py +++ b/tests/test_auto_scan_grid_sampler.py @@ -12,10 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st import paddle +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir @@ -28,14 +29,13 @@ def forward(self, x, grid): """ forward """ - out = paddle.nn.functional.grid_sample( + return paddle.nn.functional.grid_sample( x, grid, align_corners=self.config["align_corners"], padding_mode=self.config["padding_mode"], mode=self.config["mode"], ) - return out class TestGroupNormConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_group_norm.py b/tests/test_auto_scan_group_norm.py index 7501431ff..74dd5cc32 100755 --- a/tests/test_auto_scan_group_norm.py +++ b/tests/test_auto_scan_group_norm.py @@ -12,10 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st import paddle +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir @@ -25,7 +26,7 @@ class Net(BaseNet): """ def __init__(self, config=None): - super(Net, self).__init__(config) + super().__init__(config) groups = self.config["groups"] epsilon = self.config["epsilon"] num_channels = self.config["num_channels"] @@ -43,8 +44,7 @@ def forward(self, inputs): """ forward """ - x = self.group_norm(inputs) - return x + return self.group_norm(inputs) class TestGroupNormConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_hardsigmoid.py b/tests/test_auto_scan_hardsigmoid.py index d7afa3981..cfaa836c8 100644 --- a/tests/test_auto_scan_hardsigmoid.py +++ b/tests/test_auto_scan_hardsigmoid.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -28,10 +30,9 @@ def forward(self, inputs): """ forward """ - x = paddle.nn.functional.hardsigmoid( + return paddle.nn.functional.hardsigmoid( inputs, slope=self.config["slope"], offset=self.config["offset"] ) - return x class TestHardsigmoidConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_hardswish.py b/tests/test_auto_scan_hardswish.py index dcc55c57a..f69c2e222 100644 --- a/tests/test_auto_scan_hardswish.py +++ b/tests/test_auto_scan_hardswish.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -28,8 +30,7 @@ def forward(self, inputs): """ forward """ - x = paddle.nn.functional.hardswish(inputs) - return x + return paddle.nn.functional.hardswish(inputs) class TestHardswishConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_index_put.py b/tests/test_auto_scan_index_put.py index 00a459d79..0c20cf21f 100644 --- a/tests/test_auto_scan_index_put.py +++ b/tests/test_auto_scan_index_put.py @@ -13,13 +13,14 @@ # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st import numpy as np -from onnxbase import randtool -from onnxbase import _test_with_pir +from auto_scan_test import BaseNet, OPConvertAutoScanTest +from onnxbase import _test_with_pir, randtool + +import paddle class Net(BaseNet): @@ -30,10 +31,9 @@ class Net(BaseNet): def forward(self, inputs, indices, value): accumulate = self.config.get("accumulate", False) indices = list(indices) # index_put() expects a list/tuple of tensors - x = paddle.index_put( + return paddle.index_put( inputs, indices=indices, value=value, accumulate=accumulate ) - return x class TestIndexPutConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_index_sample.py b/tests/test_auto_scan_index_sample.py index 55c30969b..2d0e35177 100755 --- a/tests/test_auto_scan_index_sample.py +++ b/tests/test_auto_scan_index_sample.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import randtool +import paddle + class Net(BaseNet): """ @@ -28,8 +30,7 @@ def forward(self, input, index): """ forward """ - x = paddle.index_sample(input, index=index) - return x + return paddle.index_sample(input, index=index) class TestIndexSampleConvert(OPConvertAutoScanTest): @@ -51,8 +52,7 @@ def generator_data(): import random n = random.randint(1, input_shape[1]) - input_data = randtool("int", 0.0, input_shape[1] - 1, [input_shape[0], n]) - return input_data + return randtool("int", 0.0, input_shape[1] - 1, [input_shape[0], n]) config = { "op_names": ["index_sample"], diff --git a/tests/test_auto_scan_index_select.py b/tests/test_auto_scan_index_select.py index 9c481e89e..24264d6f3 100644 --- a/tests/test_auto_scan_index_select.py +++ b/tests/test_auto_scan_index_select.py @@ -12,12 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest +from onnxbase import _test_with_pir, randtool + import paddle -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(BaseNet): @@ -29,8 +30,7 @@ def forward(self, input, index): """ forward """ - x = paddle.index_select(input, index=index, axis=self.config["axis"]) - return x + return paddle.index_select(input, index=index, axis=self.config["axis"]) class TestIndexselectConvert(OPConvertAutoScanTest): @@ -53,8 +53,7 @@ def generator_data(): n = random.randint(1, len(input_shape)) min_val = min(input_shape) - input_data = randtool("int", 0.0, min_val - 1, n) - return input_data + return randtool("int", 0.0, min_val - 1, n) config = { "op_names": ["index_select"], diff --git a/tests/test_auto_scan_instance_norm.py b/tests/test_auto_scan_instance_norm.py index b6a6da803..84a07f6d0 100755 --- a/tests/test_auto_scan_instance_norm.py +++ b/tests/test_auto_scan_instance_norm.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest +from onnxbase import _test_with_pir + import paddle from paddle import ParamAttr -from onnxbase import _test_with_pir class Net(BaseNet): @@ -26,7 +28,7 @@ class Net(BaseNet): """ def __init__(self, config=None): - super(Net, self).__init__(config) + super().__init__(config) param_shape = [self.config["input_shape"][1]] dtype = self.config["dtype"] @@ -62,7 +64,7 @@ def forward(self, inputs): """ forward """ - x = paddle.nn.functional.instance_norm( + return paddle.nn.functional.instance_norm( inputs, running_mean=self.mean, running_var=self.variance, @@ -73,7 +75,6 @@ def forward(self, inputs): eps=self.config["epsilon"], data_format=self.config["data_format"], ) - return x class TestInstanceNormConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_interpolate_ops.py b/tests/test_auto_scan_interpolate_ops.py index a1c0493fb..08cf964b2 100755 --- a/tests/test_auto_scan_interpolate_ops.py +++ b/tests/test_auto_scan_interpolate_ops.py @@ -12,13 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st import numpy as np -import unittest -import paddle +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_only_pir +import paddle + op_api_map = { "linear": "linear_interp_v2", "bilinear": "bilinear_interp_v2", @@ -69,7 +71,7 @@ def forward(self, inputs): # align_corners True is only set with the interpolating modes: linear | bilinear | bicubic | trilinear if mode == "nearest": align_corners = False - x = paddle.nn.functional.interpolate( + return paddle.nn.functional.interpolate( x=inputs, size=size, scale_factor=scale_factor, @@ -78,7 +80,6 @@ def forward(self, inputs): align_mode=align_mode, data_format=data_format, ) - return x class TestInterpolateConvert(OPConvertAutoScanTest): @@ -204,7 +205,7 @@ def forward(self, inputs): # align_corners True is only set with the interpolating modes: linear | bilinear | bicubic | trilinear if mode == "nearest": align_corners = False - x = paddle.nn.functional.interpolate( + return paddle.nn.functional.interpolate( x=inputs, size=size, scale_factor=scale_factor, @@ -213,7 +214,6 @@ def forward(self, inputs): align_mode=align_mode, data_format=data_format, ) - return x class TestInterpolateConvert1(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_isx_ops.py b/tests/test_auto_scan_isx_ops.py index ed8f89fe4..65ebb2491 100644 --- a/tests/test_auto_scan_isx_ops.py +++ b/tests/test_auto_scan_isx_ops.py @@ -12,11 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st -from onnxbase import _test_with_pir import numpy as np -import unittest +from auto_scan_test import BaseNet, OPConvertAutoScanTest +from onnxbase import _test_with_pir + import paddle op_api_map = { @@ -75,8 +77,7 @@ def generator_data(): minus_nan_data = np.ones(shape=input_shape) minus_nan_data[:] = float("-nan") minus_nan_condition = np.random.randint(-2, 2, input_shape).astype("bool") - input_data = np.where(minus_nan_condition, input_data, minus_nan_data) - return input_data + return np.where(minus_nan_condition, input_data, minus_nan_data) config = { "op_names": ["isX"], @@ -86,10 +87,10 @@ def generator_data(): "input_spec_shape": [], } - models = list() - op_names = list() - opset_versions = list() - for op_name, i in op_api_map.items(): + models = [] + op_names = [] + opset_versions = [] + for op_name in op_api_map: config["op_names"] = op_name models.append(Net(config)) op_names.append(op_name) diff --git a/tests/test_auto_scan_layer_norm.py b/tests/test_auto_scan_layer_norm.py index 59f5e12cb..66532d6f1 100755 --- a/tests/test_auto_scan_layer_norm.py +++ b/tests/test_auto_scan_layer_norm.py @@ -12,13 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st import numpy as np -import unittest -import paddle +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -26,7 +28,7 @@ class Net(BaseNet): """ def __init__(self, config=None): - super(Net, self).__init__(config) + super().__init__(config) param_shape = [np.prod(self.config["normalized_shape"])] self.weight = self.create_parameter( attr=None, @@ -40,14 +42,13 @@ def forward(self, inputs): """ forward """ - x = paddle.nn.functional.layer_norm( + return paddle.nn.functional.layer_norm( inputs, weight=self.weight if self.config["has_weight_bias"] else None, bias=self.bias if self.config["has_weight_bias"] else None, normalized_shape=self.config["normalized_shape"], epsilon=self.config["epsilon"], ) - return x class TestLayerNormConvert(OPConvertAutoScanTest): @@ -60,7 +61,7 @@ def sample_convert_config(self, draw): input_shape = draw( st.lists(st.integers(min_value=2, max_value=8), min_size=2, max_size=5) ) - input_spec = [-1] * len(input_shape) + [-1] * len(input_shape) # When the dims is 5 and the last dimension is too small, an error will be reported due to the optimization of ONNXRuntime if len(input_shape) == 5: @@ -68,10 +69,7 @@ def sample_convert_config(self, draw): axis = draw(st.integers(min_value=1, max_value=len(input_shape) - 1)) axis_type = draw(st.sampled_from(["int", "list"])) - if axis_type == "int": - normalized_shape = input_shape[-1] - else: - normalized_shape = input_shape[axis:] + normalized_shape = input_shape[-1] if axis_type == "int" else input_shape[axis:] dtype = draw(st.sampled_from(["float32"])) epsilon = draw(st.floats(min_value=1e-12, max_value=1e-5)) diff --git a/tests/test_auto_scan_leakyrelu.py b/tests/test_auto_scan_leakyrelu.py index 6375debd0..cf9100338 100644 --- a/tests/test_auto_scan_leakyrelu.py +++ 
b/tests/test_auto_scan_leakyrelu.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -28,10 +30,9 @@ def forward(self, inputs): """ forward """ - x = paddle.nn.functional.leaky_relu( + return paddle.nn.functional.leaky_relu( inputs, negative_slope=self.config["negative_slope"] ) - return x class TestLeakyreluConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_linspace.py b/tests/test_auto_scan_linspace.py index bfbc26a54..2ee446763 100755 --- a/tests/test_auto_scan_linspace.py +++ b/tests/test_auto_scan_linspace.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): def forward(self): @@ -25,8 +27,7 @@ def forward(self): stop = self.config["stop"] num = self.config["num"] dtype = self.config["dtype"] - x = paddle.linspace(start=start, stop=stop, num=num, dtype=dtype) - return x + return paddle.linspace(start=start, stop=stop, num=num, dtype=dtype) class TestLinspaceConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_log.py b/tests/test_auto_scan_log.py index 5bc7315b4..f5aa7533d 100755 --- a/tests/test_auto_scan_log.py +++ b/tests/test_auto_scan_log.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + op_api_map = {"log1p": paddle.log1p, "log10": paddle.log10} @@ -47,9 +49,9 @@ def sample_convert_config(self, draw): "input_spec_shape": [], } - models = list() - op_names = list() - for op_name, i in op_api_map.items(): + models = [] + op_names = [] + for op_name in op_api_map: config["op_names"] = op_name models.append(Net(config)) op_names.append(op_name) diff --git a/tests/test_auto_scan_logical_ops.py b/tests/test_auto_scan_logical_ops.py index d5b2589cb..877e58f7d 100755 --- a/tests/test_auto_scan_logical_ops.py +++ b/tests/test_auto_scan_logical_ops.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle op_api_map = { @@ -54,8 +56,7 @@ def forward(self, inputs1, inputs2): ): inputs1 = inputs1.astype("int32") inputs2 = inputs2.astype("int32") - x = op_api_map[self.config["op_names"]](inputs1, inputs2) - return x + return op_api_map[self.config["op_names"]](inputs1, inputs2) class TestLogicopsConvert(OPConvertAutoScanTest): @@ -104,10 +105,10 @@ def sample_convert_config(self, draw): "input_spec_shape": [], } - models = list() - op_names = list() - opset_versions = list() - for op_name, i in op_api_map.items(): + models = [] + op_names = [] + opset_versions = [] + for op_name in op_api_map: config["op_names"] = op_name models.append(Net(config)) op_names.append(op_name) diff --git a/tests/test_auto_scan_logsigmoid.py b/tests/test_auto_scan_logsigmoid.py index 253e319d4..6877f6a75 100755 --- 
a/tests/test_auto_scan_logsigmoid.py +++ b/tests/test_auto_scan_logsigmoid.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle @@ -27,8 +29,7 @@ def forward(self, inputs): """ forward """ - x = paddle.nn.functional.log_sigmoid(inputs) - return x + return paddle.nn.functional.log_sigmoid(inputs) class TestLogsigmoidConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_logsoftmax.py b/tests/test_auto_scan_logsoftmax.py index 37a0a7397..9ecaffe79 100755 --- a/tests/test_auto_scan_logsoftmax.py +++ b/tests/test_auto_scan_logsoftmax.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle @@ -27,8 +29,7 @@ def forward(self, inputs): """ forward """ - x = paddle.nn.functional.log_softmax(inputs, axis=self.config["axis"]) - return x + return paddle.nn.functional.log_softmax(inputs, axis=self.config["axis"]) class TestLogSoftmaxConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_logsumexp.py b/tests/test_auto_scan_logsumexp.py index 45f104b7b..922128791 100644 --- a/tests/test_auto_scan_logsumexp.py +++ b/tests/test_auto_scan_logsumexp.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle @@ -27,10 +29,9 @@ def forward(self, inputs): """ forward """ - x = paddle.logsumexp( + return paddle.logsumexp( inputs, axis=self.config["axis"], keepdim=self.config["keepdim"] ) - return x class TestLogsumexpConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_lookup_table_v2.py b/tests/test_auto_scan_lookup_table_v2.py index ab3b276d9..4a6ea66a7 100755 --- a/tests/test_auto_scan_lookup_table_v2.py +++ b/tests/test_auto_scan_lookup_table_v2.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -from onnxbase import randtool -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest +from onnxbase import randtool + import paddle @@ -28,13 +30,12 @@ def forward(self, inputs, weight): """ forward """ - x = paddle.nn.functional.embedding( + return paddle.nn.functional.embedding( inputs, weight, padding_idx=self.config["padding_idx"], sparse=self.config["sparse"], ) - return x class TestKookuptablev2Convert(OPConvertAutoScanTest): @@ -53,8 +54,7 @@ def sample_convert_config(self, draw): ) def generator_data(): - input_data = randtool("int", 0, weight_shape[0] - 1, input_shape) - return input_data + return randtool("int", 0, weight_shape[0] - 1, input_shape) padding_idx = None if draw(st.booleans()): diff --git a/tests/test_auto_scan_masked_select.py b/tests/test_auto_scan_masked_select.py index 26e573700..08856b9e0 100644 --- a/tests/test_auto_scan_masked_select.py +++ b/tests/test_auto_scan_masked_select.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under 
the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -28,8 +30,7 @@ def forward(self, x, y): """ forward """ - x = paddle.masked_select(x, y) - return x + return paddle.masked_select(x, y) class TestMaskedselectConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_matmul.py b/tests/test_auto_scan_matmul.py index 56de84e51..2d9290f52 100755 --- a/tests/test_auto_scan_matmul.py +++ b/tests/test_auto_scan_matmul.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -28,13 +30,12 @@ def forward(self, x, y): """ forward """ - x = paddle.matmul( + return paddle.matmul( x, y, transpose_x=self.config["transpose_x"], transpose_y=self.config["transpose_y"], ) - return x class TestMatmulConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_meshgrid.py b/tests/test_auto_scan_meshgrid.py index 4b24dab1a..a32565532 100644 --- a/tests/test_auto_scan_meshgrid.py +++ b/tests/test_auto_scan_meshgrid.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -28,8 +30,7 @@ def forward(self, inputs1, inputs2): """ forward """ - x = paddle.meshgrid([inputs1, inputs2]) - return x + return paddle.meshgrid([inputs1, inputs2]) class TestMeshgridConvert(OPConvertAutoScanTest): @@ -75,8 +76,7 @@ def forward(self, inputs1, inputs2, inputs3): """ forward """ - x = paddle.meshgrid([inputs1, inputs2, inputs3]) - return x + return paddle.meshgrid([inputs1, inputs2, inputs3]) class TestMeshgridConvert1(OPConvertAutoScanTest): @@ -126,8 +126,7 @@ def forward(self, inputs1, inputs2, inputs3, inputs4, inputs5): """ forward """ - x = paddle.meshgrid([inputs1, inputs2, inputs3, inputs4, inputs5]) - return x + return paddle.meshgrid([inputs1, inputs2, inputs3, inputs4, inputs5]) class TestMeshgridConvert2(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_mish.py b/tests/test_auto_scan_mish.py index 311f501ba..275292446 100755 --- a/tests/test_auto_scan_mish.py +++ b/tests/test_auto_scan_mish.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import randtool -import unittest + import paddle @@ -28,8 +30,7 @@ def forward(self, inputs): """ forward """ - x = paddle.nn.functional.mish(inputs) - return x + return paddle.nn.functional.mish(inputs) class TestMishConvert(OPConvertAutoScanTest): @@ -44,8 +45,7 @@ def sample_convert_config(self, draw): ) def generator_data(): - input_data = randtool("float", -100, 100, input_shape) - return input_data + return randtool("float", -100, 100, input_shape) dtype = draw(st.sampled_from(["float32", "float64"])) diff --git a/tests/test_auto_scan_multiclass_nms.py b/tests/test_auto_scan_multiclass_nms.py index 07241963a..06a0b8594 100755 --- a/tests/test_auto_scan_multiclass_nms.py +++ b/tests/test_auto_scan_multiclass_nms.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from multiprocessing import Process -from multiprocessing import Queue +from multiprocessing import Process, Queue + import numpy as np from detection_ops.nms import multiclass_nms @@ -51,7 +51,7 @@ def __init__( return_index, return_rois_num, ): - super(Model, self).__init__() + super().__init__() self.score_threshold = score_threshold self.nms_top_k = nms_top_k self.keep_top_k = keep_top_k @@ -135,7 +135,7 @@ def gen_onnx_export(q): for opset in range(10, 16): import paddle2onnx - onnx_file_path = "nms/nms_{}.onnx".format(opset) + onnx_file_path = f"nms/nms_{opset}.onnx" paddle2onnx.export( "nms/model.pdmodel", "", @@ -150,39 +150,39 @@ def gen_onnx_export(q): sess = ort.InferenceSession(onnx_file_path) result1 = sess.run(None, {"x0": data[0], "x1": data[1]}) - assert len(result0) == len( - result1 - ), "multiclass_nms3: Length of result is not same" + assert len(result0) == len(result1), ( + "multiclass_nms3: Length of result is not same" + ) diff = np.fabs(all_sort(result0[0]) - all_sort(result1[0])) print("Max diff of BBoxes:", result0[0].shape, result1[0].shape, diff.max()) - assert diff.max() < 1e-05, "Difference={} of bbox is exceed 1e-05".format( - diff.max() - ) + assert diff.max() < 1e-05, f"Difference={diff.max()} of bbox is exceed 1e-05" for i in range(1, len(result0)): diff = np.fabs(result0[i] - result1[i]) print(result0[i], result1[i]) - assert ( - diff.max() < 1e-05 - ), "Difference={} of output {}(shape is {}) is exceed 1e-05".format( - diff.max(), i, result0[i].shape + assert diff.max() < 1e-05, ( + f"Difference={diff.max()} of output {i}(shape is {result0[i].shape}) is exceed 1e-05" ) q.put(True) def test_nms(): - for i in range(100): + for _i in range(100): q0 = Queue() p0 = Process(target=gen_paddle_nms, args=(q0,)) p0.start() p0.join() if not q0.get(timeout=1): - assert False, "Test failed for multiclass_nms as gen paddle model step." + raise AssertionError( + "Test failed for multiclass_nms as gen paddle model step." 
+ ) q1 = Queue() p1 = Process(target=gen_onnx_export, args=(q1,)) p1.start() p1.join() if not q1.get(timeout=1): - assert False, "Test failed for multiclass_nms at gen_onnx_export step." + raise AssertionError( + "Test failed for multiclass_nms at gen_onnx_export step." + ) if __name__ == "__main__": diff --git a/tests/test_auto_scan_mv.py b/tests/test_auto_scan_mv.py index 73f537b7c..51d6baf1d 100644 --- a/tests/test_auto_scan_mv.py +++ b/tests/test_auto_scan_mv.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle @@ -27,8 +29,7 @@ def forward(self, x, y): """ forward """ - x = paddle.mv(x, y) - return x + return paddle.mv(x, y) class TestMvConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_norm.py b/tests/test_auto_scan_norm.py index be00faf78..bb1454a11 100755 --- a/tests/test_auto_scan_norm.py +++ b/tests/test_auto_scan_norm.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle @@ -27,10 +29,9 @@ def forward(self, inputs): """ forward """ - x = paddle.nn.functional.normalize( + return paddle.nn.functional.normalize( inputs, axis=self.config["axis"], epsilon=self.config["epsilon"] ) - return x class TestNormConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_one_hot_v2.py b/tests/test_auto_scan_one_hot_v2.py index 4079f5509..cb8c26825 100755 --- a/tests/test_auto_scan_one_hot_v2.py +++ b/tests/test_auto_scan_one_hot_v2.py @@ -12,12 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -from onnxbase import randtool + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest +from onnxbase import _test_only_pir, randtool + import paddle -from onnxbase import _test_only_pir class Net(BaseNet): @@ -32,8 +33,7 @@ def forward(self, inputs): num_classes = self.config["num_classes"] if self.config["is_tensor"]: num_classes = paddle.to_tensor([num_classes]) - x = paddle.nn.functional.one_hot(inputs, num_classes) - return x + return paddle.nn.functional.one_hot(inputs, num_classes) class TestOneHotV2Convert(OPConvertAutoScanTest): @@ -50,8 +50,7 @@ def sample_convert_config(self, draw): num_classes = draw(st.integers(min_value=10, max_value=20)) def generator_data(): - input_data = randtool("int", 0, num_classes - 1, input_shape) - return input_data + return randtool("int", 0, num_classes - 1, input_shape) dtype = draw(st.sampled_from(["int32", "int64"])) diff --git a/tests/test_auto_scan_p_norm.py b/tests/test_auto_scan_p_norm.py index 1863b1ac4..92b1bea23 100755 --- a/tests/test_auto_scan_p_norm.py +++ 
b/tests/test_auto_scan_p_norm.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -28,13 +30,12 @@ def forward(self, inputs): """ forward """ - x = paddle.norm( + return paddle.norm( inputs, p=self.config["p"], axis=self.config["axis"], keepdim=self.config["keepdim"], ) - return x class TestPnormConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_pad.py b/tests/test_auto_scan_pad.py index c15089227..6ae06c6c9 100755 --- a/tests/test_auto_scan_pad.py +++ b/tests/test_auto_scan_pad.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle @@ -24,10 +26,9 @@ def forward(self, inputs): mode = self.config["mode"] value = self.config["value"] data_format = self.config["data_format"] - x = paddle.nn.functional.pad( + return paddle.nn.functional.pad( inputs, pad=pad, mode=mode, value=value, data_format=data_format ) - return x class TestPadopsConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_pad2d.py b/tests/test_auto_scan_pad2d.py index ce7f567bd..8416604b2 100755 --- a/tests/test_auto_scan_pad2d.py +++ b/tests/test_auto_scan_pad2d.py @@ -12,11 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st -from onnxbase import randtool, _test_with_pir import numpy as np -import unittest +from auto_scan_test import BaseNet, OPConvertAutoScanTest +from onnxbase import _test_with_pir, randtool + import paddle @@ -26,10 +28,9 @@ def forward(self, inputs): mode = self.config["mode"] pad_value = self.config["pad_value"] data_format = self.config["data_format"] - x = paddle.nn.functional.pad( + return paddle.nn.functional.pad( inputs, pad=paddings, mode=mode, value=pad_value, data_format=data_format ) - return x class TestPadopsConvert(OPConvertAutoScanTest): @@ -83,10 +84,9 @@ def forward(self, inputs, padding): mode = self.config["mode"] pad_value = self.config["pad_value"] data_format = self.config["data_format"] - x = paddle.nn.functional.pad( + return paddle.nn.functional.pad( inputs, pad=padding, mode=mode, value=pad_value, data_format=data_format ) - return x class TestPadopsConvert_Paddingtensor(OPConvertAutoScanTest): @@ -111,8 +111,7 @@ def sample_convert_config(self, draw): data_format = draw(st.sampled_from(["NCHW", "NHWC"])) def generator_data(): - input_data = randtool("int", 1, 10, paddings) - return input_data + return randtool("int", 1, 10, paddings) config = { "op_names": ["pad2d"], @@ -144,10 +143,9 @@ def forward(self, inputs): mode = self.config["mode"] pad_value = self.config["pad_value"] data_format = self.config["data_format"] - x = paddle.nn.functional.pad( + return paddle.nn.functional.pad( inputs, pad=padding, mode=mode, value=pad_value, data_format=data_format ) - return x class TestPadopsConvert_Constanttensor(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_pad3d.py b/tests/test_auto_scan_pad3d.py index c19fff784..3756ae420 100755 --- a/tests/test_auto_scan_pad3d.py +++ b/tests/test_auto_scan_pad3d.py @@ -12,11 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st -from onnxbase import _test_with_pir import numpy as np -import unittest +from auto_scan_test import BaseNet, OPConvertAutoScanTest +from onnxbase import _test_with_pir + import paddle @@ -30,9 +32,7 @@ def forward(self, inputs): inputs, pad=pad, mode=mode, value=value, data_format=data_format ) shape = paddle.shape(x) - x = paddle.reshape(x, shape) - - return x + return paddle.reshape(x, shape) class TestPadopsConvert(OPConvertAutoScanTest): @@ -108,9 +108,7 @@ def forward(self, inputs): inputs, pad, mode=mode, value=value, data_format=data_format ) shape = paddle.shape(x) - x = paddle.reshape(x, shape) - - return x + return paddle.reshape(x, shape) class TestPadopsConvert_Constanttensor(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_partial_ops.py b/tests/test_auto_scan_partial_ops.py index 0c1483fb2..998d47a25 100755 --- a/tests/test_auto_scan_partial_ops.py +++ b/tests/test_auto_scan_partial_ops.py @@ -12,11 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from paddle.incubate.layers import partial_sum, partial_concat -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest +from paddle.incubate.layers import partial_concat, partial_sum + name2fun_dict = {} name2fun_dict["partial_sum"] = partial_sum name2fun_dict["partial_concat"] = partial_concat @@ -32,14 +33,13 @@ def forward(self, inputs1, inputs2): forward """ inputs_list = [inputs1] - for i in range(self.config["repeat_times"]): + for _i in range(self.config["repeat_times"]): inputs_list.append(inputs2) - x = name2fun_dict[self.config["op_names"][0]]( + return name2fun_dict[self.config["op_names"][0]]( inputs_list, start_index=self.config["start_index"], length=self.config["length"], ) - return x class TestConcatConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_pixel_shuffle.py b/tests/test_auto_scan_pixel_shuffle.py index 423660c6f..683b8c0fe 100644 --- a/tests/test_auto_scan_pixel_shuffle.py +++ b/tests/test_auto_scan_pixel_shuffle.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle @@ -27,12 +29,10 @@ def forward(self, inputs): """ forward """ - x = paddle.nn.functional.pixel_shuffle( + return paddle.nn.functional.pixel_shuffle( inputs, upscale_factor=self.config["upscale_factor"] ) - return x - class TestPixelshuffleConvert(OPConvertAutoScanTest): """ diff --git a/tests/test_auto_scan_pool_adaptive_avg_ops.py b/tests/test_auto_scan_pool_adaptive_avg_ops.py index ddc91c7c2..68d3d4e76 100755 --- a/tests/test_auto_scan_pool_adaptive_avg_ops.py +++ b/tests/test_auto_scan_pool_adaptive_avg_ops.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle @@ -28,8 +30,7 @@ def forward(self, inputs): forward """ output_size = self.config["output_size"] - x = paddle.nn.functional.adaptive_avg_pool1d(inputs, output_size=output_size) - return x + return paddle.nn.functional.adaptive_avg_pool1d(inputs, output_size=output_size) class TestAdaptiveAvgPool1dConvert(OPConvertAutoScanTest): @@ -81,10 +82,9 @@ def forward(self, inputs): else: output_size = self.config["output_size"] data_format = self.config["data_format"] - x = paddle.nn.functional.adaptive_avg_pool2d( + return paddle.nn.functional.adaptive_avg_pool2d( inputs, output_size=output_size, data_format=data_format ) - return x class TestAdaptiveAvgPool2dConvert(OPConvertAutoScanTest): @@ -145,10 +145,9 @@ def forward(self, inputs): """ output_size = self.config["output_size"] data_format = self.config["data_format"] - x = paddle.nn.functional.adaptive_avg_pool3d( + return 
paddle.nn.functional.adaptive_avg_pool3d( inputs, output_size=output_size, data_format=data_format ) - return x class TestAdaptiveAvgPool3dConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_pool_adaptive_max_ops.py b/tests/test_auto_scan_pool_adaptive_max_ops.py index f00d721d7..489c3a715 100644 --- a/tests/test_auto_scan_pool_adaptive_max_ops.py +++ b/tests/test_auto_scan_pool_adaptive_max_ops.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class NetAvgPool1d(BaseNet): """ @@ -29,10 +31,9 @@ def forward(self, inputs): forward """ output_size = self.config["output_size"] - x = paddle.nn.functional.adaptive_max_pool1d( + return paddle.nn.functional.adaptive_max_pool1d( inputs, output_size=output_size, return_mask=False ) - return x class TestAdaptiveAvgPool1dConvert(OPConvertAutoScanTest): @@ -82,10 +83,9 @@ def forward(self, inputs): forward """ output_size = self.config["output_size"] - x = paddle.nn.functional.adaptive_max_pool2d( + return paddle.nn.functional.adaptive_max_pool2d( inputs, output_size, return_mask=False ) - return x class TestAdaptiveAvgPool2dConvert(OPConvertAutoScanTest): @@ -144,10 +144,9 @@ def forward(self, inputs): forward """ output_size = self.config["output_size"] - x = paddle.nn.functional.adaptive_max_pool3d( + return paddle.nn.functional.adaptive_max_pool3d( inputs, output_size=output_size, return_mask=False ) - return x class TestAdaptiveAvgPool3dConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_pool_avg_ops.py b/tests/test_auto_scan_pool_avg_ops.py index b84c20e0e..d74794498 100755 --- a/tests/test_auto_scan_pool_avg_ops.py +++ 
b/tests/test_auto_scan_pool_avg_ops.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st import numpy as np -import unittest +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle @@ -32,7 +34,7 @@ def forward(self, inputs): stride = self.config["stride"] padding = self.config["padding"] ceil_mode = self.config["ceil_mode"] - x = paddle.nn.functional.avg_pool1d( + return paddle.nn.functional.avg_pool1d( inputs, kernel_size, stride=stride, @@ -40,7 +42,6 @@ def forward(self, inputs): exclusive=True, ceil_mode=ceil_mode, ) - return x class TestAvgpool1dConvert(OPConvertAutoScanTest): @@ -122,7 +123,7 @@ def forward(self, inputs): padding = self.config["padding"] ceil_mode = self.config["ceil_mode"] data_format = self.config["data_format"] - x = paddle.nn.functional.avg_pool2d( + return paddle.nn.functional.avg_pool2d( inputs, kernel_size, stride=stride, @@ -133,8 +134,6 @@ def forward(self, inputs): data_format=data_format, ) - return x - class TestAvgpool2dConvert(OPConvertAutoScanTest): """ @@ -211,9 +210,9 @@ def sample_convert_config(self, draw): axis=0, ).tolist() if data_format == "NCHW": - padding = [[0, 0]] + [[0, 0]] + padding1 + padding2 + padding = [[0, 0], [0, 0], *padding1, *padding2] else: - padding = [[0, 0]] + padding1 + padding2 + [[0, 0]] + padding = [[0, 0], *padding1, *padding2, [0, 0]] else: padding = 0 @@ -260,7 +259,7 @@ def forward(self, inputs): padding = self.config["padding"] ceil_mode = self.config["ceil_mode"] data_format = self.config["data_format"] - x = paddle.nn.functional.avg_pool3d( + return paddle.nn.functional.avg_pool3d( inputs, kernel_size, stride=stride, @@ -270,7 +269,6 @@ def forward(self, inputs): divisor_override=None, data_format=data_format, ) - return x class TestAvgpool3dConvert(OPConvertAutoScanTest): @@ -360,9 
+358,9 @@ def sample_convert_config(self, draw): axis=0, ).tolist() if data_format == "NCDHW": - padding = [[0, 0]] + [[0, 0]] + padding1 + padding2 + padding3 + padding = [[0, 0], [0, 0], *padding1, *padding2, *padding3] else: - padding = [[0, 0]] + padding1 + padding2 + padding3 + [[0, 0]] + padding = [[0, 0], *padding1, *padding2, *padding3, [0, 0]] else: padding = 0 diff --git a/tests/test_auto_scan_pool_max_ops.py b/tests/test_auto_scan_pool_max_ops.py index 2472be174..87767c90a 100755 --- a/tests/test_auto_scan_pool_max_ops.py +++ b/tests/test_auto_scan_pool_max_ops.py @@ -12,13 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st import numpy as np -import unittest -import paddle +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class NetMaxpool1d(BaseNet): """ @@ -85,19 +87,13 @@ def sample_convert_config(self, draw): padding = 0 - if return_mask: - opset_version = [[9, 15]] - else: - opset_version = [[7, 9, 15]] + opset_version = [[9, 15]] if return_mask else [[7, 9, 15]] if ceil_mode: opset_version = [10, 15] if padding == "VALID": ceil_mode = False - if return_mask: - op_names = "max_pool2d_with_index" - else: - op_names = "pool2d" + op_names = "max_pool2d_with_index" if return_mask else "pool2d" config = { "op_names": [op_names], @@ -228,28 +224,22 @@ def sample_convert_config(self, draw): axis=0, ).tolist() if data_format == "NCHW": - padding = [[0, 0]] + [[0, 0]] + padding1 + padding2 + padding = [[0, 0], [0, 0], *padding1, *padding2] else: - padding = [[0, 0]] + padding1 + padding2 + [[0, 0]] + padding = [[0, 0], *padding1, *padding2, [0, 0]] else: padding = 0 if return_mask and padding_type in ["list2", "list4", "list8"]: padding = draw(st.integers(min_value=1, max_value=5)) - if return_mask: - opset_version = [[9, 
15]] - else: - opset_version = [[7, 9, 15]] + opset_version = [[9, 15]] if return_mask else [[7, 9, 15]] if ceil_mode: opset_version = [10, 15] if padding == "VALID": ceil_mode = False - if return_mask: - op_names = "max_pool2d_with_index" - else: - op_names = "pool2d" + op_names = "max_pool2d_with_index" if return_mask else "pool2d" config = { "op_names": [op_names], "test_data_shapes": [input_shape], @@ -392,28 +382,22 @@ def sample_convert_config(self, draw): axis=0, ).tolist() if data_format == "NCDHW": - padding = [[0, 0]] + [[0, 0]] + padding1 + padding2 + padding3 + padding = [[0, 0], [0, 0], *padding1, *padding2, *padding3] else: - padding = [[0, 0]] + padding1 + padding2 + padding3 + [[0, 0]] + padding = [[0, 0], *padding1, *padding2, *padding3, [0, 0]] else: padding = 0 if return_mask and padding_type in ["list3", "list6", "list10"]: padding = draw(st.integers(min_value=1, max_value=5)) - if return_mask: - opset_version = [[9, 15]] - else: - opset_version = [[7, 9, 15]] + opset_version = [[9, 15]] if return_mask else [[7, 9, 15]] if ceil_mode: opset_version = [10, 15] if padding == "VALID": ceil_mode = False - if return_mask: - op_names = "max_pool3d_with_index" - else: - op_names = "pool3d" + op_names = "max_pool3d_with_index" if return_mask else "pool3d" config = { "op_names": [op_names], diff --git a/tests/test_auto_scan_pow.py b/tests/test_auto_scan_pow.py index db774464a..ac24fa206 100644 --- a/tests/test_auto_scan_pow.py +++ b/tests/test_auto_scan_pow.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -28,8 +30,7 @@ def forward(self, x): """ forward """ - x = paddle.pow(x, y=self.config["y"]) - return x + return paddle.pow(x, y=self.config["y"]) class TestPowConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_prelu.py b/tests/test_auto_scan_prelu.py index c4b3d6f8f..a300349d2 100755 --- a/tests/test_auto_scan_prelu.py +++ b/tests/test_auto_scan_prelu.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -28,8 +30,7 @@ def forward(self, inputs, weights): """ forward """ - x = paddle.nn.functional.prelu(inputs, weight=weights) - return x + return paddle.nn.functional.prelu(inputs, weight=weights) class TestPreluConvert(OPConvertAutoScanTest): @@ -42,10 +43,7 @@ def sample_convert_config(self, draw): input_shape = draw( st.lists(st.integers(min_value=5, max_value=20), min_size=0, max_size=4) ) - if len(input_shape) == 0: - weight_shape = [] - else: - weight_shape = [1] + weight_shape = [] if len(input_shape) == 0 else [1] dtype = draw(st.sampled_from(["float32", "float64"])) diff --git a/tests/test_auto_scan_put_along_axis.py b/tests/test_auto_scan_put_along_axis.py index e70d2b23f..60b20bd71 100644 --- a/tests/test_auto_scan_put_along_axis.py +++ b/tests/test_auto_scan_put_along_axis.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # 
limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_only_pir, randtool +import paddle + class Net(BaseNet): """ @@ -28,10 +30,9 @@ def forward(self, arr, indices, values): """ forward """ - x = paddle.put_along_axis( + return paddle.put_along_axis( arr, indices, values, axis=self.config["axis"], reduce=self.config["reduce"] ) - return x class TestPutAlongAxisConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_range.py b/tests/test_auto_scan_range.py index b3a0c9de7..b8ed3ed2d 100755 --- a/tests/test_auto_scan_range.py +++ b/tests/test_auto_scan_range.py @@ -12,13 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st import numpy as np -import unittest -import paddle +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): def forward(self): @@ -35,8 +37,7 @@ def forward(self): step = paddle.to_tensor(np.array(step)).astype(self.config["index_dtype"]) dtype = self.config["dtype"] - x = paddle.arange(start=start, end=end, step=step, dtype=dtype) - return x + return paddle.arange(start=start, end=end, step=step, dtype=dtype) class TestArangeConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_reduce_all_or_any.py b/tests/test_auto_scan_reduce_all_or_any.py index cb07fe3b1..5669cdff5 100755 --- a/tests/test_auto_scan_reduce_all_or_any.py +++ b/tests/test_auto_scan_reduce_all_or_any.py @@ -12,11 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st +import random import unittest + +import hypothesis.strategies as st import paddle -import random +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir op_api_map = { @@ -43,8 +44,7 @@ def forward(self, inputs): inputs, axis=self.config["dim"], keepdim=self.config["keep_dim"] ) x = paddle.unsqueeze(x, axis=0) - x = x.astype("int32") - return x + return x.astype("int32") class TestReduceAllConvert(OPConvertAutoScanTest): @@ -88,10 +88,10 @@ def sample_convert_config(self, draw): "input_spec_shape": [], } - models = list() - op_names = list() - opset_versions = list() - for op_name, i in op_api_map.items(): + models = [] + op_names = [] + opset_versions = [] + for op_name in op_api_map: config["op_names"] = op_name models.append(Net(config)) op_names.append(op_name) diff --git a/tests/test_auto_scan_reduce_ops.py b/tests/test_auto_scan_reduce_ops.py index a283b1bbb..46f40b051 100755 --- a/tests/test_auto_scan_reduce_ops.py +++ b/tests/test_auto_scan_reduce_ops.py @@ -12,11 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st +import random import unittest + +import hypothesis.strategies as st import paddle -import random +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir op_api_map = { @@ -50,8 +51,7 @@ def forward(self, inputs): x = op_api_map[self.config["op_names"]]( inputs, axis=axis, keepdim=self.config["keep_dim"] ) - x = paddle.unsqueeze(x, axis=[0]) - return x + return paddle.unsqueeze(x, axis=[0]) class TestReduceAllConvert(OPConvertAutoScanTest): @@ -104,10 +104,10 @@ def sample_convert_config(self, draw): "axis_dtype": axis_dtype, } - models = list() - op_names = list() - opset_versions = list() - for op_name, i in op_api_map.items(): + models = [] + op_names = [] + opset_versions = [] + for op_name in op_api_map: config["op_names"] = op_name if op_name == "reduce_mean": dtype_mean = draw(st.sampled_from(["float32", "float64"])) diff --git a/tests/test_auto_scan_reshape.py b/tests/test_auto_scan_reshape.py index 3cca00f97..1d7c2e150 100755 --- a/tests/test_auto_scan_reshape.py +++ b/tests/test_auto_scan_reshape.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st import numpy as np -import unittest +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle @@ -28,8 +30,7 @@ def forward(self, inputs): """ forward """ - x = paddle.reshape(inputs, self.config["shape"]) - return x + return paddle.reshape(inputs, self.config["shape"]) class Net1(BaseNet): @@ -41,8 +42,7 @@ def forward(self, inputs, shape): """ forward """ - x = paddle.reshape(inputs, shape) - return x + return paddle.reshape(inputs, shape) class TestReshapeConvert0(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_rnn.py b/tests/test_auto_scan_rnn.py index c7ef8ddb2..91f829f38 100644 --- a/tests/test_auto_scan_rnn.py +++ b/tests/test_auto_scan_rnn.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle @@ -24,7 +26,7 @@ class Net0(BaseNet): """ def __init__(self, config=None): - super(Net0, self).__init__(config) + super().__init__(config) self.lstm = paddle.nn.LSTM( input_size=self.config["input_size"], hidden_size=self.config["hidden_size"], @@ -37,7 +39,7 @@ def forward(self, inputs, prev_h, prev_c): """ forward """ - y, (h, c) = self.lstm(inputs, (prev_h, prev_c)) + y, (_h, _c) = self.lstm(inputs, (prev_h, prev_c)) return y @@ -47,7 +49,7 @@ class Net1(BaseNet): """ def __init__(self, config=None): - super(Net1, self).__init__(config) + super().__init__(config) self.gru = paddle.nn.GRU( input_size=self.config["input_size"], hidden_size=self.config["hidden_size"], @@ -60,7 +62,7 @@ def forward(self, inputs, prev_h): """ forward """ - y, h = self.gru(inputs, prev_h) + y, _h = self.gru(inputs, prev_h) return y @@ -80,14 +82,11 @@ def 
sample_convert_config(self, draw): num_layers = 2 time_major = draw(st.booleans()) if time_major: - t, b, input_size = input_shape + _t, b, input_size = input_shape else: - b, t, input_size = input_shape + b, _t, input_size = input_shape direction = draw(st.sampled_from(["forward", "bidirect"])) - if direction == "forward": - num_directions = 1 - else: - num_directions = 2 + num_directions = 1 if direction == "forward" else 2 prev_h_shape = [num_layers * num_directions, b, hidden_size] @@ -130,14 +129,11 @@ def sample_convert_config(self, draw): num_layers = 2 time_major = draw(st.booleans()) if time_major: - t, b, input_size = input_shape + _t, b, input_size = input_shape else: - b, t, input_size = input_shape + b, _t, input_size = input_shape direction = draw(st.sampled_from(["forward", "bidirect"])) - if direction == "forward": - num_directions = 1 - else: - num_directions = 2 + num_directions = 1 if direction == "forward" else 2 prev_h_shape = [num_layers * num_directions, b, hidden_size] diff --git a/tests/test_auto_scan_roll.py b/tests/test_auto_scan_roll.py index 785b3fe2c..6cc896534 100644 --- a/tests/test_auto_scan_roll.py +++ b/tests/test_auto_scan_roll.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -35,8 +37,7 @@ def forward(self, inputs): # shifts = [paddle.to_tensor(-2), -2] if self.config["is_shifts_tensor"]: shifts = paddle.to_tensor(shifts).astype(self.config["shift_dtype"]) - x = paddle.roll(inputs, shifts=shifts, axis=axis) - return x + return paddle.roll(inputs, shifts=shifts, axis=axis) class TestRollConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_scale.py b/tests/test_auto_scan_scale.py index 276db52cf..da5cd512b 100755 --- a/tests/test_auto_scan_scale.py +++ b/tests/test_auto_scan_scale.py @@ -12,13 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st import numpy as np -import unittest -import paddle +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_only_pir +import paddle + class Net(BaseNet): """ @@ -32,13 +34,12 @@ def forward(self, x): scale = self.config["scale"] if self.config["isTensor"]: scale = paddle.to_tensor(np.array(scale).astype("float32")) - x = paddle.scale( + return paddle.scale( x, scale=scale, bias=self.config["bias"], bias_after_scale=self.config["bias_after_scale"], ) - return x class TestScaleConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_scatter.py b/tests/test_auto_scan_scatter.py index 14d5414bf..430f81c5f 100755 --- a/tests/test_auto_scan_scatter.py +++ b/tests/test_auto_scan_scatter.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import randtool +import paddle + class Net(BaseNet): """ @@ -28,8 +30,9 @@ def forward(self, inputs, index, updates): """ forward """ - x = paddle.scatter(inputs, index, updates, overwrite=self.config["overwrite"]) - return x + return paddle.scatter( + inputs, index, updates, overwrite=self.config["overwrite"] + ) class TestScatterConvert(OPConvertAutoScanTest): @@ -61,8 +64,7 @@ def sample_convert_config(self, draw): opset_version = [11, 15] def generator_index(): - index_list = randtool("int", 0, input_shape[0], index_shape) - return index_list + return randtool("int", 0, input_shape[0], index_shape) config = { "op_names": ["scatter"], diff --git a/tests/test_auto_scan_scatter_nd_add.py b/tests/test_auto_scan_scatter_nd_add.py index bf2d10347..25f88f555 100755 --- a/tests/test_auto_scan_scatter_nd_add.py +++ b/tests/test_auto_scan_scatter_nd_add.py @@ -12,13 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st import numpy as np -import unittest -import paddle +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import randtool +import paddle + class Net(BaseNet): """ @@ -29,8 +31,7 @@ def forward(self, inputs, index, updates): """ forward """ - x = paddle.scatter_nd_add(inputs, index, updates) - return x + return paddle.scatter_nd_add(inputs, index, updates) class TestScatterNdAddConvert(OPConvertAutoScanTest): @@ -60,8 +61,7 @@ def sample_convert_config(self, draw): def generator_index(): min_val = np.min(input_shape) - index_list = randtool("int", 0, min_val, index_shape) - return index_list + return randtool("int", 0, min_val, index_shape) config = { "op_names": ["scatter_nd_add"], diff --git a/tests/test_auto_scan_set_value.py b/tests/test_auto_scan_set_value.py index 4e78a101e..a3d28b33e 100755 --- a/tests/test_auto_scan_set_value.py +++ b/tests/test_auto_scan_set_value.py @@ -12,10 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st -import unittest import copy +import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir diff --git a/tests/test_auto_scan_shape.py b/tests/test_auto_scan_shape.py index c364ae80c..9c900870c 100755 --- a/tests/test_auto_scan_shape.py +++ b/tests/test_auto_scan_shape.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -28,8 +30,7 @@ def forward(self, inputs): """ forward """ - x = paddle.shape(inputs).astype("int32") - return x + return paddle.shape(inputs).astype("int32") class TestShapeConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_shrink_ops.py b/tests/test_auto_scan_shrink_ops.py index 58133c2fb..357386a7b 100755 --- a/tests/test_auto_scan_shrink_ops.py +++ b/tests/test_auto_scan_shrink_ops.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle op_api_map = { @@ -32,7 +34,7 @@ class Net(BaseNet): def forward(self, inputs): - if self.config["op_names"] in ["tanh_shrink"]: + if self.config["op_names"] == "tanh_shrink": x = op_api_map[self.config["op_names"]](inputs) else: x = op_api_map[self.config["op_names"]]( @@ -65,10 +67,10 @@ def sample_convert_config(self, draw): "threshold": threshold, } - models = list() - op_names = list() - opset_versions = list() - for op_name, i in op_api_map.items(): + models = [] + op_names = [] + opset_versions = [] + for op_name in op_api_map: config["op_names"] = op_name models.append(Net(config)) op_names.append(op_name) diff --git a/tests/test_auto_scan_size.py b/tests/test_auto_scan_size.py index 90ed6a3c0..67a08a4d3 100644 --- a/tests/test_auto_scan_size.py +++ b/tests/test_auto_scan_size.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -28,8 +30,7 @@ def forward(self, inputs): """ forward """ - x = paddle.numel(inputs) - return x + return paddle.numel(inputs) class TestNumelConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_slice.py b/tests/test_auto_scan_slice.py index 4b373a0a3..96a54e6e2 100644 --- a/tests/test_auto_scan_slice.py +++ b/tests/test_auto_scan_slice.py @@ -12,13 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st import numpy as np -import unittest -import paddle +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_only_pir +import paddle + class Net(BaseNet): """ @@ -36,8 +38,7 @@ def forward(self, inputs): starts = paddle.to_tensor(starts) if self.config["isEndsTensor"]: ends = paddle.to_tensor(ends) - x = paddle.slice(inputs, axes=axes, starts=starts, ends=ends) - return x + return paddle.slice(inputs, axes=axes, starts=starts, ends=ends) class TestSliceConvert(OPConvertAutoScanTest): @@ -99,8 +100,7 @@ def forward(self, inputs): starts = paddle.to_tensor(starts) if self.config["isEndsTensor"]: ends = paddle.to_tensor(ends) - x = paddle.slice(inputs, axes=axes, starts=starts, ends=ends) - return x + return paddle.slice(inputs, axes=axes, starts=starts, ends=ends) class TestSliceConvert1(OPConvertAutoScanTest): @@ -169,8 +169,7 @@ def forward(self, inputs): starts = [1, 0, paddle.to_tensor(0), 0] ends = self.config["ends"] ends = [10, 10, paddle.to_tensor(10), 10] - x = paddle.slice(inputs, axes=axes, starts=starts, ends=ends) - return x + return 
paddle.slice(inputs, axes=axes, starts=starts, ends=ends) class TestSliceConvert2(OPConvertAutoScanTest): @@ -238,8 +237,7 @@ def forward(self, inputs): paddle.to_tensor(np.array(10).astype("int64")), paddle.to_tensor(np.array(10).astype("int64")), ] - x = paddle.slice(inputs, axes=axes, starts=starts, ends=ends) - return x + return paddle.slice(inputs, axes=axes, starts=starts, ends=ends) class TestSliceConvert3(OPConvertAutoScanTest): @@ -291,8 +289,7 @@ def forward(self, inputs): """ forward """ - x = inputs[1:2, 2, :] - return x + return inputs[1:2, 2, :] class TestSliceConvert4(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_softmax.py b/tests/test_auto_scan_softmax.py index fbbe40d99..2bfe19c55 100755 --- a/tests/test_auto_scan_softmax.py +++ b/tests/test_auto_scan_softmax.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle @@ -28,9 +30,7 @@ def forward(self, inputs): forward """ - x = paddle.nn.functional.softmax(inputs, axis=self.config["axis"]) - - return x + return paddle.nn.functional.softmax(inputs, axis=self.config["axis"]) class TestSoftmaxConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_softmax_with_cross_entropy.py b/tests/test_auto_scan_softmax_with_cross_entropy.py index 4ff423db7..b0c3db6b0 100644 --- a/tests/test_auto_scan_softmax_with_cross_entropy.py +++ b/tests/test_auto_scan_softmax_with_cross_entropy.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st import numpy as np -import unittest +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle @@ -28,14 +30,13 @@ def forward(self, logits, label): """ forward """ - x = paddle.nn.functional.softmax_with_cross_entropy( + return paddle.nn.functional.softmax_with_cross_entropy( logits=logits, label=label, soft_label=self.config["soft_label"], return_softmax=self.config["return_softmax"], axis=self.config["axis"], ) - return x class TestSoftmaxWithCrossEntropyConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_split.py b/tests/test_auto_scan_split.py index baadab947..72a3e1040 100644 --- a/tests/test_auto_scan_split.py +++ b/tests/test_auto_scan_split.py @@ -12,13 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st -import unittest -import paddle import random +import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_only_pir +import paddle + class Net(BaseNet): """ @@ -32,10 +34,9 @@ def forward(self, inputs): axis = self.config["axis"] if self.config["isAxisTensor"]: axis = paddle.to_tensor(axis, dtype=self.config["axis_dtype"]) - x = paddle.split( + return paddle.split( inputs, num_or_sections=self.config["num_or_sections"], axis=axis ) - return x class TestSplitConvert(OPConvertAutoScanTest): @@ -50,7 +51,7 @@ def sample_convert_config(self, draw): ) # float64 not supported dtype = draw(st.sampled_from(["float32", "int32", "int64"])) - axis_dtype = "int64" # 只能设置为INT64,设置为INT32时会在axis_tensor后增加cast导致取不到constant数值 + axis_dtype = "int64" # 只能设置为INT64,设置为INT32时会在axis_tensor后增加cast导致取不到constant数值 isAxisTensor = draw(st.booleans()) # when axis is negtive, paddle has bug axis = 
draw(st.integers(min_value=0, max_value=len(input_shape) - 1)) diff --git a/tests/test_auto_scan_squeeze2.py b/tests/test_auto_scan_squeeze2.py index c8027bdfb..8df815794 100755 --- a/tests/test_auto_scan_squeeze2.py +++ b/tests/test_auto_scan_squeeze2.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -35,8 +37,7 @@ def forward(self, inputs): axis = paddle.to_tensor(self.config["axis"]) else: axis = self.config["axis"] - x = paddle.squeeze(inputs, axis=axis) - return x + return paddle.squeeze(inputs, axis=axis) class TestSqueezeConvert(OPConvertAutoScanTest): @@ -55,19 +56,13 @@ def sample_convert_config(self, draw): axis = draw( st.integers(min_value=-len(input_shape), max_value=len(input_shape) - 1) ) - if axis == 0: - axis = [0, -1] - else: - axis = [0, axis] + axis = [0, -1] if axis == 0 else [0, axis] input_shape[axis[0]] = 1 input_shape[axis[1]] = 1 tensor_attr = draw(st.booleans()) - if draw(st.booleans()): - input_spec_shape = [] - else: - input_spec_shape = [len(input_shape) * [-1]] + input_spec_shape = [] if draw(st.booleans()) else [len(input_shape) * [-1]] config = { "op_names": ["squeeze2"], diff --git a/tests/test_auto_scan_stack.py b/tests/test_auto_scan_stack.py index 359b9e9bb..06a483a42 100755 --- a/tests/test_auto_scan_stack.py +++ b/tests/test_auto_scan_stack.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -28,8 +30,7 @@ def forward(self, inputs1, inputs2): """ forward """ - x = paddle.stack([inputs1, inputs2], axis=self.config["axis"]) - return x + return paddle.stack([inputs1, inputs2], axis=self.config["axis"]) class TestStackConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_strided_slice.py b/tests/test_auto_scan_strided_slice.py index ef35dffea..2e631cf6b 100644 --- a/tests/test_auto_scan_strided_slice.py +++ b/tests/test_auto_scan_strided_slice.py @@ -12,13 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st import numpy as np -import unittest -import paddle +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_only_pir +import paddle + class Net(BaseNet): """ @@ -39,10 +41,9 @@ def forward(self, inputs): ends = paddle.to_tensor(np.array(ends).astype("int32")) if self.config["isStridesTensor"]: strides = paddle.to_tensor(np.array(strides).astype("int32")) - x = paddle.strided_slice( + return paddle.strided_slice( inputs, axes=axes, starts=starts, ends=ends, strides=strides ) - return x class TestStridedsliceConvert(OPConvertAutoScanTest): @@ -67,15 +68,9 @@ def sample_convert_config(self, draw): else: starts = [-input_shape[axes[0]], 0, -input_shape[axes[2]] - 22] - if draw(st.booleans()): - ends = [3, 2, 40000] - else: - ends = [-1, 2, 4] + ends = [3, 2, 40000] if draw(st.booleans()) else [-1, 2, 4] - if draw(st.booleans()): - strides = [2, 1, 2] - else: - strides = [1, 1, 1] + strides = [2, 1, 2] if draw(st.booleans()) else [1, 1, 1] tmp = [i 
for i, val in enumerate(strides) if val == 1] if len(tmp) == len(strides) and isStridesTensor is False: @@ -123,10 +118,9 @@ def forward(self, inputs): # strides = [1, paddle.to_tensor(1).astype('int32'), 1] # strides = [1, paddle.to_tensor(1, dtype='int32'), 1] # strides = [1, paddle.to_tensor(np.array(1).astype("int32")), 1] - x = paddle.strided_slice( + return paddle.strided_slice( inputs, axes=axes, starts=starts, ends=ends, strides=strides ) - return x class TestStridedsliceConvert1(OPConvertAutoScanTest): @@ -151,10 +145,7 @@ def sample_convert_config(self, draw): else: starts = [-input_shape[axes[0]], 0, -input_shape[axes[2]] - 22] - if draw(st.booleans()): - ends = [3, 2, 40000] - else: - ends = [-1, 2, 4] + ends = [3, 2, 40000] if draw(st.booleans()) else [-1, 2, 4] # if draw(st.booleans()): # strides = [2, 1, 2] diff --git a/tests/test_auto_scan_sum.py b/tests/test_auto_scan_sum.py index 18471ff31..c599307f4 100755 --- a/tests/test_auto_scan_sum.py +++ b/tests/test_auto_scan_sum.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -29,8 +31,7 @@ def forward(self, inputs1, inputs2): forward """ inputs = [inputs1, inputs2] - x = paddle.add_n(inputs) - return x + return paddle.add_n(inputs) class TestSumConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_take_along_axis.py b/tests/test_auto_scan_take_along_axis.py index a5f8acf6d..4631c746b 100755 --- a/tests/test_auto_scan_take_along_axis.py +++ b/tests/test_auto_scan_take_along_axis.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -from onnxbase import randtool -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest +from onnxbase import randtool + import paddle @@ -28,8 +30,7 @@ def forward(self, input1, input2): """ forward """ - x = paddle.take_along_axis(input1, input2, axis=self.config["axis"]) - return x + return paddle.take_along_axis(input1, input2, axis=self.config["axis"]) class TestGatherNDConvert(OPConvertAutoScanTest): @@ -58,8 +59,7 @@ def sample_convert_config(self, draw): input2_shape[axis] = 1 def generator_data(): - input_data = randtool("int", 0, input_shape[axis], input2_shape) - return input_data + return randtool("int", 0, input_shape[axis], input2_shape) config = { "op_names": ["take_along_axis"], diff --git a/tests/test_auto_scan_temporal_shift.py b/tests/test_auto_scan_temporal_shift.py index 25b9b7836..83cfeb3d0 100755 --- a/tests/test_auto_scan_temporal_shift.py +++ b/tests/test_auto_scan_temporal_shift.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle @@ -27,12 +29,11 @@ def forward(self, inputs): """ forward """ - x = paddle.nn.functional.temporal_shift( + return paddle.nn.functional.temporal_shift( inputs, seg_num=self.config["seg_num"], shift_ratio=self.config["shift_ratio"], ) - return x class TestTemporal_shiftConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_thresholded_relu.py b/tests/test_auto_scan_thresholded_relu.py index 63836cbcd..fbf648126 100755 --- a/tests/test_auto_scan_thresholded_relu.py +++ b/tests/test_auto_scan_thresholded_relu.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle @@ -27,10 +29,9 @@ def forward(self, inputs): """ forward """ - x = paddle.nn.functional.thresholded_relu( + return paddle.nn.functional.thresholded_relu( inputs, threshold=self.config["threshold"] ) - return x class TestThresholdedReluConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_tile.py b/tests/test_auto_scan_tile.py index 9be580f54..8fec803fa 100755 --- a/tests/test_auto_scan_tile.py +++ b/tests/test_auto_scan_tile.py @@ -12,13 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st import numpy as np -import unittest -import paddle +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -38,8 +40,7 @@ def forward(self, inputs): ) elif self.config["repeat_times_dtype"] == "int": repeat_times = [repeat_times[0]] - x = paddle.tile(inputs, repeat_times=repeat_times) - return x + return paddle.tile(inputs, repeat_times=repeat_times) class TestTileConvert(OPConvertAutoScanTest): @@ -58,10 +59,7 @@ def sample_convert_config(self, draw): repeat_times_dtype = draw(st.sampled_from(["list", "Tensor", "int"])) shape_dtype = draw(st.sampled_from(["int32", "int64"])) - if len(input_shape) == 0: - repeat_times = [10] - else: - repeat_times = input_shape + repeat_times = [10] if len(input_shape) == 0 else input_shape config = { "op_names": ["tile"], "test_data_shapes": [input_shape], @@ -97,8 +95,7 @@ def forward(self, inputs): # np.array([4, 3, 2, 1]).astype('int32')) # not work # repeat_times = [4, 3, paddle.to_tensor(np.array(2).astype("int64")), 1] - x = paddle.tile(inputs, repeat_times=repeat_times) - return x + return paddle.tile(inputs, repeat_times=repeat_times) class TestTileConvert1(OPConvertAutoScanTest): @@ -115,10 +112,7 @@ def sample_convert_config(self, draw): dtype = draw(st.sampled_from(["float32", "float64", "int32", "int64"])) shape_dtype = draw(st.sampled_from(["int32", "int64"])) - if len(input_shape) == 0: - repeat_times = [10] - else: - repeat_times = input_shape + repeat_times = [10] if len(input_shape) == 0 else input_shape # when repeat_times_dtype is tensor has a bug repeat_times_dtype = draw(st.sampled_from(["list", "Tensor"])) diff --git a/tests/test_auto_scan_top_k.py b/tests/test_auto_scan_top_k.py index 8280eaed1..2f7bcc9bb 100755 --- a/tests/test_auto_scan_top_k.py +++ b/tests/test_auto_scan_top_k.py @@ -12,15 +12,17 @@ # See the 
License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st -import numpy as np -import unittest -import paddle import random +import unittest from random import shuffle + +import hypothesis.strategies as st +import numpy as np +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_only_pir +import paddle + class Net(BaseNet): """ @@ -34,14 +36,13 @@ def forward(self, input): k = self.config["k"] if self.config["isTensor"]: k = paddle.to_tensor(k, dtype=self.config["k_dtype"]) - x = paddle.topk( + return paddle.topk( input, k=k, axis=self.config["axis"], largest=self.config["largest"], sorted=self.config["sorted"], ) - return x class TestTopkv2Convert(OPConvertAutoScanTest): @@ -70,10 +71,9 @@ def generator_data(): if len(input_shape) == 0: return np.array(10, dtype="float32") prod = np.prod(input_shape) - input_data = np.array(list(range(0, prod))) + input_data = np.array(list(range(prod))) shuffle(input_data) - input_data = input_data.reshape(input_shape) - return input_data + return input_data.reshape(input_shape) if len(input_shape) > 0: if axis is not None: diff --git a/tests/test_auto_scan_transpose.py b/tests/test_auto_scan_transpose.py index 18b8a0d06..0be1eaa1e 100755 --- a/tests/test_auto_scan_transpose.py +++ b/tests/test_auto_scan_transpose.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -28,8 +30,7 @@ def forward(self, x): """ forward """ - x = paddle.transpose(x, perm=self.config["perm"]) - return x + return paddle.transpose(x, perm=self.config["perm"]) class TestTransposeConvert(OPConvertAutoScanTest): @@ -46,7 +47,7 @@ def sample_convert_config(self, draw): dtype = draw(st.sampled_from(["int32", "int64", "float32", "float64"])) if len(input_shape) >= 2: - perm = [i for i in range(len(input_shape))] + perm = list(range(len(input_shape))) perm[0], perm[1] = perm[1], perm[0] elif len(input_shape) == 1: perm = [0] diff --git a/tests/test_auto_scan_unary_ops.py b/tests/test_auto_scan_unary_ops.py index 65d9bd240..ee285144f 100755 --- a/tests/test_auto_scan_unary_ops.py +++ b/tests/test_auto_scan_unary_ops.py @@ -11,9 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle op_api_map = { @@ -112,15 +114,13 @@ def sample_convert_config(self, draw): "opset_version": [7], "input_spec_shape": [], } - models = list() - op_names = list() - opset_versions = list() - for op_name, i in op_api_map.items(): + models = [] + op_names = [] + opset_versions = list(opset_version_map.values()) + for op_name in op_api_map: config["op_names"] = op_name models.append(Net(config)) op_names.append(op_name) - for op_name, vs in opset_version_map.items(): - opset_versions.append(vs) config["op_names"] = op_names config["opset_version"] = opset_versions return (config, models) diff --git a/tests/test_auto_scan_unfold.py b/tests/test_auto_scan_unfold.py index caf405f5f..119ea730f 100755 --- a/tests/test_auto_scan_unfold.py +++ b/tests/test_auto_scan_unfold.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -28,14 +30,13 @@ def forward(self, inputs): """ forward """ - x = paddle.nn.functional.unfold( + return paddle.nn.functional.unfold( inputs, self.config["kernel_size"], strides=self.config["strides"], paddings=self.config["paddings"], dilations=self.config["dilations"], ) - return x class TestUnfoldConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_unique.py b/tests/test_auto_scan_unique.py index adce8efe4..5be6ed29f 100755 --- a/tests/test_auto_scan_unique.py +++ b/tests/test_auto_scan_unique.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle @@ -27,7 +29,7 @@ def forward(self, input): """ forward """ - x = paddle.unique( + return paddle.unique( input, return_index=self.config["return_index"], return_inverse=self.config["return_inverse"], @@ -36,8 +38,6 @@ def forward(self, input): dtype=self.config["dtype"], ) - return x - class TestUniqueConvert(OPConvertAutoScanTest): """ diff --git a/tests/test_auto_scan_unsqueeze2.py b/tests/test_auto_scan_unsqueeze2.py index 31b469275..de4c252f8 100755 --- a/tests/test_auto_scan_unsqueeze2.py +++ b/tests/test_auto_scan_unsqueeze2.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_only_pir +import paddle + class Net(BaseNet): """ @@ -33,8 +35,7 @@ def forward(self, inputs): axis = paddle.to_tensor(axis) if self.config["isTensor13"]: axis = axis * 1 - x = paddle.unsqueeze(inputs, axis=axis) - return x + return paddle.unsqueeze(inputs, axis=axis) class TestUnsqueezeConvert(OPConvertAutoScanTest): @@ -65,10 +66,7 @@ def sample_convert_config(self, draw): if len(input_shape) == 3: axis = [1, 2, 3] if len(input_shape) == 2: - if draw(st.booleans()): - axis = [0, 1, 2] - else: - axis = [1, 3] + axis = [0, 1, 2] if draw(st.booleans()) else [1, 3] isTensor13 = draw(st.booleans()) opset_version = [7, 9, 10, 11, 12, 13, 14, 15] if isTensor13 or isTensor: diff --git a/tests/test_auto_scan_unstack.py b/tests/test_auto_scan_unstack.py index 1f8ac273c..d3a150a89 100644 --- a/tests/test_auto_scan_unstack.py +++ b/tests/test_auto_scan_unstack.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle @@ -27,8 +29,7 @@ def forward(self, inputs): """ forward """ - x = paddle.unstack(inputs, axis=self.config["axis"], num=self.config["num"]) - return x + return paddle.unstack(inputs, axis=self.config["axis"], num=self.config["num"]) class TestUnstackConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_where.py b/tests/test_auto_scan_where.py index 4377f6b96..5a55fe82a 100755 --- a/tests/test_auto_scan_where.py +++ b/tests/test_auto_scan_where.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest -import paddle + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import _test_with_pir +import paddle + class Net(BaseNet): """ @@ -28,8 +30,7 @@ def forward(self, inputs1, inputs2): """ forward """ - x = paddle.where(inputs1 < inputs2, inputs1, inputs2) - return x + return paddle.where(inputs1 < inputs2, inputs1, inputs2) class TestWhereConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_where_index.py b/tests/test_auto_scan_where_index.py index 66194c71e..c17bbbc4e 100755 --- a/tests/test_auto_scan_where_index.py +++ b/tests/test_auto_scan_where_index.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from auto_scan_test import OPConvertAutoScanTest, BaseNet -import hypothesis.strategies as st import unittest + +import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest + import paddle @@ -29,8 +31,7 @@ def forward(self, inputs): """ condition = paddle.cast(inputs, "bool") - x = paddle.nonzero(condition) - return x + return paddle.nonzero(condition) class TestWhereIndexConvert(OPConvertAutoScanTest): diff --git a/tests/test_auto_scan_yolo_box.py b/tests/test_auto_scan_yolo_box.py index 14f4fb7fd..b3cfe0ed7 100755 --- a/tests/test_auto_scan_yolo_box.py +++ b/tests/test_auto_scan_yolo_box.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -from auto_scan_test import OPConvertAutoScanTest, BaseNet +import unittest + import hypothesis.strategies as st +from auto_scan_test import BaseNet, OPConvertAutoScanTest from onnxbase import randtool -import unittest + import paddle @@ -34,7 +36,7 @@ def forward(self, inputs_1, inputs_2): downsample_ratio = self.config["downsample_ratio"] clip_bbox = self.config["clip_bbox"] scale_x_y = self.config["scale_x_y"] - x = paddle.vision.ops.yolo_box( + return paddle.vision.ops.yolo_box( inputs_1, inputs_2, anchors=anchors, @@ -44,7 +46,6 @@ def forward(self, inputs_1, inputs_2): clip_bbox=clip_bbox, scale_x_y=scale_x_y, ) - return x class TestYoloBoxConvert(OPConvertAutoScanTest): @@ -84,8 +85,7 @@ def sample_convert_config(self, draw): dtype = draw(st.sampled_from(["float32", "float64"])) def generator_data(): - input_data = randtool("int", 320, 640, img_size) - return input_data + return randtool("int", 320, 640, img_size) input_shape[1] = num * (5 + class_num) diff --git a/tests/test_avg_pool2d.py b/tests/test_avg_pool2d.py index 52c74da33..9c1c5a19e 100644 --- a/tests/test_avg_pool2d.py +++ b/tests/test_avg_pool2d.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations 
under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.nn.functional.avg_pool2d(inputs, kernel_size=2) - return x + return paddle.nn.functional.avg_pool2d(inputs, kernel_size=2) @_test_with_pir diff --git a/tests/test_bitwise.py b/tests/test_bitwise.py index c93e0b84e..054065ed9 100644 --- a/tests/test_bitwise.py +++ b/tests/test_bitwise.py @@ -12,18 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir + import paddle -from onnxbase import APIOnnx -from onnxbase import _test_with_pir class BitwiseAndNet(paddle.nn.Layer): def __init__(self): - super(BitwiseAndNet, self).__init__() + super().__init__() def forward(self, x, y): - x = paddle.bitwise_and(x, y) - return x + return paddle.bitwise_and(x, y) @_test_with_pir @@ -80,11 +79,10 @@ def test_bitwise_and_bool_type_18(): class BitwiseNotNet(paddle.nn.Layer): def __init__(self): - super(BitwiseNotNet, self).__init__() + super().__init__() def forward(self, x): - x = paddle.bitwise_not(x) - return x + return paddle.bitwise_not(x) @_test_with_pir @@ -131,11 +129,10 @@ def test_bitwise_not_bool_type_18(): class BitwiseOrNet(paddle.nn.Layer): def __init__(self): - super(BitwiseOrNet, self).__init__() + super().__init__() def forward(self, x, y): - x = paddle.bitwise_or(x, y) - return x + return paddle.bitwise_or(x, y) @_test_with_pir @@ -192,11 +189,10 @@ def test_bitwise_or_bool_type_18(): class BitwiseXorNet(paddle.nn.Layer): def __init__(self): - super(BitwiseXorNet, self).__init__() + super().__init__() def forward(self, x, y): - x = paddle.bitwise_xor(x, y) - return x + return 
paddle.bitwise_xor(x, y) @_test_with_pir diff --git a/tests/test_bmm.py b/tests/test_bmm.py index 4020a02dc..7302e3bbf 100644 --- a/tests/test_bmm.py +++ b/tests/test_bmm.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,7 +23,7 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, inputs_): """ diff --git a/tests/test_broadcast_to.py b/tests/test_broadcast_to.py index 1aa4412ef..7660b0865 100644 --- a/tests/test_broadcast_to.py +++ b/tests/test_broadcast_to.py @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -import paddle from onnxbase import APIOnnx +import paddle + class Net(paddle.nn.Layer): """ @@ -22,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.broadcast_to(inputs, shape=[2, 3]) - return x + return paddle.broadcast_to(inputs, shape=[2, 3]) def test_broadcast_to_base(): diff --git a/tests/test_cast.py b/tests/test_cast.py index 1e08486d0..8bbbce2ec 100755 --- a/tests/test_cast.py +++ b/tests/test_cast.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.cast(inputs, "float64") - return x + return paddle.cast(inputs, "float64") @_test_with_pir diff --git a/tests/test_ceil.py b/tests/test_ceil.py index 3ac5033f1..7ff0dc350 100644 --- a/tests/test_ceil.py +++ b/tests/test_ceil.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool, _test_with_pir class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.ceil(inputs) - return x + return paddle.ceil(inputs) @_test_with_pir diff --git a/tests/test_clip.py b/tests/test_clip.py index d0ec2eab0..5a358f561 100644 --- a/tests/test_clip.py +++ b/tests/test_clip.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.clip(inputs) - return x + return paddle.clip(inputs) @_test_with_pir diff --git a/tests/test_concat.py b/tests/test_concat.py index 822c791a3..1bd0b8bbf 100644 --- a/tests/test_concat.py +++ b/tests/test_concat.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, inputs_): """ forward """ - x = paddle.concat([inputs, inputs_]) - return x + return paddle.concat([inputs, inputs_]) @_test_with_pir diff --git a/tests/test_cos.py b/tests/test_cos.py index aeb0bb607..ceaf0278e 100644 --- a/tests/test_cos.py +++ b/tests/test_cos.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.cos(inputs) - return x + return paddle.cos(inputs) @_test_with_pir diff --git a/tests/test_cosh.py b/tests/test_cosh.py index 2151cabde..4a40a7dd8 100644 --- a/tests/test_cosh.py +++ b/tests/test_cosh.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.cosh(inputs) - return x + return paddle.cosh(inputs) def test_cosh_9(): diff --git a/tests/test_cumsum.py b/tests/test_cumsum.py index 1ff9777e9..75273d8cb 100644 --- a/tests/test_cumsum.py +++ b/tests/test_cumsum.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.cumsum(inputs, axis=0) - return x + return paddle.cumsum(inputs, axis=0) @_test_with_pir diff --git a/tests/test_dequantize_linear.py b/tests/test_dequantize_linear.py index a12c55de0..d9be7e4a9 100755 --- a/tests/test_dequantize_linear.py +++ b/tests/test_dequantize_linear.py @@ -13,14 +13,16 @@ # limitations under the License. import os -import paddle -import numpy as np -import paddle2onnx import unittest + +import numpy as np import onnxruntime as ort -from paddle.base import unique_name from onnxbase import _test_only_pir +import paddle +import paddle2onnx +from paddle.base import unique_name + def convert_scale_to_paddle(onnx_scale, qmax): return onnx_scale * qmax / 4 @@ -50,19 +52,21 @@ def build_static_net(input_shape, quant_axis, scale_shape, qmin, qmax, type): stop_gradient=True, ) - dequant_out, out_state, out_accum, out_scale = paddle._C_ops.dequantize_linear( - x, - scale, - zero_points, - accum, - state, - quant_axis, - 8, # bit_length - qmin, # qmin - qmax, # qmax - 0, # rounding_type - True, # is_test - False, # only_observer + dequant_out, _out_state, _out_accum, _out_scale = ( + paddle._C_ops.dequantize_linear( + x, + scale, + zero_points, + accum, + state, + quant_axis, + 8, # bit_length + qmin, # qmin + qmax, # qmax + 0, # rounding_type + True, # is_test + False, # only_observer + ) ) model_dir = f"./dequantize_linear_model_{type}" diff --git a/tests/test_dist.py b/tests/test_dist.py index 2783b3903..069fd06c7 100644 --- a/tests/test_dist.py +++ b/tests/test_dist.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the 
License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,15 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self, p=2): - super(Net, self).__init__() + super().__init__() self.p = p def forward(self, inputs, _inputs): """ forward """ - x = paddle.dist(inputs, _inputs, p=self.p) - return x + return paddle.dist(inputs, _inputs, p=self.p) def test_dist_9(): diff --git a/tests/test_divide.py b/tests/test_divide.py index a78337ebf..e0d63a7dd 100644 --- a/tests/test_divide.py +++ b/tests/test_divide.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, inputs_): """ forward """ - x = paddle.divide(inputs, inputs_) - return x + return paddle.divide(inputs, inputs_) def test_divide_9(): diff --git a/tests/test_dot.py b/tests/test_dot.py index 2e5e84b4e..980126bb8 100644 --- a/tests/test_dot.py +++ b/tests/test_dot.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, inputs_): """ forward """ - x = paddle.dot(inputs, inputs_) - return x + return paddle.dot(inputs, inputs_) def test_dot_9(): diff --git a/tests/test_dygraph2onnx.py b/tests/test_dygraph2onnx.py index a43e99587..0f264629d 100644 --- a/tests/test_dygraph2onnx.py +++ b/tests/test_dygraph2onnx.py @@ -12,8 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import paddle import unittest + +import paddle import paddle2onnx diff --git a/tests/test_einsum.py b/tests/test_einsum.py index 0413f83a1..0ff329a7b 100644 --- a/tests/test_einsum.py +++ b/tests/test_einsum.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir + import paddle -from onnxbase import APIOnnx -from onnxbase import _test_with_pir @_test_with_pir @@ -30,14 +30,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, input): """ forward """ - x = paddle.einsum("i->", input) - return x + return paddle.einsum("i->", input) op = Net() op.eval() @@ -60,14 +59,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, x, y): """ forward """ - x = paddle.einsum("i,i->", x, y) - return x + return paddle.einsum("i,i->", x, y) op = Net() op.eval() @@ -91,14 +89,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, x, y): """ forward """ - x = paddle.einsum("i,j->ij", x, y) - return x + return paddle.einsum("i,j->ij", x, y) op = Net() op.eval() @@ -123,14 +120,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, x): """ forward """ - x = paddle.einsum("ijk->kji", x) - return x + return paddle.einsum("ijk->kji", x) op = Net() op.eval() @@ -154,14 +150,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, x, y): """ forward """ - x = paddle.einsum("ijk, ikl->ijl", x, y) - return x + return paddle.einsum("ijk, ikl->ijl", x, y) op = Net() op.eval() @@ -186,14 +181,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, x): """ forward """ - x = paddle.einsum("...jk->...kj", x) - return x + return paddle.einsum("...jk->...kj", x) op = Net() op.eval() @@ -217,14 +211,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, x, y): """ forward """ - x = paddle.einsum("...jk, ...kl->...jl", x, 
y) - return x + return paddle.einsum("...jk, ...kl->...jl", x, y) op = Net() op.eval() diff --git a/tests/test_empty.py b/tests/test_empty.py index f3fb8f690..4e9749809 100644 --- a/tests/test_empty.py +++ b/tests/test_empty.py @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -import paddle from onnxbase import APIOnnx, _test_with_pir +import paddle + class Net(paddle.nn.Layer): """ @@ -22,16 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, shape): """ forward """ x = paddle.empty(shape, dtype=paddle.int64) - x = paddle.zeros_like(x) - - return x + return paddle.zeros_like(x) @_test_with_pir diff --git a/tests/test_equal.py b/tests/test_equal.py index 106e09239..74df7a2bf 100644 --- a/tests/test_equal.py +++ b/tests/test_equal.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, inputs_): """ forward """ - x = paddle.equal(inputs, inputs_) - return x + return paddle.equal(inputs, inputs_) # def test_equal_9(): diff --git a/tests/test_erf.py b/tests/test_erf.py index 2beee1041..862a1c24a 100644 --- a/tests/test_erf.py +++ b/tests/test_erf.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.erf(inputs) - return x + return paddle.erf(inputs) def test_erf_9(): diff --git a/tests/test_exp.py b/tests/test_exp.py index 25f7b9793..5d525a6a0 100644 --- a/tests/test_exp.py +++ b/tests/test_exp.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.exp(inputs) - return x + return paddle.exp(inputs) @_test_with_pir diff --git a/tests/test_expand.py b/tests/test_expand.py index a8e75f589..ec667dacb 100644 --- a/tests/test_expand.py +++ b/tests/test_expand.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.expand(inputs, shape=[2, 3, 10]) - return x + return paddle.expand(inputs, shape=[2, 3, 10]) @_test_with_pir diff --git a/tests/test_expand_as.py b/tests/test_expand_as.py index d82395cf0..67eda19c7 100644 --- a/tests/test_expand_as.py +++ b/tests/test_expand_as.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, inputs_): """ forward """ - x = paddle.expand_as(inputs, inputs_) - return x + return paddle.expand_as(inputs, inputs_) def test_expand_as_9(): diff --git a/tests/test_fft_r2c.py b/tests/test_fft_r2c.py index dbe6d41f7..0b2dd3dbd 100644 --- a/tests/test_fft_r2c.py +++ b/tests/test_fft_r2c.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_only_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_only_pir class Net(paddle.nn.Layer): @@ -24,15 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ x = paddle.fft.rfft(inputs, axis=1) - x = paddle.abs(x) - return x + return paddle.abs(x) @_test_only_pir diff --git a/tests/test_fill_constant.py b/tests/test_fill_constant.py index 698f12581..3c75a03ff 100644 --- a/tests/test_fill_constant.py +++ b/tests/test_fill_constant.py @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -import paddle from onnxbase import APIOnnx +import paddle + class Net(paddle.nn.Layer): def forward(self, shape): diff --git a/tests/test_flatten.py b/tests/test_flatten.py index 2db2f5168..698f098a8 100644 --- a/tests/test_flatten.py +++ b/tests/test_flatten.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.flatten(inputs, start_axis=1, stop_axis=2) - return x + return paddle.flatten(inputs, start_axis=1, stop_axis=2) @_test_with_pir diff --git a/tests/test_floor.py b/tests/test_floor.py index d11f2f925..b6dcf2d54 100644 --- a/tests/test_floor.py +++ b/tests/test_floor.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.floor(inputs) - return x + return paddle.floor(inputs) @_test_with_pir diff --git a/tests/test_floor_divide.py b/tests/test_floor_divide.py index 523cbadc8..8281ee3e4 100644 --- a/tests/test_floor_divide.py +++ b/tests/test_floor_divide.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir + import paddle -from onnxbase import APIOnnx -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, inputs_): """ forward """ - x = paddle.floor_divide(inputs, inputs_) - return x + return paddle.floor_divide(inputs, inputs_) @_test_with_pir diff --git a/tests/test_floor_mod.py b/tests/test_floor_mod.py index 3392d364c..b6a4d975d 100644 --- a/tests/test_floor_mod.py +++ b/tests/test_floor_mod.py @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import paddle from onnxbase import APIOnnx +import paddle + class Net(paddle.nn.Layer): """ @@ -22,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, _inputs): """ forward """ - x = paddle.floor_mod(inputs, _inputs) - return x + return paddle.floor_mod(inputs, _inputs) def test_floor_mod_10(): diff --git a/tests/test_full_with_tensor.py b/tests/test_full_with_tensor.py index cf349fd3a..5c5808591 100644 --- a/tests/test_full_with_tensor.py +++ b/tests/test_full_with_tensor.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_only_pir + import paddle -from onnxbase import APIOnnx -from onnxbase import _test_only_pir class Net1(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net1(paddle.nn.Layer): """ def __init__(self): - super(Net1, self).__init__() + super().__init__() def forward(self, shape): """ forward """ - x = paddle.full(shape, fill_value=3) - return x + return paddle.full(shape, fill_value=3) @_test_only_pir @@ -52,14 +51,13 @@ class Net2(paddle.nn.Layer): """ def __init__(self): - super(Net2, self).__init__() + super().__init__() def forward(self, shape, fill_value): """ forward """ - x = paddle.full(shape, fill_value) - return x + return paddle.full(shape, fill_value) @_test_only_pir diff --git a/tests/test_gather.py b/tests/test_gather.py index a2540edb6..770e4dd56 100644 --- a/tests/test_gather.py +++ b/tests/test_gather.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_only_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_only_pir class Net(paddle.nn.Layer): @@ -24,18 +23,17 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.gather( + return paddle.gather( inputs, index=paddle.to_tensor([1, 2], dtype="int64"), axis=paddle.to_tensor([0]), ) - return x @_test_only_pir @@ -95,16 +93,15 @@ class Net2(paddle.nn.Layer): """ def __init__(self): - super(Net2, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.gather( + return paddle.gather( inputs, index=paddle.to_tensor([[1], [2]], dtype="int64"), axis=1 ) - return x # Attention : GatherND don't have opset < 11 version, so we don't test it. @@ -150,14 +147,15 @@ class Net3(paddle.nn.Layer): """ def __init__(self): - super(Net3, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.gather(inputs, index=paddle.to_tensor([0, 1], dtype="int64"), axis=1) - return x + return paddle.gather( + inputs, index=paddle.to_tensor([0, 1], dtype="int64"), axis=1 + ) @_test_only_pir @@ -175,9 +173,9 @@ def test_gather_7_3(): print(data.shape) obj.set_input_data("input_data", data) obj.run() - assert len(obj.res_fict["7"][0].shape) == len( - data.shape - ), "The result of ONNX inference is not equal to Paddle inference!\n" + assert len(obj.res_fict["7"][0].shape) == len(data.shape), ( + "The result of ONNX inference is not equal to Paddle inference!\n" + ) @_test_only_pir @@ -195,9 +193,9 @@ def test_gather_11_3(): print(data.shape) obj.set_input_data("input_data", data) obj.run() - assert len(obj.res_fict["11"][0].shape) == len( - data.shape - ), "The result of ONNX inference is not equal to Paddle inference!\n" + assert len(obj.res_fict["11"][0].shape) == len(data.shape), ( + "The result of ONNX inference 
is not equal to Paddle inference!\n" + ) @_test_only_pir @@ -216,9 +214,9 @@ def test_gather_13_3(): obj.set_input_data("input_data", data) obj.run() - assert len(obj.res_fict["13"][0].shape) == len( - data.shape - ), "The result of ONNX inference is not equal to Paddle inference!\n" + assert len(obj.res_fict["13"][0].shape) == len(data.shape), ( + "The result of ONNX inference is not equal to Paddle inference!\n" + ) class Net4(paddle.nn.Layer): @@ -227,16 +225,15 @@ class Net4(paddle.nn.Layer): """ def __init__(self): - super(Net4, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.gather( + return paddle.gather( inputs, index=paddle.to_tensor([[0], [1]], dtype="int64"), axis=2 ) - return x @_test_only_pir @@ -271,6 +268,6 @@ def test_gather_13_4(): print(len(data.shape)) obj.set_input_data("input_data", data) obj.run() - assert len(obj.res_fict["13"][0].shape) == len( - data.shape - ), "The result of ONNX inference is not equal to Paddle inference!\n" + assert len(obj.res_fict["13"][0].shape) == len(data.shape), ( + "The result of ONNX inference is not equal to Paddle inference!\n" + ) diff --git a/tests/test_gelu.py b/tests/test_gelu.py index b7d37be39..1c01efdc2 100644 --- a/tests/test_gelu.py +++ b/tests/test_gelu.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.nn.functional.gelu(inputs) - return x + return paddle.nn.functional.gelu(inputs) @_test_with_pir diff --git a/tests/test_glu.py b/tests/test_glu.py index 61fddca81..27920fa0e 100644 --- a/tests/test_glu.py +++ b/tests/test_glu.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.nn.functional.glu(inputs) - return x + return paddle.nn.functional.glu(inputs) def test_glu_7(): diff --git a/tests/test_greater_equal.py b/tests/test_greater_equal.py index 77c0214f6..388c035b3 100644 --- a/tests/test_greater_equal.py +++ b/tests/test_greater_equal.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, inputs_): """ forward """ - x = paddle.greater_equal(inputs, inputs_) - return x + return paddle.greater_equal(inputs, inputs_) @_test_with_pir diff --git a/tests/test_greater_than.py b/tests/test_greater_than.py index 3fd7272c2..5b76e8279 100644 --- a/tests/test_greater_than.py +++ b/tests/test_greater_than.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,7 +23,7 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, inputs_): """ diff --git a/tests/test_hardsigmoid.py b/tests/test_hardsigmoid.py index 1020a523b..312048fb7 100644 --- a/tests/test_hardsigmoid.py +++ b/tests/test_hardsigmoid.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.nn.functional.hardsigmoid(inputs) - return x + return paddle.nn.functional.hardsigmoid(inputs) @_test_with_pir diff --git a/tests/test_hardswish.py b/tests/test_hardswish.py index c67aaa850..3ddd6dff9 100644 --- a/tests/test_hardswish.py +++ b/tests/test_hardswish.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.nn.functional.hardswish(inputs) - return x + return paddle.nn.functional.hardswish(inputs) @_test_with_pir diff --git a/tests/test_hardtanh.py b/tests/test_hardtanh.py index aa2554eef..3bed3cc33 100644 --- a/tests/test_hardtanh.py +++ b/tests/test_hardtanh.py @@ -12,8 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx, randtool, _test_with_pir class Net(paddle.nn.Layer): @@ -22,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.nn.functional.hardtanh(inputs) - return x + return paddle.nn.functional.hardtanh(inputs) @_test_with_pir diff --git a/tests/test_ifelse.py b/tests/test_ifelse.py index 84e43769b..acf212477 100644 --- a/tests/test_ifelse.py +++ b/tests/test_ifelse.py @@ -12,19 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -import paddle from onnxbase import APIOnnx +import paddle + class BaseNet1(paddle.nn.Layer): def __init__(self): - super(BaseNet1, self).__init__() + super().__init__() def forward(self, inputs): if inputs == 1: return inputs * 1 - else: - return inputs * 3 + return inputs * 3 def test_ifelse_1_true(): @@ -45,13 +45,12 @@ def test_ifelse_1_false(): class BaseNet2(paddle.nn.Layer): def __init__(self): - super(BaseNet2, self).__init__() + super().__init__() def forward(self, cond, inputs): if cond == 1: return inputs * 1, inputs * 2 - else: - return inputs * 3, inputs * 4 + return inputs * 3, inputs * 4 def test_ifelse_2_true(): @@ -72,13 +71,12 @@ def test_ifelse_2_false(): class BaseNet3(paddle.nn.Layer): def __init__(self): - super(BaseNet3, self).__init__() + super().__init__() def forward(self, inputs): if inputs == 1: return 1 - else: - return 2 + return 2 def test_ifelse_3_true(): @@ -99,13 +97,12 @@ def test_ifelse_3_false(): class BaseNet4(paddle.nn.Layer): def __init__(self): - super(BaseNet4, self).__init__() + super().__init__() def forward(self, inputs): if inputs == 1: return inputs + 1 - else: - return 2 + return 2 def test_ifelse_4_true(): @@ -126,13 +123,12 @@ def test_ifelse_4_false(): class BaseNet5(paddle.nn.Layer): def __init__(self): - 
super(BaseNet5, self).__init__() + super().__init__() def forward(self, inputs): if inputs == 1: return 1, 2 - else: - return 2, 3 + return 2, 3 def test_ifelse_5_true(): diff --git a/tests/test_index_select.py b/tests/test_index_select.py index 4dc7edc95..9e91eb6bb 100644 --- a/tests/test_index_select.py +++ b/tests/test_index_select.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,17 +23,16 @@ class Net(paddle.nn.Layer): """ def __init__(self, axis=0): - super(Net, self).__init__() + super().__init__() self.axis = axis def forward(self, inputs): """ forward """ - x = paddle.index_select( + return paddle.index_select( inputs, index=paddle.to_tensor([1, 2], dtype="int64"), axis=self.axis ) - return x @_test_with_pir diff --git a/tests/test_isfinite.py b/tests/test_isfinite.py index 1eb9dd405..5e2b3b026 100644 --- a/tests/test_isfinite.py +++ b/tests/test_isfinite.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,7 +23,7 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ diff --git a/tests/test_isinf.py b/tests/test_isinf.py index fed182a41..8aaf4814a 100644 --- a/tests/test_isinf.py +++ b/tests/test_isinf.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx, randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -23,7 +23,7 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ @@ -67,7 +67,7 @@ def test_isinf_10(): obj.set_input_data( "input_data", paddle.to_tensor( - ([float("-inf"), -2, 3.6, float("inf"), 0, float("-nan"), float("nan")]) + [float("-inf"), -2, 3.6, float("inf"), 0, float("-nan"), float("nan")] ), ) obj.run() @@ -86,7 +86,7 @@ def test_isinf_11(): obj.set_input_data( "input_data", paddle.to_tensor( - ([float("-inf"), -2, 3.6, float("inf"), 0, float("-nan"), float("nan")]) + [float("-inf"), -2, 3.6, float("inf"), 0, float("-nan"), float("nan")] ), ) obj.run() @@ -105,7 +105,7 @@ def test_isinf_12(): obj.set_input_data( "input_data", paddle.to_tensor( - ([float("-inf"), -2, 3.6, float("inf"), 0, float("-nan"), float("nan")]) + [float("-inf"), -2, 3.6, float("inf"), 0, float("-nan"), float("nan")] ), ) obj.run() diff --git a/tests/test_isnan.py b/tests/test_isnan.py index f98d752e4..572cf77d8 100644 --- a/tests/test_isnan.py +++ b/tests/test_isnan.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx, randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -23,7 +23,7 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ @@ -67,7 +67,7 @@ def test_isnan_9(): obj.set_input_data( "input_data", paddle.to_tensor( - ([float("-inf"), -2, 3.6, float("inf"), 0, float("-nan"), float("nan")]) + [float("-inf"), -2, 3.6, float("inf"), 0, float("-nan"), float("nan")] ), ) obj.run() @@ -86,7 +86,7 @@ def test_isnan_10(): obj.set_input_data( "input_data", paddle.to_tensor( - ([float("-inf"), -2, 3.6, float("inf"), 0, float("-nan"), float("nan")]) + [float("-inf"), -2, 3.6, float("inf"), 0, float("-nan"), float("nan")] ), ) obj.run() @@ -105,7 +105,7 @@ def test_isnan_11(): obj.set_input_data( "input_data", paddle.to_tensor( - ([float("-inf"), -2, 3.6, float("inf"), 0, float("-nan"), float("nan")]) + [float("-inf"), -2, 3.6, float("inf"), 0, float("-nan"), float("nan")] ), ) obj.run() @@ -124,7 +124,7 @@ def test_isnan_12(): obj.set_input_data( "input_data", paddle.to_tensor( - ([float("-inf"), -2, 3.6, float("inf"), 0, float("-nan"), float("nan")]) + [float("-inf"), -2, 3.6, float("inf"), 0, float("-nan"), float("nan")] ), ) obj.run() diff --git a/tests/test_leaky_relu.py b/tests/test_leaky_relu.py index cc88b7a2a..7f6d66523 100644 --- a/tests/test_leaky_relu.py +++ b/tests/test_leaky_relu.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.nn.functional.leaky_relu(inputs) - return x + return paddle.nn.functional.leaky_relu(inputs) @_test_with_pir diff --git a/tests/test_less_equal.py b/tests/test_less_equal.py index d4500c366..b4d9af91e 100644 --- a/tests/test_less_equal.py +++ b/tests/test_less_equal.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, inputs_): """ forward """ - x = paddle.less_equal(inputs, inputs_) - return x + return paddle.less_equal(inputs, inputs_) def test_less_equal_12(): diff --git a/tests/test_less_than.py b/tests/test_less_than.py index 91afe511c..795030ade 100644 --- a/tests/test_less_than.py +++ b/tests/test_less_than.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, inputs_): """ forward """ - x = paddle.less_than(inputs, inputs_) - return x + return paddle.less_than(inputs, inputs_) @_test_with_pir diff --git a/tests/test_log10.py b/tests/test_log10.py index 5b4151a0f..4edb3a435 100644 --- a/tests/test_log10.py +++ b/tests/test_log10.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.log10(inputs) - return x + return paddle.log10(inputs) def test_log10_7(): diff --git a/tests/test_log1p.py b/tests/test_log1p.py index 297578ddd..fd938569f 100644 --- a/tests/test_log1p.py +++ b/tests/test_log1p.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.log1p(inputs) - return x + return paddle.log1p(inputs) def test_log1p_7(): diff --git a/tests/test_log2.py b/tests/test_log2.py index 602c91ddf..e929e6da9 100644 --- a/tests/test_log2.py +++ b/tests/test_log2.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.log2(inputs) - return x + return paddle.log2(inputs) def test_log2_7(): diff --git a/tests/test_logical_and.py b/tests/test_logical_and.py index fb3abccc9..237315266 100644 --- a/tests/test_logical_and.py +++ b/tests/test_logical_and.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, inputs_): """ forward """ - x = paddle.logical_and(inputs, inputs_) - return x + return paddle.logical_and(inputs, inputs_) @_test_with_pir diff --git a/tests/test_logical_not.py b/tests/test_logical_not.py index 86be25f0e..012f4113f 100644 --- a/tests/test_logical_not.py +++ b/tests/test_logical_not.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.logical_not(inputs) - return x + return paddle.logical_not(inputs) @_test_with_pir diff --git a/tests/test_logical_or.py b/tests/test_logical_or.py index aeab8e479..ab0fed1f8 100644 --- a/tests/test_logical_or.py +++ b/tests/test_logical_or.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, inputs_): """ forward """ - x = paddle.logical_or(inputs, inputs_) - return x + return paddle.logical_or(inputs, inputs_) def test_logical_or_7(): diff --git a/tests/test_logical_xor.py b/tests/test_logical_xor.py index a2d8afa50..e34b93ef6 100644 --- a/tests/test_logical_xor.py +++ b/tests/test_logical_xor.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, inputs_): """ forward """ - x = paddle.logical_xor(inputs, inputs_) - return x + return paddle.logical_xor(inputs, inputs_) def test_logical_xor_7(): diff --git a/tests/test_mask_select.py b/tests/test_mask_select.py index 33ed36da8..d609e7901 100644 --- a/tests/test_mask_select.py +++ b/tests/test_mask_select.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir + import paddle -from onnxbase import APIOnnx -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, mask): """ forward """ - x = paddle.masked_select(inputs, mask) - return x + return paddle.masked_select(inputs, mask) @_test_with_pir diff --git a/tests/test_matmul.py b/tests/test_matmul.py index f5dd47431..2a8a2c40d 100644 --- a/tests/test_matmul.py +++ b/tests/test_matmul.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, inputs_): """ forward """ - x = paddle.matmul(inputs, inputs_, transpose_x=False, transpose_y=True) - return x + return paddle.matmul(inputs, inputs_, transpose_x=False, transpose_y=True) @_test_with_pir diff --git a/tests/test_median.py b/tests/test_median.py index 727d8cf13..38832f6b3 100644 --- a/tests/test_median.py +++ b/tests/test_median.py @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import paddle from onnxbase import APIOnnx, _test_with_pir, randtool +import paddle + class Net(paddle.nn.Layer): """ @@ -22,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.median(inputs, axis=None, keepdim=False, name=None) - return x + return paddle.median(inputs, axis=None, keepdim=False, name=None) @_test_with_pir diff --git a/tests/test_meshgrid.py b/tests/test_meshgrid.py index b043850a9..5a5037adb 100644 --- a/tests/test_meshgrid.py +++ b/tests/test_meshgrid.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir + import paddle -from onnxbase import APIOnnx -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -23,7 +23,7 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, _inputs): """ @@ -75,7 +75,7 @@ class Net_3(paddle.nn.Layer): """ def __init__(self): - super(Net_3, self).__init__() + super().__init__() def forward(self, inputs, _inputs, _input): """ diff --git a/tests/test_mod.py b/tests/test_mod.py index 55acf4908..26430b9c6 100644 --- a/tests/test_mod.py +++ b/tests/test_mod.py @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import paddle from onnxbase import APIOnnx +import paddle + class Net(paddle.nn.Layer): """ @@ -22,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, inputs_): """ forward """ - x = paddle.mod(inputs, inputs_) - return x + return paddle.mod(inputs, inputs_) def test_mod_10(): diff --git a/tests/test_multiclass_nms.py b/tests/test_multiclass_nms.py index 15c7f6b21..c5ac7be97 100644 --- a/tests/test_multiclass_nms.py +++ b/tests/test_multiclass_nms.py @@ -12,14 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_only_pir + import paddle -from onnxbase import APIOnnx -from onnxbase import _test_only_pir class BaseNet(paddle.nn.Layer): def __init__(self, config): - super(BaseNet, self).__init__() + super().__init__() self.score_threshold = config["score_threshold"] self.nms_top_k = config["nms_top_k"] self.keep_top_k = config["keep_top_k"] diff --git a/tests/test_mv.py b/tests/test_mv.py index af4034479..e3c6db34d 100644 --- a/tests/test_mv.py +++ b/tests/test_mv.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, _ver): """ forward """ - x = paddle.mv(inputs, _ver) - return x + return paddle.mv(inputs, _ver) def test_mv_9(): diff --git a/tests/test_neg.py b/tests/test_neg.py index 2695e231e..f066f95fe 100644 --- a/tests/test_neg.py +++ b/tests/test_neg.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.neg(inputs) - return x + return paddle.neg(inputs) def test_neg_9(): diff --git a/tests/test_nn_AdaptiveAvgPool1D.py b/tests/test_nn_AdaptiveAvgPool1D.py index 0c684179c..4de71ebcb 100755 --- a/tests/test_nn_AdaptiveAvgPool1D.py +++ b/tests/test_nn_AdaptiveAvgPool1D.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,15 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() self._avg_pool = paddle.nn.AdaptiveAvgPool1D(output_size=3) def forward(self, inputs): """ forward """ - x = self._avg_pool(inputs) - return x + return self._avg_pool(inputs) def test_AdaptiveAvgPool1D_base(): diff --git a/tests/test_nn_AdaptiveAvgPool2D.py b/tests/test_nn_AdaptiveAvgPool2D.py index a849d6321..72782981f 100755 --- a/tests/test_nn_AdaptiveAvgPool2D.py +++ b/tests/test_nn_AdaptiveAvgPool2D.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,15 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() self._avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=3) def forward(self, inputs): """ forward """ - x = self._avg_pool(inputs) - return x + return self._avg_pool(inputs) @_test_with_pir diff --git a/tests/test_nn_AdaptiveAvgPool3D.py b/tests/test_nn_AdaptiveAvgPool3D.py index 6756aa0ad..6045b65f5 100755 --- a/tests/test_nn_AdaptiveAvgPool3D.py +++ b/tests/test_nn_AdaptiveAvgPool3D.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,15 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() self._avg_pool = paddle.nn.AdaptiveAvgPool3D(output_size=3) def forward(self, inputs): """ forward """ - x = self._avg_pool(inputs) - return x + return self._avg_pool(inputs) def test_AdaptiveAvgPool3D_base(): diff --git a/tests/test_nn_BatchNorm2D.py b/tests/test_nn_BatchNorm2D.py index 8f6ddc79d..ee63039da 100644 --- a/tests/test_nn_BatchNorm2D.py +++ b/tests/test_nn_BatchNorm2D.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,15 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() self._bn = paddle.nn.BatchNorm2D(num_features=1) def forward(self, inputs): """ forward """ - x = self._bn(inputs) - return x + return self._bn(inputs) @_test_with_pir diff --git a/tests/test_nn_Conv1D.py b/tests/test_nn_Conv1D.py index 0934edaf8..4ee6140c0 100644 --- a/tests/test_nn_Conv1D.py +++ b/tests/test_nn_Conv1D.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -37,7 +36,7 @@ def __init__( bias_attr=None, data_format="NCL", ): - super(Net, self).__init__() + super().__init__() self._conv1d = paddle.nn.Conv1D( in_channels=in_channels, out_channels=out_channels, @@ -56,8 +55,7 @@ def forward(self, inputs): """ forward """ - x = self._conv1d(inputs) - return x + return self._conv1d(inputs) @_test_with_pir diff --git a/tests/test_nn_Conv1DTranspose.py b/tests/test_nn_Conv1DTranspose.py index 78a8cb293..df7c5d312 100644 --- a/tests/test_nn_Conv1DTranspose.py +++ b/tests/test_nn_Conv1DTranspose.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -37,7 +36,7 @@ def __init__( bias_attr=None, data_format="NCL", ): - super(Net, self).__init__() + super().__init__() self._conv2d_t = paddle.nn.Conv1DTranspose( in_channels=in_channels, out_channels=out_channels, @@ -56,8 +55,7 @@ def forward(self, inputs): """ forward """ - x = self._conv2d_t(inputs) - return x + return self._conv2d_t(inputs) @_test_with_pir diff --git a/tests/test_nn_Conv2D.py b/tests/test_nn_Conv2D.py index d872fe663..7f93dab4a 100755 --- a/tests/test_nn_Conv2D.py +++ b/tests/test_nn_Conv2D.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -36,7 +35,7 @@ def __init__( bias_attr=None, data_format="NCHW", ): - super(Net, self).__init__() + super().__init__() self.conv2d = paddle.nn.Conv2D( in_channels=in_channels, out_channels=out_channels, @@ -55,8 +54,7 @@ def forward(self, inputs): """ forward """ - x = self.conv2d(inputs) - return x + return self.conv2d(inputs) @_test_with_pir diff --git a/tests/test_nn_Conv2DTranspose.py b/tests/test_nn_Conv2DTranspose.py index 36c073687..b928f1b54 100644 --- a/tests/test_nn_Conv2DTranspose.py +++ b/tests/test_nn_Conv2DTranspose.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -36,7 +35,7 @@ def __init__( bias_attr=None, data_format="NCHW", ): - super(Net, self).__init__() + super().__init__() self._conv2d_t = paddle.nn.Conv2DTranspose( in_channels=in_channels, out_channels=out_channels, @@ -55,8 +54,7 @@ def forward(self, inputs): """ forward """ - x = self._conv2d_t(inputs) - return x + return self._conv2d_t(inputs) @_test_with_pir diff --git a/tests/test_nn_Conv3D.py b/tests/test_nn_Conv3D.py index 4d3d570ba..17e691dd6 100755 --- a/tests/test_nn_Conv3D.py +++ b/tests/test_nn_Conv3D.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -36,7 +35,7 @@ def __init__( bias_attr=None, data_format="NCDHW", ): - super(Net, self).__init__() + super().__init__() self.conv3d = paddle.nn.Conv3D( in_channels=in_channels, out_channels=out_channels, @@ -55,8 +54,7 @@ def forward(self, inputs): """ forward """ - x = self.conv3d(inputs) - return x + return self.conv3d(inputs) @_test_with_pir diff --git a/tests/test_nn_Conv3DTranspose.py b/tests/test_nn_Conv3DTranspose.py index 38a04d946..90a065124 100755 --- a/tests/test_nn_Conv3DTranspose.py +++ b/tests/test_nn_Conv3DTranspose.py @@ -12,8 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx, randtool, _test_with_pir class Net(paddle.nn.Layer): @@ -34,7 +35,7 @@ def __init__( bias_attr=None, data_format="NCDHW", ): - super(Net, self).__init__() + super().__init__() self.conv3dTranspose = paddle.nn.Conv3DTranspose( in_channels=in_channels, out_channels=out_channels, @@ -53,8 +54,7 @@ def forward(self, inputs): """ forward """ - x = self.conv3dTranspose(inputs) - return x + return self.conv3dTranspose(inputs) @_test_with_pir diff --git a/tests/test_nn_ELU.py b/tests/test_nn_ELU.py index f264c2180..08a2f21ae 100644 --- a/tests/test_nn_ELU.py +++ b/tests/test_nn_ELU.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,15 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() self._elu = paddle.nn.ELU(alpha=1.0) def forward(self, inputs): """ forward """ - x = self._elu(inputs) - return x + return self._elu(inputs) def test_ELU_9(): diff --git a/tests/test_nn_Embedding.py b/tests/test_nn_Embedding.py index 05614a8d6..5dbdb55f2 100644 --- a/tests/test_nn_Embedding.py +++ b/tests/test_nn_Embedding.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import paddle -from onnxbase import APIOnnx -from onnxbase import _test_with_pir import numpy as np +from onnxbase import APIOnnx, _test_with_pir + +import paddle class Net(paddle.nn.Layer): @@ -24,7 +24,7 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() self._embedding = paddle.nn.Embedding( num_embeddings=10, embedding_dim=3, @@ -38,8 +38,7 @@ def forward(self, inputs): """ forward """ - x = self._embedding(inputs) - return x + return self._embedding(inputs) @_test_with_pir diff --git a/tests/test_nn_Functional_LogSoftmax.py b/tests/test_nn_Functional_LogSoftmax.py index 4913f35cf..ca8e9b168 100644 --- a/tests/test_nn_Functional_LogSoftmax.py +++ b/tests/test_nn_Functional_LogSoftmax.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle import paddle.nn as nn -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -24,7 +24,7 @@ class Net(paddle.nn.Layer): """ def __init__(self, axis=-1, dtype=None): - super(Net, self).__init__() + super().__init__() self.axis = axis self.dtype = dtype @@ -32,8 +32,7 @@ def forward(self, inputs): """ forward """ - x = nn.functional.log_softmax(inputs, axis=self.axis, dtype=self.dtype) - return x + return nn.functional.log_softmax(inputs, axis=self.axis, dtype=self.dtype) def test_nn_functional_LogSigmoid_9(): diff --git a/tests/test_nn_Functional_interpolate.py b/tests/test_nn_Functional_interpolate.py index 0cb57635f..51fdccd2d 100644 --- a/tests/test_nn_Functional_interpolate.py +++ b/tests/test_nn_Functional_interpolate.py @@ -12,11 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_only_pir, randtool + import paddle import paddle.nn as nn -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_only_pir class Net(paddle.nn.Layer): @@ -33,7 +32,7 @@ def __init__( align_mode=0, data_format="NCHW", ): - super(Net, self).__init__() + super().__init__() self.size = size self.scale_factor = scale_factor self.mode = mode @@ -45,7 +44,7 @@ def forward(self, inputs): """ forward """ - x = nn.functional.interpolate( + return nn.functional.interpolate( x=inputs, size=self.size, scale_factor=self.scale_factor, @@ -54,7 +53,6 @@ def forward(self, inputs): align_mode=self.align_mode, data_format=self.data_format, ) - return x @_test_only_pir diff --git a/tests/test_nn_Functional_softsign.py b/tests/test_nn_Functional_softsign.py index 09bc56eac..71d5b3f8b 100644 --- a/tests/test_nn_Functional_softsign.py +++ b/tests/test_nn_Functional_softsign.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.nn.functional.softsign(inputs) - return x + return paddle.nn.functional.softsign(inputs) def test_softsign_9(): diff --git a/tests/test_nn_Functional_thresholded.py b/tests/test_nn_Functional_thresholded.py index c8c945824..3797ca4cb 100644 --- a/tests/test_nn_Functional_thresholded.py +++ b/tests/test_nn_Functional_thresholded.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, randtool + import paddle import paddle.nn as nn -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -24,15 +24,14 @@ class Net(paddle.nn.Layer): """ def __init__(self, threshold=1): - super(Net, self).__init__() + super().__init__() self.threshold = threshold def forward(self, inputs): """ forward """ - x = nn.functional.thresholded_relu(inputs, threshold=self.threshold) - return x + return nn.functional.thresholded_relu(inputs, threshold=self.threshold) def test_nn_functional_thresholded_relu_10(): diff --git a/tests/test_nn_GRU.py b/tests/test_nn_GRU.py index 5f668ecab..e363bdc1b 100644 --- a/tests/test_nn_GRU.py +++ b/tests/test_nn_GRU.py @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -import paddle from onnxbase import APIOnnx, _test_with_pir +import paddle + class Net(paddle.nn.Layer): """ @@ -22,14 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() self._gru = paddle.nn.GRU(16, 32, 2) def forward(self, inputs, inputs_): """ forward """ - x, h = self._gru(inputs, inputs_) + x, _h = self._gru(inputs, inputs_) return x diff --git a/tests/test_nn_GroupNorm.py b/tests/test_nn_GroupNorm.py index 6e0c80215..c413ce426 100644 --- a/tests/test_nn_GroupNorm.py +++ b/tests/test_nn_GroupNorm.py @@ -12,8 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx, randtool, _test_with_pir class Net(paddle.nn.Layer): @@ -22,15 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() self._group_norm = paddle.nn.GroupNorm(num_groups=5, num_channels=10) def forward(self, inputs): """ forward """ - x = self._group_norm(inputs) - return x + return self._group_norm(inputs) @_test_with_pir diff --git a/tests/test_nn_Hardshrink.py b/tests/test_nn_Hardshrink.py index 060370c91..2385b73b6 100644 --- a/tests/test_nn_Hardshrink.py +++ b/tests/test_nn_Hardshrink.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,15 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() self._hardshrink = paddle.nn.Hardshrink() def forward(self, inputs): """ forward """ - x = self._hardshrink(inputs) - return x + return self._hardshrink(inputs) def test_hardshrink_9(): diff --git a/tests/test_nn_Hardsigmoid.py b/tests/test_nn_Hardsigmoid.py index d9a9764d3..f645851ec 100644 --- a/tests/test_nn_Hardsigmoid.py +++ b/tests/test_nn_Hardsigmoid.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,15 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() self._hard_sigmoid = paddle.nn.Hardsigmoid() def forward(self, inputs): """ forward """ - x = self._hard_sigmoid(inputs) - return x + return self._hard_sigmoid(inputs) def test_Hardsigmoid_9(): diff --git a/tests/test_nn_InstanceNorm3D.py b/tests/test_nn_InstanceNorm3D.py index 3f9f6c1d2..da71393a9 100644 --- a/tests/test_nn_InstanceNorm3D.py +++ b/tests/test_nn_InstanceNorm3D.py @@ -12,8 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx, randtool, _test_with_pir class Net(paddle.nn.Layer): @@ -22,7 +23,7 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() self._instance_norm = paddle.nn.InstanceNorm3D( num_features=2, epsilon=1e-05, @@ -37,8 +38,7 @@ def forward(self, inputs): """ forward """ - x = self._instance_norm(inputs) - return x + return self._instance_norm(inputs) @_test_with_pir diff --git a/tests/test_nn_LayerNorm.py b/tests/test_nn_LayerNorm.py index 873a79c87..bc45a159d 100644 --- a/tests/test_nn_LayerNorm.py +++ b/tests/test_nn_LayerNorm.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,15 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() self._layer_norm = paddle.nn.LayerNorm(normalized_shape=[1, 10, 10]) def forward(self, inputs): """ forward """ - x = self._layer_norm(inputs) - return x + return self._layer_norm(inputs) @_test_with_pir diff --git a/tests/test_nn_LeakyReLU.py b/tests/test_nn_LeakyReLU.py index 0ada2df8d..d045abff4 100644 --- a/tests/test_nn_LeakyReLU.py +++ b/tests/test_nn_LeakyReLU.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,15 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() self._leaky_relu = paddle.nn.LeakyReLU(negative_slope=0.01, name=None) def forward(self, inputs): """ forward """ - x = self._leaky_relu(inputs) - return x + return self._leaky_relu(inputs) def test_LeakyReLU_base(): diff --git a/tests/test_nn_LogSigmoid.py b/tests/test_nn_LogSigmoid.py index 2826b1365..a7fea1041 100644 --- a/tests/test_nn_LogSigmoid.py +++ b/tests/test_nn_LogSigmoid.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,15 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() self._Log_Sigmoid = paddle.nn.LogSigmoid() def forward(self, inputs): """ forward """ - x = self._Log_Sigmoid(inputs) - return x + return self._Log_Sigmoid(inputs) def test_nn_LogSigmoid_9(): diff --git a/tests/test_nn_MaxPool1D.py b/tests/test_nn_MaxPool1D.py index dcb6c0b70..f5f0882e2 100644 --- a/tests/test_nn_MaxPool1D.py +++ b/tests/test_nn_MaxPool1D.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,7 +23,7 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() self._max_pool = paddle.nn.MaxPool1D( kernel_size=2, stride=None, @@ -37,8 +37,7 @@ def forward(self, inputs): """ forward """ - x = self._max_pool(inputs) - return x + return self._max_pool(inputs) def test_MaxPool1D_base(): diff --git a/tests/test_nn_MaxPool2D.py b/tests/test_nn_MaxPool2D.py index 1fab4e980..402a25e3d 100644 --- a/tests/test_nn_MaxPool2D.py +++ b/tests/test_nn_MaxPool2D.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -33,7 +32,7 @@ def __init__( data_format="NCHW", name=None, ): - super(Net, self).__init__() + super().__init__() self._max_pool = paddle.nn.MaxPool2D( kernel_size=kernel_size, stride=stride, @@ -48,8 +47,7 @@ def forward(self, inputs): """ forward """ - x = self._max_pool(inputs) - return x + return self._max_pool(inputs) @_test_with_pir diff --git a/tests/test_nn_MaxPool3D.py b/tests/test_nn_MaxPool3D.py index c87259404..fe10efda2 100755 --- a/tests/test_nn_MaxPool3D.py +++ b/tests/test_nn_MaxPool3D.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,7 +23,7 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() self._max_pool = paddle.nn.MaxPool3D( kernel_size=2, stride=None, @@ -38,8 +38,7 @@ def forward(self, inputs): """ forward """ - x = self._max_pool(inputs) - return x + return self._max_pool(inputs) def test_MaxPool3D_base(): diff --git a/tests/test_nn_Pad1D.py b/tests/test_nn_Pad1D.py index afc19ad8c..dbb87d4ca 100644 --- a/tests/test_nn_Pad1D.py +++ b/tests/test_nn_Pad1D.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,7 +23,7 @@ class Net(paddle.nn.Layer): """ def __init__(self, mode="constant", padding=1): - super(Net, self).__init__() + super().__init__() self.mode = mode self.padding = padding self._pad = paddle.nn.Pad1D(padding=self.padding, mode=self.mode) @@ -32,8 +32,7 @@ def forward(self, inputs): """ forward """ - x = self._pad(inputs) - return x + return self._pad(inputs) def test_Pad1D_9(): diff --git a/tests/test_nn_Pad2D.py b/tests/test_nn_Pad2D.py index c2070cbe7..8e72f6e3a 100644 --- a/tests/test_nn_Pad2D.py +++ b/tests/test_nn_Pad2D.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,15 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() self._bn = paddle.nn.Pad2D(padding=1, mode="constant") def forward(self, inputs): """ forward """ - x = self._bn(inputs) - return x + return self._bn(inputs) def test_Pad2D_9(): diff --git a/tests/test_nn_Pad3D.py b/tests/test_nn_Pad3D.py index 60fa7d3bc..b45d158fd 100644 --- a/tests/test_nn_Pad3D.py +++ b/tests/test_nn_Pad3D.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool, _test_with_pir class Net(paddle.nn.Layer): @@ -23,15 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() self._pad = paddle.nn.Pad3D(padding=1, mode="constant") def forward(self, inputs): """ forward """ - x = self._pad(inputs) - return x + return self._pad(inputs) @_test_with_pir @@ -116,15 +115,14 @@ class Net2(paddle.nn.Layer): """ def __init__(self): - super(Net2, self).__init__() + super().__init__() self._pad = paddle.nn.Pad3D(padding=1, mode="circular") def forward(self, inputs): """ forward """ - x = self._pad(inputs) - return x + return self._pad(inputs) @_test_with_pir diff --git a/tests/test_nn_PixelShuffle.py b/tests/test_nn_PixelShuffle.py index e46962183..f0536dc2f 100644 --- a/tests/test_nn_PixelShuffle.py +++ b/tests/test_nn_PixelShuffle.py @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -import paddle from onnxbase import APIOnnx +import paddle + class Net(paddle.nn.Layer): """ @@ -22,7 +23,7 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() self.shuffle = paddle.nn.PixelShuffle( upscale_factor=3, data_format="NCHW", name=None ) @@ -31,8 +32,7 @@ def forward(self, inputs): """ forward """ - x = self.shuffle(inputs) - return x + return self.shuffle(inputs) def test_PixelShuffle_base(): diff --git a/tests/test_nn_Softmax.py b/tests/test_nn_Softmax.py index 231e03997..223450b03 100644 --- a/tests/test_nn_Softmax.py +++ b/tests/test_nn_Softmax.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,15 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() self._softmax = paddle.nn.Softmax(axis=-1, name=None) def forward(self, inputs): """ forward """ - x = self._softmax(inputs) - return x + return self._softmax(inputs) def test_Softmax_9(): diff --git a/tests/test_nn_TanhShrink.py b/tests/test_nn_TanhShrink.py index d556c4956..9b4c51c5e 100644 --- a/tests/test_nn_TanhShrink.py +++ b/tests/test_nn_TanhShrink.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,15 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() self._tanhshrink = paddle.nn.Tanhshrink() def forward(self, inputs): """ forward """ - x = self._tanhshrink(inputs) - return x + return self._tanhshrink(inputs) def test_nn_tanhshrink_9(): diff --git a/tests/test_nn_Upsample.py b/tests/test_nn_Upsample.py index f21d87452..87231d477 100755 --- a/tests/test_nn_Upsample.py +++ b/tests/test_nn_Upsample.py @@ -12,8 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx, randtool, _test_with_pir class Net(paddle.nn.Layer): @@ -30,7 +31,7 @@ def __init__( align_mode=0, data_format="NCHW", ): - super(Net, self).__init__() + super().__init__() self.size = size self.scale_factor = scale_factor self.mode = mode @@ -42,7 +43,7 @@ def forward(self, inputs): """ forward """ - x = paddle.nn.functional.upsample( + return paddle.nn.functional.upsample( x=inputs, size=self.size, scale_factor=self.scale_factor, @@ -51,7 +52,6 @@ def forward(self, inputs): align_mode=self.align_mode, data_format=self.data_format, ) - return x @_test_with_pir diff --git a/tests/test_nn_initializer_Uniform.py b/tests/test_nn_initializer_Uniform.py index 66a7080c3..caf69bd3d 100644 --- a/tests/test_nn_initializer_Uniform.py +++ b/tests/test_nn_initializer_Uniform.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,7 +23,7 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() self.weight_attr = paddle.framework.ParamAttr( name="linear_weight", initializer=paddle.nn.initializer.Uniform(low=-0.5, high=0.5), @@ -41,8 +40,7 @@ def forward(self, inputs): """ forward """ - x = self._linear(inputs) - return x + return self._linear(inputs) @_test_with_pir diff --git a/tests/test_nonzero.py b/tests/test_nonzero.py index c4c3bd087..c6bb63074 100644 --- a/tests/test_nonzero.py +++ b/tests/test_nonzero.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.nonzero(inputs, as_tuple=False) - return x + return paddle.nonzero(inputs, as_tuple=False) @_test_with_pir diff --git a/tests/test_normalize.py b/tests/test_normalize.py index 97efe33fe..ca5a057c2 100644 --- a/tests/test_normalize.py +++ b/tests/test_normalize.py @@ -12,8 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx, randtool, _test_with_pir class Net(paddle.nn.Layer): @@ -22,16 +23,15 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.nn.functional.normalize( + return paddle.nn.functional.normalize( inputs, p=2, axis=1, epsilon=1e-12, name=None ) - return x @_test_with_pir diff --git a/tests/test_numel.py b/tests/test_numel.py index 589e197ff..f9a31281b 100644 --- a/tests/test_numel.py +++ b/tests/test_numel.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.numel(inputs) - return x + return paddle.numel(inputs) @_test_with_pir diff --git a/tests/test_pow.py b/tests/test_pow.py index ce09250b1..dd964e8c2 100644 --- a/tests/test_pow.py +++ b/tests/test_pow.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.pow(inputs, 2) - return x + return paddle.pow(inputs, 2) @_test_with_pir diff --git a/tests/test_prelu.py b/tests/test_prelu.py index c2fa420d1..a047277fd 100644 --- a/tests/test_prelu.py +++ b/tests/test_prelu.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_only_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_only_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.nn.functional.prelu(inputs, paddle.to_tensor([0.25])) - return x + return paddle.nn.functional.prelu(inputs, paddle.to_tensor([0.25])) @_test_only_pir diff --git a/tests/test_quantize_linear.py b/tests/test_quantize_linear.py index cc2b53736..1c9a6727e 100755 --- a/tests/test_quantize_linear.py +++ b/tests/test_quantize_linear.py @@ -13,14 +13,16 @@ # limitations under the License. import os -import paddle -import numpy as np -import paddle2onnx import unittest + +import numpy as np import onnxruntime as ort -from paddle.base import unique_name from onnxbase import _test_only_pir +import paddle +import paddle2onnx +from paddle.base import unique_name + def convert_scale_to_paddle(onnx_scale, qmax): return onnx_scale * qmax @@ -49,7 +51,7 @@ def build_static_net(input_shape, quant_axis, scale_shape, qmin, qmax, type): initializer=paddle.nn.initializer.Constant(0.0), stop_gradient=True, ) - quant_out, out_state, out_accum, out_scale = paddle._C_ops.quantize_linear( + quant_out, _out_state, _out_accum, _out_scale = paddle._C_ops.quantize_linear( x, scale, zero_points, diff --git a/tests/test_quantize_model.py b/tests/test_quantize_model.py index 84fa4b2dd..807442e94 100755 --- a/tests/test_quantize_model.py +++ b/tests/test_quantize_model.py @@ -11,19 +11,21 @@ # without warranties or conditions of any kind, either express or implied. # see the license for the specific language governing permissions and # limitations under the license. 
-import unittest +import functools import os -import time -import sys +import platform import random -import functools +import sys +import time +import unittest + import numpy as np -from paddle.static.io import load_inference_model from PIL import Image + import paddle from paddle.dataset.common import download +from paddle.static.io import load_inference_model from paddle.static.quantization import PostTrainingQuantization -import platform if platform.system() == "Windows": os.system("set no_proxy=bcebos.com") @@ -46,10 +48,9 @@ def resize_short(img, target_size): percent = float(target_size) / min(img.size[0], img.size[1]) - resized_width = int(round(img.size[0] * percent)) - resized_height = int(round(img.size[1] * percent)) - img = img.resize((resized_width, resized_height), Image.LANCZOS) - return img + resized_width = round(img.size[0] * percent) + resized_height = round(img.size[1] * percent) + return img.resize((resized_width, resized_height), Image.LANCZOS) def crop_image(img, target_size, center): @@ -63,8 +64,7 @@ def crop_image(img, target_size, center): h_start = np.random.randint(0, height - size + 1) w_end = w_start + size h_end = h_start + size - img = img.crop((w_start, h_start, w_end, h_end)) - return img + return img.crop((w_start, h_start, w_end, h_end)) def process_image(sample, mode, color_jitter, rotate): @@ -155,7 +155,7 @@ def tearDown(self): def cache_unzipping(self, target_folder, zip_path): if not os.path.exists(target_folder): - cmd = "mkdir {0} && tar xf {1} -C {0}".format(target_folder, zip_path) + cmd = f"mkdir {target_folder} && tar xf {zip_path} -C {target_folder}" os.system(cmd) def download_data(self, data_urls, data_md5s, folder_name, is_model=True): @@ -163,7 +163,7 @@ def download_data(self, data_urls, data_md5s, folder_name, is_model=True): zip_path = "" if os.environ.get("DATASET") == "full": file_names = [] - for i in range(0, len(data_urls)): + for i in range(len(data_urls)): download(data_urls[i], self.int8_download, 
data_md5s[i]) file_names.append(data_urls[i].split("/")[-1]) @@ -180,7 +180,7 @@ def download_data(self, data_urls, data_md5s, folder_name, is_model=True): file_name = data_urls[0].split("/")[-1] zip_path = os.path.join(self.cache_folder, file_name) - print("Data is downloaded at {0}".format(zip_path)) + print(f"Data is downloaded at {zip_path}") self.cache_unzipping(data_cache_folder, zip_path) return data_cache_folder @@ -205,6 +205,7 @@ def run_program( sess = None if run_onnxruntime: import onnxruntime as rt + import paddle2onnx onnx_model = paddle2onnx.command.c_paddle_to_onnx( @@ -266,12 +267,12 @@ def run_program( cnt += len(data) if (batch_id + 1) % 100 == 0: - print("{0} images,".format(batch_id + 1)) + print(f"{batch_id + 1} images,") sys.stdout.flush() if (batch_id + 1) == iterations: break result = np.mean(np.array(results), axis=0) - print("top1_acc = {}".format(result)) + print(f"top1_acc = {result}") throughput = cnt / np.sum(periods) latency = np.average(periods) acc1 = result @@ -292,7 +293,7 @@ def generate_quantized_model( try: os.system("mkdir " + self.int8_model) except Exception as e: - print("Failed to create {} due to {}".format(self.int8_model, str(e))) + print(f"Failed to create {self.int8_model} due to {e!s}") sys.exit(-1) place = paddle.CPUPlace() @@ -337,9 +338,7 @@ def run_test( model_cache_folder = os.path.join(self.cache_folder, model) print( - "Start FP32 inference for {0} on {1} images ...".format( - model, infer_iterations * batch_size - ) + f"Start FP32 inference for {model} on {infer_iterations * batch_size} images ..." ) (fp32_throughput, fp32_latency, fp32_acc1) = self.run_program( model_cache_folder, @@ -350,9 +349,7 @@ def run_test( ) print( - "Start INT8 post training quantization for {0} on {1} images ...".format( - model, sample_iterations * batch_size - ) + f"Start INT8 post training quantization for {model} on {sample_iterations * batch_size} images ..." 
) self.generate_quantized_model( model_cache_folder, @@ -367,9 +364,7 @@ def run_test( ) print( - "Start INT8 inference for {0} on {1} images ...".format( - model, infer_iterations * batch_size - ) + f"Start INT8 inference for {model} on {infer_iterations * batch_size} images ..." ) (int8_throughput, int8_latency, int8_acc1) = self.run_program( self.int8_model, @@ -380,9 +375,7 @@ def run_test( ) print( - "Start use ONNXRuntime inference for {0} on {1} images ...".format( - model, infer_iterations * batch_size - ) + f"Start use ONNXRuntime inference for {model} on {infer_iterations * batch_size} images ..." ) (onnx_int8_throughput, onnx_int8_latency, onnx_int8_acc1) = self.run_program( self.int8_model, @@ -393,25 +386,15 @@ def run_test( run_onnxruntime=True, ) - print("---Post training quantization of {} method---".format(algo)) + print(f"---Post training quantization of {algo} method---") print( - "FP32 {0}: batch_size {1}, throughput {2} images/second, latency {3} second, accuracy {4}.".format( - model, batch_size, fp32_throughput, fp32_latency, fp32_acc1 - ) + f"FP32 {model}: batch_size {batch_size}, throughput {fp32_throughput} images/second, latency {fp32_latency} second, accuracy {fp32_acc1}." ) print( - "INT8 {0}: batch_size {1}, throughput {2} images/second, latency {3} second, accuracy {4}.".format( - model, batch_size, int8_throughput, int8_latency, int8_acc1 - ) + f"INT8 {model}: batch_size {batch_size}, throughput {int8_throughput} images/second, latency {int8_latency} second, accuracy {int8_acc1}." 
) print( - "ONNXRuntime INT8 {0}: batch_size {1}, throughput {2} images/second, latency {3} second, accuracy {4}.\n".format( - model, - batch_size, - onnx_int8_throughput, - onnx_int8_latency, - onnx_int8_acc1, - ) + f"ONNXRuntime INT8 {model}: batch_size {batch_size}, throughput {onnx_int8_throughput} images/second, latency {onnx_int8_latency} second, accuracy {onnx_int8_acc1}.\n" ) sys.stdout.flush() diff --git a/tests/test_quantize_model_minist.py b/tests/test_quantize_model_minist.py index 160035b9c..31e4f157f 100755 --- a/tests/test_quantize_model_minist.py +++ b/tests/test_quantize_model_minist.py @@ -11,11 +11,12 @@ # without warranties or conditions of any kind, either express or implied. # see the license for the specific language governing permissions and # limitations under the license. -import unittest import os -import time -import sys import random +import sys +import time +import unittest + import numpy as np import paddle import paddle.fluid as fluid @@ -37,15 +38,15 @@ def setUp(self): try: os.system("mkdir -p " + self.int8_model_path) except Exception as e: - print("Failed to create {} due to {}".format(self.int8_model_path, str(e))) + print(f"Failed to create {self.int8_model_path} due to {e!s}") sys.exit(-1) def tearDown(self): pass def merge_params(self, input_model_path, output_model_path): - import paddle.fluid as fluid import paddle + import paddle.fluid as fluid paddle.enable_static() model_dir = input_model_path @@ -83,6 +84,7 @@ def run_program( sess = None if use_onnxruntime: import onnxruntime as rt + import paddle2onnx new_model_path = model_path @@ -157,7 +159,7 @@ def generate_quantized_model( self, model_path, algo="KL", - quantizable_op_type=["conv2d"], + quantizable_op_type=None, is_full_quantize=False, is_use_cache_file=False, is_optimize_model=False, @@ -166,7 +168,8 @@ def generate_quantized_model( onnx_format=False, skip_tensor_list=None, ): - + if quantizable_op_type is None: + quantizable_op_type = ["conv2d"] place = 
fluid.CPUPlace() exe = fluid.Executor(place) val_reader = paddle.dataset.mnist.train() @@ -213,27 +216,21 @@ def run_test( origin_model_path = os.path.join(self.cache_folder, model_name) print( - "Start FP32 inference for {0} on {1} images ...".format( - model_name, infer_iterations * batch_size - ) + f"Start FP32 inference for {model_name} on {infer_iterations * batch_size} images ..." ) (fp32_throughput, fp32_latency, fp32_acc1) = self.run_program( origin_model_path, batch_size, infer_iterations ) print( - "Start FP32 inference on onnxruntime for {0} on {1} images ...".format( - model_name, infer_iterations * batch_size - ) + f"Start FP32 inference on onnxruntime for {model_name} on {infer_iterations * batch_size} images ..." ) (onnx_fp32_throughput, onnx_fp32_latency, onnx_fp32_acc1) = self.run_program( origin_model_path, batch_size, infer_iterations, use_onnxruntime=True ) print( - "Start INT8 post training quantization for {0} on {1} images ...".format( - model_name, quant_iterations * batch_size - ) + f"Start INT8 post training quantization for {model_name} on {quant_iterations * batch_size} images ..." ) self.generate_quantized_model( origin_model_path, @@ -249,9 +246,7 @@ def run_test( ) print( - "Start INT8 inference for {0} on {1} images ...".format( - model_name, infer_iterations * batch_size - ) + f"Start INT8 inference for {model_name} on {infer_iterations * batch_size} images ..." ) (int8_throughput, int8_latency, int8_acc1) = self.run_program( self.int8_model_path, @@ -262,9 +257,7 @@ def run_test( ) print( - "Start INT8 inference on onnxruntime for {0} on {1} images ...".format( - model_name, infer_iterations * batch_size - ) + f"Start INT8 inference on onnxruntime for {model_name} on {infer_iterations * batch_size} images ..." 
) (onnx_int8_throughput, onnx_int8_latency, onnx_int8_acc1) = self.run_program( self.int8_model_path, @@ -275,34 +268,18 @@ def run_test( use_onnxruntime=True, ) - print("---Post training quantization of {} method---".format(algo)) + print(f"---Post training quantization of {algo} method---") print( - "FP32 {0}: batch_size {1}, throughput {2} img/s, latency {3} s, acc1 {4}.".format( - model_name, batch_size, fp32_throughput, fp32_latency, fp32_acc1 - ) + f"FP32 {model_name}: batch_size {batch_size}, throughput {fp32_throughput} img/s, latency {fp32_latency} s, acc1 {fp32_acc1}." ) print( - "ONNXRuntime FP32 {0}: batch_size {1}, throughput {2} img/s, latency {3} s, acc1 {4}.".format( - model_name, - batch_size, - onnx_fp32_throughput, - onnx_fp32_latency, - onnx_fp32_acc1, - ) + f"ONNXRuntime FP32 {model_name}: batch_size {batch_size}, throughput {onnx_fp32_throughput} img/s, latency {onnx_fp32_latency} s, acc1 {onnx_fp32_acc1}." ) print( - "INT8 {0}: batch_size {1}, throughput {2} img/s, latency {3} s, acc1 {4}.\n".format( - model_name, batch_size, int8_throughput, int8_latency, int8_acc1 - ) + f"INT8 {model_name}: batch_size {batch_size}, throughput {int8_throughput} img/s, latency {int8_latency} s, acc1 {int8_acc1}.\n" ) print( - "ONNXRuntime INT8 {0}: batch_size {1}, throughput {2} img/s, latency {3} s, acc1 {4}.\n".format( - model_name, - batch_size, - onnx_int8_throughput, - onnx_int8_latency, - onnx_int8_acc1, - ) + f"ONNXRuntime INT8 {model_name}: batch_size {batch_size}, throughput {onnx_int8_throughput} img/s, latency {onnx_int8_latency} s, acc1 {onnx_int8_acc1}.\n" ) sys.stdout.flush() diff --git a/tests/test_quantize_model_speedup.py b/tests/test_quantize_model_speedup.py index bcca64c73..25ee3dfef 100755 --- a/tests/test_quantize_model_speedup.py +++ b/tests/test_quantize_model_speedup.py @@ -11,11 +11,12 @@ # without warranties or conditions of any kind, either express or implied. 
# see the license for the specific language governing permissions and # limitations under the license. -import unittest import os -import time -import sys import random +import sys +import time +import unittest + import numpy as np import paddle from fake_quant import post_quant_fake @@ -40,6 +41,7 @@ def run_program( ): print("test model path:" + model_path) import onnxruntime as rt + import paddle2onnx onnx_model = paddle2onnx.command.c_paddle_to_onnx( @@ -78,8 +80,7 @@ def run_program( period = t2 - t1 periods.append(period) - latency = np.average(periods) - return latency + return np.average(periods) def generate_quantized_model( self, @@ -111,12 +112,12 @@ def run_test( self.model_name = model_name origin_model_path = os.path.join(self.quantize_model_dir, model_name) - print("Start FP32 inference for {0} ...".format(model_name)) + print(f"Start FP32 inference for {model_name} ...") fp32_latency = self.run_program( origin_model_path, model_filename, params_filename, threads_num ) - print("Start INT8 post training quantization for {0} ...".format(model_name)) + print(f"Start INT8 post training quantization for {model_name} ...") quantize_model_path = os.path.join( self.quantize_model_dir, model_name + "_quantized" ) @@ -124,7 +125,7 @@ def run_test( origin_model_path, quantize_model_path, model_filename, params_filename ) - print("Start INT8 inference for {0} ...".format(model_name)) + print(f"Start INT8 inference for {model_name} ...") if ".pdmodel" in model_filename: int8_latency = self.run_program( quantize_model_path, model_filename, params_filename, threads_num ) @@ -138,8 +139,8 @@ ) print("---Post training quantization---") - print("FP32 lentency {0}: latency {1} s.".format(model_name, fp32_latency)) - print("INT8 {0}: latency {1} s.\n".format(model_name, int8_latency)) + print(f"FP32 latency {model_name}: latency {fp32_latency} s.") + print(f"INT8 {model_name}: latency {int8_latency} s.\n") sys.stdout.flush() latency_diff = int8_latency - 
fp32_latency diff --git a/tests/test_reciprocal.py b/tests/test_reciprocal.py index cdf5770ee..fc8001121 100644 --- a/tests/test_reciprocal.py +++ b/tests/test_reciprocal.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.reciprocal(inputs) - return x + return paddle.reciprocal(inputs) @_test_with_pir diff --git a/tests/test_register_buffer.py b/tests/test_register_buffer.py index 68fa1f5de..607788ff7 100644 --- a/tests/test_register_buffer.py +++ b/tests/test_register_buffer.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,7 +23,7 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() self.register_buffer( "my_buffer", paddle.to_tensor(10, dtype="float32"), diff --git a/tests/test_relu.py b/tests/test_relu.py index f64eeb8ab..4575a29b4 100644 --- a/tests/test_relu.py +++ b/tests/test_relu.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.nn.functional.relu(inputs) - return x + return paddle.nn.functional.relu(inputs) @_test_with_pir diff --git a/tests/test_relu6.py b/tests/test_relu6.py index d81233bb0..afac67927 100644 --- a/tests/test_relu6.py +++ b/tests/test_relu6.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.nn.functional.relu6(inputs) - return x + return paddle.nn.functional.relu6(inputs) @_test_with_pir diff --git a/tests/test_repeat_interleave.py b/tests/test_repeat_interleave.py index 1e4d48cda..15e978157 100644 --- a/tests/test_repeat_interleave.py +++ b/tests/test_repeat_interleave.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_only_pir + import paddle -from onnxbase import APIOnnx -from onnxbase import _test_only_pir class Net(paddle.nn.Layer): @@ -23,7 +23,7 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ @@ -31,8 +31,7 @@ def forward(self, inputs): """ # repeats = paddle.to_tensor([3,2,1], dtype='int32') - x = paddle.repeat_interleave(inputs, repeats=2, axis=0) - return x + return paddle.repeat_interleave(inputs, repeats=2, axis=0) @_test_only_pir @@ -58,7 +57,7 @@ class Net2(paddle.nn.Layer): """ def __init__(self): - super(Net2, self).__init__() + super().__init__() def forward(self, inputs): """ @@ -66,8 +65,7 @@ def forward(self, inputs): """ repeats = paddle.to_tensor([3, 2, 1], dtype="int32") - x = paddle.repeat_interleave(inputs, repeats=repeats, axis=1) - return x + return paddle.repeat_interleave(inputs, repeats=repeats, axis=1) @_test_only_pir diff --git a/tests/test_reshape.py b/tests/test_reshape.py index e06fdcefb..93848dda4 100644 --- a/tests/test_reshape.py +++ b/tests/test_reshape.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.reshape(inputs, [3, -1, 2]) - return x + return paddle.reshape(inputs, [3, -1, 2]) def test_reshape_9(): diff --git a/tests/test_resnet_fp16.py b/tests/test_resnet_fp16.py index 69d7737d0..99139683c 100644 --- a/tests/test_resnet_fp16.py +++ b/tests/test_resnet_fp16.py @@ -13,12 +13,13 @@ # limitations under the License. 
import os + import numpy as np import onnxruntime import paddle import paddle2onnx -from paddle.inference import PrecisionType, PlaceType, convert_to_mixed_precision +from paddle.inference import PlaceType, PrecisionType, convert_to_mixed_precision def test_resnet_fp16_convert(): diff --git a/tests/test_roi_align.py b/tests/test_roi_align.py index 8b50ab568..3fd889f2b 100644 --- a/tests/test_roi_align.py +++ b/tests/test_roi_align.py @@ -12,14 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_only_pir + import paddle -from onnxbase import APIOnnx -from onnxbase import _test_only_pir class BaseNet(paddle.nn.Layer): def __init__(self): - super(BaseNet, self).__init__() + super().__init__() self.output_size = 3 self.spatial_scale = 1.0 self.sampling_ratio = -1 diff --git a/tests/test_roll.py b/tests/test_roll.py index 571c15898..0e7531839 100644 --- a/tests/test_roll.py +++ b/tests/test_roll.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.roll(inputs, 1) - return x + return paddle.roll(inputs, 1) @_test_with_pir diff --git a/tests/test_round.py b/tests/test_round.py index cde4b1bb6..cad181f9e 100644 --- a/tests/test_round.py +++ b/tests/test_round.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.round(inputs) - return x + return paddle.round(inputs) @_test_with_pir diff --git a/tests/test_rsqrt.py b/tests/test_rsqrt.py index 07efc45cd..4aa9ecef8 100644 --- a/tests/test_rsqrt.py +++ b/tests/test_rsqrt.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.rsqrt(inputs) - return x + return paddle.rsqrt(inputs) def test_rsqrt_10(): diff --git a/tests/test_scatter.py b/tests/test_scatter.py index c262a7dd0..26a894e63 100644 --- a/tests/test_scatter.py +++ b/tests/test_scatter.py @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import paddle from onnxbase import APIOnnx +import paddle + class Net(paddle.nn.Layer): """ @@ -22,15 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self, overwrite=True): - super(Net, self).__init__() + super().__init__() self.overwrite = overwrite def forward(self, inputs, _index, _updates): """ forward """ - x = paddle.scatter(inputs, _index, _updates, overwrite=self.overwrite) - return x + return paddle.scatter(inputs, _index, _updates, overwrite=self.overwrite) def test_scatter_11(): diff --git a/tests/test_scatter_nd_add.py b/tests/test_scatter_nd_add.py index 503ebd737..33b8cd0b9 100644 --- a/tests/test_scatter_nd_add.py +++ b/tests/test_scatter_nd_add.py @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -import paddle from onnxbase import APIOnnx, _test_with_pir +import paddle + class Net(paddle.nn.Layer): """ @@ -22,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, _index, _updates): """ forward """ - x = paddle.scatter_nd_add(inputs, _index, _updates) - return x + return paddle.scatter_nd_add(inputs, _index, _updates) @_test_with_pir diff --git a/tests/test_selu.py b/tests/test_selu.py index 612bc82dd..798032873 100644 --- a/tests/test_selu.py +++ b/tests/test_selu.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -27,7 +27,7 @@ def __init__( alpha=1.6732632423543772848170429916717, scale=1.0507009873554804934193349852946, ): - super(Net, self).__init__() + super().__init__() self.alpha = alpha self.scale = scale @@ -35,8 +35,7 @@ def forward(self, inputs): """ forward """ - x = paddle.nn.functional.selu(inputs, alpha=self.alpha, scale=self.scale) - return x + return paddle.nn.functional.selu(inputs, alpha=self.alpha, scale=self.scale) def test_nn_functional_selu_10(): diff --git a/tests/test_set_value.py b/tests/test_set_value.py index a0f88edba..c820b3029 100644 --- a/tests/test_set_value.py +++ b/tests/test_set_value.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_only_pir + import paddle -from onnxbase import APIOnnx -from onnxbase import _test_only_pir class Net(paddle.nn.Layer): @@ -23,7 +23,7 @@ class Net(paddle.nn.Layer): """ def __init__(self, config): - super(Net, self).__init__() + super().__init__() self.config = config def forward(self, input): diff --git a/tests/test_shape.py b/tests/test_shape.py index 8aaac5f79..afb39466b 100644 --- a/tests/test_shape.py +++ b/tests/test_shape.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.shape(inputs).astype("int32") - return x + return paddle.shape(inputs).astype("int32") @_test_with_pir diff --git a/tests/test_share_data.py b/tests/test_share_data.py index 01b799caa..da70c089a 100644 --- a/tests/test_share_data.py +++ b/tests/test_share_data.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_only_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_only_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle._C_ops.share_data(inputs) - return x + return paddle._C_ops.share_data(inputs) @_test_only_pir diff --git a/tests/test_sigmoid.py b/tests/test_sigmoid.py index 62bebc98c..0ba12bb74 100644 --- a/tests/test_sigmoid.py +++ b/tests/test_sigmoid.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.nn.functional.sigmoid(inputs) - return x + return paddle.nn.functional.sigmoid(inputs) @_test_with_pir diff --git a/tests/test_sign.py b/tests/test_sign.py index 7a56c0427..df1ad42a1 100644 --- a/tests/test_sign.py +++ b/tests/test_sign.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.sign(inputs) - return x + return paddle.sign(inputs) def test_sign_9(): diff --git a/tests/test_sin.py b/tests/test_sin.py index 3a3683262..e4dd47f9a 100644 --- a/tests/test_sin.py +++ b/tests/test_sin.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.sin(inputs) - return x + return paddle.sin(inputs) @_test_with_pir diff --git a/tests/test_sinh.py b/tests/test_sinh.py index 5ed045008..307b12c14 100644 --- a/tests/test_sinh.py +++ b/tests/test_sinh.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.sinh(inputs) - return x + return paddle.sinh(inputs) @_test_with_pir diff --git a/tests/test_slice.py b/tests/test_slice.py index e5fe6a2b1..a11bcb907 100644 --- a/tests/test_slice.py +++ b/tests/test_slice.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.slice(inputs, axes=[0, 1], starts=[1, 0], ends=[4, 7]) - return x + return paddle.slice(inputs, axes=[0, 1], starts=[1, 0], ends=[4, 7]) # @_test_with_pir diff --git a/tests/test_softplus.py b/tests/test_softplus.py index 453b7ab5b..37507e677 100644 --- a/tests/test_softplus.py +++ b/tests/test_softplus.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.nn.functional.softplus(inputs) - return x + return paddle.nn.functional.softplus(inputs) @_test_with_pir diff --git a/tests/test_softshrink.py b/tests/test_softshrink.py index ad41351f6..6efc57ac7 100644 --- a/tests/test_softshrink.py +++ b/tests/test_softshrink.py @@ -12,11 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle import paddle.nn as nn -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -25,15 +24,14 @@ class Net(paddle.nn.Layer): """ def __init__(self, threshold=0.5): - super(Net, self).__init__() + super().__init__() self.threshold = threshold def forward(self, inputs): """ forward """ - x = nn.functional.softshrink(inputs, threshold=self.threshold) - return x + return nn.functional.softshrink(inputs, threshold=self.threshold) @_test_with_pir diff --git a/tests/test_split.py b/tests/test_split.py index f9aab469b..18369b88b 100644 --- a/tests/test_split.py +++ b/tests/test_split.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.split(inputs, num_or_sections=5, axis=1) - return x + return paddle.split(inputs, num_or_sections=5, axis=1) class Net2(paddle.nn.Layer): @@ -40,14 +38,13 @@ class Net2(paddle.nn.Layer): """ def __init__(self): - super(Net2, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.split(inputs, num_or_sections=[2, 3, 5], axis=-1) - return x + return paddle.split(inputs, num_or_sections=[2, 3, 5], axis=-1) # @_test_with_pir diff --git a/tests/test_square.py b/tests/test_square.py index dbf1591b7..e16c9adf0 100644 --- a/tests/test_square.py +++ b/tests/test_square.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.square(inputs) - return x + return paddle.square(inputs) @_test_with_pir diff --git a/tests/test_squeeze.py b/tests/test_squeeze.py index d33f91300..41e301fad 100644 --- a/tests/test_squeeze.py +++ b/tests/test_squeeze.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,15 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self, axis=0): - super(Net, self).__init__() + super().__init__() self.axis = axis def forward(self, inputs): """ forward """ - x = paddle.squeeze(inputs, axis=self.axis) - return x + return paddle.squeeze(inputs, axis=self.axis) def test_squeeze_9(): diff --git a/tests/test_stack.py b/tests/test_stack.py index 7acf78d35..c9823e980 100644 --- a/tests/test_stack.py +++ b/tests/test_stack.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, inputs_): """ forward """ - x = paddle.stack([inputs, inputs_]) - return x + return paddle.stack([inputs, inputs_]) @_test_with_pir diff --git a/tests/test_sum.py b/tests/test_sum.py index 9fe62a56d..26f31c65e 100644 --- a/tests/test_sum.py +++ b/tests/test_sum.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.sum(inputs, axis=0) - return x + return paddle.sum(inputs, axis=0) def test_sum_13_18(): diff --git a/tests/test_swish.py b/tests/test_swish.py index 8411e942b..d50edfe86 100644 --- a/tests/test_swish.py +++ b/tests/test_swish.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.nn.functional.swish(inputs) - return x + return paddle.nn.functional.swish(inputs) @_test_with_pir diff --git a/tests/test_tan.py b/tests/test_tan.py index eba01623a..7e0174efb 100644 --- a/tests/test_tan.py +++ b/tests/test_tan.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.tan(inputs) - return x + return paddle.tan(inputs) @_test_with_pir diff --git a/tests/test_tanh.py b/tests/test_tanh.py index 5a2f40703..5e179c3a0 100644 --- a/tests/test_tanh.py +++ b/tests/test_tanh.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool, _test_with_pir class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.tanh(inputs) - return x + return paddle.tanh(inputs) @_test_with_pir diff --git a/tests/test_tensor_array.py b/tests/test_tensor_array.py index b15b0ac99..afbcb5860 100644 --- a/tests/test_tensor_array.py +++ b/tests/test_tensor_array.py @@ -12,14 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_only_pir + import paddle -from onnxbase import APIOnnx -from onnxbase import _test_only_pir class BaseNet(paddle.nn.Layer): def __init__(self): - super(BaseNet, self).__init__() + super().__init__() def forward(self, input): arr = paddle.tensor.create_array(dtype="float32") diff --git a/tests/test_tile.py b/tests/test_tile.py index ffc2b1090..322e4f149 100644 --- a/tests/test_tile.py +++ b/tests/test_tile.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.tile(inputs, repeat_times=[2, 1]) - return x + return paddle.tile(inputs, repeat_times=[2, 1]) @_test_with_pir diff --git a/tests/test_topk.py b/tests/test_topk.py index efaebdc72..9018d3827 100644 --- a/tests/test_topk.py +++ b/tests/test_topk.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir + import paddle -from onnxbase import APIOnnx -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -23,7 +23,7 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ diff --git a/tests/test_transpose.py b/tests/test_transpose.py index 10fc8aee8..1910fe2bd 100644 --- a/tests/test_transpose.py +++ b/tests/test_transpose.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.transpose(inputs, perm=[1, 0, 2]) - return x + return paddle.transpose(inputs, perm=[1, 0, 2]) @_test_with_pir diff --git a/tests/test_tril_triu.py b/tests/test_tril_triu.py index e7394c9d7..89023c911 100644 --- a/tests/test_tril_triu.py +++ b/tests/test_tril_triu.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,7 +23,7 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ @@ -59,7 +58,7 @@ class Net2(paddle.nn.Layer): """ def __init__(self): - super(Net2, self).__init__() + super().__init__() def forward(self, inputs): """ @@ -94,7 +93,7 @@ class Net3(paddle.nn.Layer): """ def __init__(self): - super(Net3, self).__init__() + super().__init__() def forward(self, inputs): """ @@ -129,7 +128,7 @@ class Net4(paddle.nn.Layer): """ def __init__(self): - super(Net4, self).__init__() + super().__init__() def forward(self, inputs): """ diff --git a/tests/test_unbind.py b/tests/test_unbind.py index 3912181f0..adb3007c4 100644 --- a/tests/test_unbind.py +++ b/tests/test_unbind.py @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import paddle from onnxbase import APIOnnx, _test_with_pir +import paddle + class Net(paddle.nn.Layer): """ @@ -22,7 +23,7 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, axis=1): """ diff --git a/tests/test_unfold.py b/tests/test_unfold.py index c6783d6aa..dcf7c86b1 100644 --- a/tests/test_unfold.py +++ b/tests/test_unfold.py @@ -12,11 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool import paddle.nn.functional as F -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -25,14 +24,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, x): """ forward """ - x = F.unfold(x, [3, 3], 1, 1, 1) - return x + return F.unfold(x, [3, 3], 1, 1, 1) @_test_with_pir diff --git a/tests/test_unique.py b/tests/test_unique.py index dad9665e6..280f0f9f8 100644 --- a/tests/test_unique.py +++ b/tests/test_unique.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,16 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self, axis=None): - super(Net, self).__init__() + super().__init__() self.axis = axis def forward(self, inputs): """ forward """ - x = paddle.unique(inputs, axis=self.axis) - - return x + return paddle.unique(inputs, axis=self.axis) @_test_with_pir @@ -95,7 +92,7 @@ class Net_mult_2(paddle.nn.Layer): def __init__( self, return_index=False, return_inverse=False, return_counts=False, axis=None ): - super(Net_mult_2, self).__init__() + super().__init__() self.return_index = return_index self.return_inverse = return_inverse self.return_counts = return_counts @@ -175,7 +172,7 @@ class Net_mult_3(paddle.nn.Layer): def __init__( self, return_index=False, return_inverse=False, return_counts=False, axis=None ): - super(Net_mult_3, self).__init__() + super().__init__() self.return_index = return_index self.return_inverse = return_inverse self.return_counts = return_counts @@ -253,7 +250,7 @@ class Net_mult_all(paddle.nn.Layer): """ def __init__(self, axis=None): - super(Net_mult_all, self).__init__() + super().__init__() self.axis = axis def forward(self, inputs): diff --git a/tests/test_unsqueeze.py b/tests/test_unsqueeze.py index 4d5a881ff..28102ab62 100755 --- a/tests/test_unsqueeze.py +++ b/tests/test_unsqueeze.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,15 +23,14 @@ class Net(paddle.nn.Layer): """ def __init__(self, axis=0): - super(Net, self).__init__() + super().__init__() self.axis = axis def forward(self, inputs): """ forward """ - x = paddle.unsqueeze(inputs, axis=self.axis) - return x + return paddle.unsqueeze(inputs, axis=self.axis) @_test_with_pir diff --git a/tests/test_where.py b/tests/test_where.py index 92fd1c1e2..f1f36db15 100644 --- a/tests/test_where.py +++ b/tests/test_where.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from onnxbase import APIOnnx, _test_with_pir, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool -from onnxbase import _test_with_pir class Net(paddle.nn.Layer): @@ -24,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs, _inputs): """ forward """ - x = paddle.where(inputs < _inputs, inputs, _inputs) - return x + return paddle.where(inputs < _inputs, inputs, _inputs) @_test_with_pir diff --git a/tests/test_while.py b/tests/test_while.py index c4e83b258..3ec214c1d 100644 --- a/tests/test_while.py +++ b/tests/test_while.py @@ -12,14 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, _test_with_pir + import paddle -from onnxbase import APIOnnx -from onnxbase import _test_with_pir class BaseNet1(paddle.nn.Layer): def __init__(self): - super(BaseNet1, self).__init__() + super().__init__() def forward(self, inputs): i = 0 @@ -40,7 +40,7 @@ def test_while_1(): class BaseNet2(paddle.nn.Layer): def __init__(self): - super(BaseNet2, self).__init__() + super().__init__() def forward(self, i, inputs): while i <= 3: @@ -60,7 +60,7 @@ def test_while_2(): class BaseNet3(paddle.nn.Layer): def __init__(self): - super(BaseNet3, self).__init__() + super().__init__() def forward(self, i, j, k): while i <= 3: @@ -83,7 +83,7 @@ def test_while_3(): class BaseNet4(paddle.nn.Layer): def __init__(self): - super(BaseNet4, self).__init__() + super().__init__() def forward(self, i, j, k): while i <= 3: @@ -108,7 +108,7 @@ def test_while_4(): class BaseNet5(paddle.nn.Layer): def __init__(self): - super(BaseNet5, self).__init__() + super().__init__() def forward(self, i, j, k): while i <= 3: diff --git a/tests/test_zeros_like.py b/tests/test_zeros_like.py index 35671ef54..f0a8db4cb 100644 --- a/tests/test_zeros_like.py +++ b/tests/test_zeros_like.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from onnxbase import APIOnnx, randtool + import paddle -from onnxbase import APIOnnx -from onnxbase import randtool class Net(paddle.nn.Layer): @@ -23,14 +23,13 @@ class Net(paddle.nn.Layer): """ def __init__(self): - super(Net, self).__init__() + super().__init__() def forward(self, inputs): """ forward """ - x = paddle.zeros_like(inputs, dtype=None, name=None) - return x + return paddle.zeros_like(inputs, dtype=None, name=None) def test_zeros_like_base(): diff --git a/third_party/onnx b/third_party/onnx index 990217f04..d3f6b795a 160000 --- a/third_party/onnx +++ b/third_party/onnx @@ -1 +1 @@ -Subproject commit 990217f043af7222348ca8f0301e17fa7b841781 +Subproject commit d3f6b795aedb48eaecc881bf5e8f5dd6efbe25b3 diff --git a/third_party/optimizer b/third_party/optimizer index b3a461186..94d238d96 160000 --- a/third_party/optimizer +++ b/third_party/optimizer @@ -1 +1 @@ -Subproject commit b3a4611861734e0731bbcc2bed1f080139e4988b +Subproject commit 94d238d96e3fb3a7ba34f03c284b9ad3516163be diff --git a/third_party/pybind11 b/third_party/pybind11 index 3e9dfa286..45fab4087 160000 --- a/third_party/pybind11 +++ b/third_party/pybind11 @@ -1 +1 @@ -Subproject commit 3e9dfa2866941655c56877882565e7577de6fc7b +Subproject commit 45fab4087eaaff234227a10cf7845e8b07f28a98 diff --git a/tools/codestyle/clang-tidy.py b/tools/codestyle/clang-tidy.py index 3d7fd7c6b..4e82a15f8 100644 --- a/tools/codestyle/clang-tidy.py +++ b/tools/codestyle/clang-tidy.py @@ -36,7 +36,6 @@ http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html """ - import argparse import glob import json @@ -83,7 +82,7 @@ def make_absolute(f, directory): def analysis_gitignore(path, filename=".gitignore"): """Analysis gitignore file and return ignore file list""" - with open(path + "/" + filename, "r") as f: + with open(path + "/" + filename) as f: lines = f.readlines() ignore_file_list = [] for line in lines: @@ -171,7 +170,7 @@ def merge_replacement_files(tmpdir, mergefile): mergekey = "Diagnostics" merged = 
[] for replacefile in glob.iglob(os.path.join(tmpdir, "*.yaml")): - content = yaml.safe_load(open(replacefile, "r")) + content = yaml.safe_load(open(replacefile)) if not content: continue # Skip empty files. merged.extend(content.get(mergekey, [])) @@ -268,7 +267,7 @@ def main(): parser.add_argument( "-checks", default=None, - help="checks filter, when not specified, use clang-tidy " "default", + help="checks filter, when not specified, use clang-tidy default", ) parser.add_argument( "-config", @@ -313,12 +312,12 @@ def main(): parser.add_argument( "-format", action="store_true", - help="Reformat code " "after applying fixes", + help="Reformat code after applying fixes", ) parser.add_argument( "-style", default="file", - help="The style of reformat " "code after applying fixes", + help="The style of reformat code after applying fixes", ) parser.add_argument( "-p", @@ -330,14 +329,14 @@ def main(): dest="extra_arg", action="append", default=[], - help="Additional argument to append to the compiler " "command line.", + help="Additional argument to append to the compiler command line.", ) parser.add_argument( "-extra-arg-before", dest="extra_arg_before", action="append", default=[], - help="Additional argument to prepend to the compiler " "command line.", + help="Additional argument to prepend to the compiler command line.", ) parser.add_argument( "-quiet", action="store_true", help="Run clang-tidy in quiet mode" diff --git a/tools/codestyle/copyright.py b/tools/codestyle/copyright.py index d3f7e2b0d..675a286e1 100644 --- a/tools/codestyle/copyright.py +++ b/tools/codestyle/copyright.py @@ -67,20 +67,16 @@ def _get_comment_mark(path): def _check_copyright(path): head = [] try: - with open(path, "r", encoding="utf-8") as f: + with open(path, encoding="utf-8") as f: head = [next(f) for x in range(4)] except StopIteration: pass - for idx, line in enumerate(head): - if RE_COPYRIGHT.search(line) is not None: - return True - - return False + return 
any(RE_COPYRIGHT.search(line) is not None for idx, line in enumerate(head)) def generate_copyright(path, comment_mark): - original_contents = open(path, "r", encoding="utf-8").readlines() + original_contents = open(path, encoding="utf-8").readlines() head = original_contents[0:4] insert_line_no = 0 diff --git a/tools/onnx/onnx_infer_shape.py b/tools/onnx/onnx_infer_shape.py index 632ab6163..1b4878dcf 100755 --- a/tools/onnx/onnx_infer_shape.py +++ b/tools/onnx/onnx_infer_shape.py @@ -14,11 +14,11 @@ import argparse import logging + import numpy as np import onnx -from onnx import helper, numpy_helper, shape_inference import sympy - +from onnx import helper, numpy_helper, shape_inference from packaging import version assert version.parse(onnx.__version__) >= version.parse("1.8.0") @@ -51,8 +51,7 @@ def get_shape_from_type_proto(type_proto): assert not is_sequence(type_proto) if type_proto.tensor_type.HasField("shape"): return [get_dim_from_proto(d) for d in type_proto.tensor_type.shape.dim] - else: - return None # note no shape is different from shape without dim (scalar) + return None # note no shape is different from shape without dim (scalar) def get_shape_from_value_info(vi): @@ -60,12 +59,10 @@ def get_shape_from_value_info(vi): if cls_type is None: return None if is_sequence(vi.type): - if "tensor_type" == vi.type.sequence_type.elem_type.WhichOneof("value"): + if vi.type.sequence_type.elem_type.WhichOneof("value") == "tensor_type": return get_shape_from_type_proto(vi.type.sequence_type.elem_type) - else: - return None - else: - return get_shape_from_type_proto(vi.type) + return None + return get_shape_from_type_proto(vi.type) def make_named_value_info(name): @@ -107,21 +104,19 @@ def as_scalar(x): if isinstance(x, list): assert len(x) == 1 return x[0] - elif isinstance(x, np.ndarray): + if isinstance(x, np.ndarray): return x.item() - else: - return x + return x def as_list(x, keep_none): if isinstance(x, list): return x - elif isinstance(x, np.ndarray): + if 
isinstance(x, np.ndarray): return list(x) - elif keep_none and x is None: + if keep_none and x is None: return None - else: - return [x] + return [x] def sympy_reduce_product(x): @@ -236,10 +231,8 @@ def __init__(self, int_max, auto_merge, guess_output_rank, verbose, prefix=""): def _add_suggested_merge(self, symbols, apply=False): assert all( - [ - (isinstance(s, str) and s in self.symbolic_dims_) or is_literal(s) - for s in symbols - ] + (isinstance(s, str) and s in self.symbolic_dims_) or is_literal(s) + for s in symbols ) symbols = set(symbols) for k, v in self.suggested_merge_.items(): @@ -305,25 +298,18 @@ def _apply_suggested_merge(self, graph_input_only=False): def _preprocess(self, in_mp): self.out_mp_ = onnx.ModelProto() self.out_mp_.CopyFrom(in_mp) - self.graph_inputs_ = dict([(i.name, i) for i in list(self.out_mp_.graph.input)]) - self.initializers_ = dict([(i.name, i) for i in self.out_mp_.graph.initializer]) - self.known_vi_ = dict([(i.name, i) for i in list(self.out_mp_.graph.input)]) + self.graph_inputs_ = {i.name: i for i in list(self.out_mp_.graph.input)} + self.initializers_ = {i.name: i for i in self.out_mp_.graph.initializer} + self.known_vi_ = {i.name: i for i in list(self.out_mp_.graph.input)} self.known_vi_.update( - dict( - [ - ( - i.name, - helper.make_tensor_value_info( - i.name, i.data_type, list(i.dims) - ), - ) - for i in self.out_mp_.graph.initializer - ] - ) + { + i.name: helper.make_tensor_value_info(i.name, i.data_type, list(i.dims)) + for i in self.out_mp_.graph.initializer + } ) def _merge_symbols(self, dims): - if not all([isinstance(d, str) for d in dims]): + if not all(isinstance(d, str) for d in dims): if self.auto_merge_: unique_dims = list(set(dims)) is_int = [is_literal(d) for d in unique_dims] @@ -334,33 +320,23 @@ def _merge_symbols(self, dims): int_dim = is_int.index(1) if self.verbose_ > 0: logger.debug( - "dim {} has been merged with value {}".format( - unique_dims[:int_dim] + unique_dims[int_dim + 1 :], - 
unique_dims[int_dim], - ) + f"dim {unique_dims[:int_dim] + unique_dims[int_dim + 1 :]} has been merged with value {unique_dims[int_dim]}" ) self._check_merged_dims(unique_dims, allow_broadcast=False) return unique_dims[int_dim] - else: - if self.verbose_ > 0: - logger.debug( - "dim {} has been mergd with dim {}".format( - unique_dims[1:], unique_dims[0] - ) - ) - return dims[0] - else: - return None - if all([d == dims[0] for d in dims]): + if self.verbose_ > 0: + logger.debug( + f"dim {unique_dims[1:]} has been mergd with dim {unique_dims[0]}" + ) + return dims[0] + return None + if all(d == dims[0] for d in dims): return dims[0] - merged = [ - self.suggested_merge_[d] if d in self.suggested_merge_ else d for d in dims - ] - if all([d == merged[0] for d in merged]): + merged = [self.suggested_merge_.get(d, d) for d in dims] + if all(d == merged[0] for d in merged): assert merged[0] in self.symbolic_dims_ return merged[0] - else: - return None + return None # broadcast from right to left, and merge symbolic dims if needed def _broadcast_shapes(self, shape1, shape2): @@ -390,7 +366,7 @@ def _broadcast_shapes(self, shape1, shape2): + " " + str(dim2) ) - new_shape = [new_dim] + new_shape + new_shape = [new_dim, *new_shape] return new_shape def _get_shape(self, node, idx): @@ -398,9 +374,8 @@ def _get_shape(self, node, idx): if name in self.known_vi_: vi = self.known_vi_[name] return get_shape_from_value_info(vi) - else: - assert name in self.initializers_ - return list(self.initializers_[name].dims) + assert name in self.initializers_ + return list(self.initializers_[name].dims) def _get_shape_rank(self, node, idx): return len(self._get_shape(node, idx)) @@ -477,7 +452,7 @@ def _onnx_infer_single_node(self, node): # (2) opset version >= 9. In older version, initializer is required in graph input by onnx spec. # (3) The initializer is not in graph input. The means the node input is "constant" in inference. 
initializers = [] - if (get_opset(self.out_mp_) >= 9) and node.op_type in ["Unsqueeze"]: + if (get_opset(self.out_mp_) >= 9) and node.op_type == "Unsqueeze": initializers = [ self.initializers_[name] for name in node.input @@ -511,20 +486,18 @@ def _onnx_infer_subgraph( ): if self.verbose_ > 2: logger.debug( - "Inferencing subgraph of node {} with output({}...): {}".format( - node.name, node.output[0], node.op_type - ) + f"Inferencing subgraph of node {node.name} with output({node.output[0]}...): {node.op_type}" ) # node inputs are not passed directly to the subgraph # it's up to the node dispatcher to prepare subgraph input # for example, with Scan/Loop, subgraph input shape would be trimmed from node input shape # besides, inputs in subgraph could shadow implicit inputs - subgraph_inputs = set( - [i.name for i in list(subgraph.initializer) + list(subgraph.input)] - ) - subgraph_implicit_input = set( - [name for name in self.known_vi_.keys() if name not in subgraph_inputs] - ) + subgraph_inputs = { + i.name for i in list(subgraph.initializer) + list(subgraph.input) + } + subgraph_implicit_input = { + name for name in self.known_vi_ if name not in subgraph_inputs + } tmp_graph = helper.make_graph( list(subgraph.node), "tmp", @@ -574,15 +547,13 @@ def _onnx_infer_subgraph( get_shape_from_value_info(o) for o in symbolic_shape_inference.out_mp_.graph.output ] - subgraph_new_symbolic_dims = set( - [ - d - for s in subgraph_shapes - if s - for d in s - if isinstance(d, str) and d not in self.symbolic_dims_ - ] - ) + subgraph_new_symbolic_dims = { + d + for s in subgraph_shapes + if s + for d in s + if isinstance(d, str) and d not in self.symbolic_dims_ + } new_dims = {} for d in subgraph_new_symbolic_dims: assert d in symbolic_shape_inference.symbolic_dims_ @@ -592,7 +563,7 @@ def _onnx_infer_subgraph( def _get_int_values(self, node, broadcast=False): values = [self._try_get_value(node, i) for i in range(len(node.input))] - if all([v is not None for v in values]): + if 
all(v is not None for v in values): # some shape compute is in floating point, cast to int for sympy for i, v in enumerate(values): if not isinstance(v, np.ndarray): @@ -624,11 +595,13 @@ def _get_int_values(self, node, broadcast=False): def _compute_on_sympy_data(self, node, op_func): assert len(node.output) == 1 values = self._get_int_values(node, broadcast=True) - if all([v is not None for v in values]): + if all(v is not None for v in values): is_list = [isinstance(v, list) for v in values] as_list = any(is_list) if as_list: - self.sympy_data_[node.output[0]] = [op_func(vs) for vs in zip(*values)] + self.sympy_data_[node.output[0]] = [ + op_func(vs) for vs in zip(*values, strict=False) + ] else: self.sympy_data_[node.output[0]] = op_func(values) @@ -651,7 +624,7 @@ def _pass_on_shape_and_type(self, node): ) def _new_symbolic_dim(self, prefix, dim): - new_dim = "{}_d{}".format(prefix, dim) + new_dim = f"{prefix}_d{dim}" if new_dim in self.suggested_merge_: v = self.suggested_merge_[new_dim] new_symbolic_dim = sympy.Integer(int(v)) if is_literal(v) else v @@ -662,12 +635,7 @@ def _new_symbolic_dim(self, prefix, dim): def _new_symbolic_dim_from_output(self, node, out_idx=0, dim=0): return self._new_symbolic_dim( - "{}{}_{}_o{}_".format( - node.op_type, - self.prefix_, - list(self.out_mp_.graph.node).index(node), - out_idx, - ), + f"{node.op_type}{self.prefix_}_{list(self.out_mp_.graph.node).index(node)}_o{out_idx}_", dim, ) @@ -703,7 +671,7 @@ def _compute_conv_pool_shape(self, node): dilations = get_attribute(node, "dilations", [1] * rank) strides = get_attribute(node, "strides", [1] * rank) effective_kernel_shape = [ - (k - 1) * d + 1 for k, d in zip(kernel_shape, dilations) + (k - 1) * d + 1 for k, d in zip(kernel_shape, dilations, strict=False) ] pads = get_attribute(node, "pads") if pads is None: @@ -712,17 +680,19 @@ def _compute_conv_pool_shape(self, node): if auto_pad != "VALID" and auto_pad != "NOTSET": try: residual = [ - sympy.Mod(d, s) for d, s in 
zip(sympy_shape[-rank:], strides) + sympy.Mod(d, s) + for d, s in zip(sympy_shape[-rank:], strides, strict=False) ] total_pads = [ max(0, (k - s) if r == 0 else (k - r)) - for k, s, r in zip(effective_kernel_shape, strides, residual) + for k, s, r in zip( + effective_kernel_shape, strides, residual, strict=False + ) ] - except ( - TypeError - ): # sympy may throw TypeError: cannot determine truth value of Relational + except TypeError: # sympy may throw TypeError: cannot determine truth value of Relational total_pads = [ - max(0, (k - s)) for k, s in zip(effective_kernel_shape, strides) + max(0, (k - s)) + for k, s in zip(effective_kernel_shape, strides, strict=False) ] # assuming no residual if sympy throws error elif auto_pad == "VALID": total_pads = [] @@ -730,7 +700,9 @@ def _compute_conv_pool_shape(self, node): total_pads = [0] * rank else: assert len(pads) == 2 * rank - total_pads = [p1 + p2 for p1, p2 in zip(pads[:rank], pads[rank:])] + total_pads = [ + p1 + p2 for p1, p2 in zip(pads[:rank], pads[rank:], strict=False) + ] ceil_mode = get_attribute(node, "ceil_mode", 0) for i in range(rank): @@ -751,7 +723,7 @@ def _compute_conv_pool_shape(self, node): def _check_merged_dims(self, dims, allow_broadcast=True): if allow_broadcast: dims = [d for d in dims if not (is_literal(d) and int(d) <= 1)] - if not all([d == dims[0] for d in dims]): + if not all(d == dims[0] for d in dims): self._add_suggested_merge(dims, apply=True) def _compute_matmul_shape(self, node, output_dtype=None): @@ -766,18 +738,18 @@ def _compute_matmul_shape(self, node, output_dtype=None): new_shape = [] elif lhs_rank == 1: rhs_reduce_dim = -2 - new_shape = rhs_shape[:rhs_reduce_dim] + [rhs_shape[-1]] + new_shape = [*rhs_shape[:rhs_reduce_dim], rhs_shape[-1]] elif rhs_rank == 1: lhs_reduce_dim = -1 new_shape = lhs_shape[:lhs_reduce_dim] else: lhs_reduce_dim = -1 rhs_reduce_dim = -2 - new_shape = ( - self._broadcast_shapes(lhs_shape[:-2], rhs_shape[:-2]) - + [lhs_shape[-2]] - + [rhs_shape[-1]] - 
) + new_shape = [ + *self._broadcast_shapes(lhs_shape[:-2], rhs_shape[:-2]), + lhs_shape[-2], + rhs_shape[-1], + ] # merge reduce dim self._check_merged_dims( [lhs_shape[lhs_reduce_dim], rhs_shape[rhs_reduce_dim]], @@ -806,7 +778,7 @@ def _fuse_tensor_type(self, node, out_idx, dst_type, src_type): else src_type.tensor_type ) if dst_tensor_type.elem_type != src_tensor_type.elem_type: - node_id = node.name if node.name else node.op_type + node_id = node.name or node.op_type raise ValueError( f"For node {node_id}, dst_tensor_type.elem_type != src_tensor_type.elem_type: " f"{onnx.onnx_pb.TensorProto.DataType.Name(dst_tensor_type.elem_type)} vs " @@ -814,7 +786,7 @@ def _fuse_tensor_type(self, node, out_idx, dst_type, src_type): ) if dst_tensor_type.HasField("shape"): for di, ds in enumerate( - zip(dst_tensor_type.shape.dim, src_tensor_type.shape.dim) + zip(dst_tensor_type.shape.dim, src_tensor_type.shape.dim, strict=False) ): if ds[0] != ds[1]: # create a new symbolic dimension for node/out_idx/mismatch dim id in dst_tensor_type for tensor_type @@ -909,14 +881,14 @@ def _infer_Compress(self, node): ) def _infer_Concat(self, node): - if any([i in self.sympy_data_ or i in self.initializers_ for i in node.input]): + if any(i in self.sympy_data_ or i in self.initializers_ for i in node.input): values = self._get_int_values(node) print("=======", values, node.name, get_attribute(node, "axis")) - if all([v is not None for v in values]): + if all(v is not None for v in values): axis = get_attribute(node, "axis") if axis < 0: axis = axis + len(values[0]) - assert 0 == axis + assert axis == 0 self.sympy_data_[node.output[0]] = [] for i in range(len(node.input)): value = values[i] @@ -941,7 +913,7 @@ def _infer_Concat(self, node): for i_idx in range(len(node.input)) if self._get_shape(node, i_idx) ] - if all([d == dims[0] for d in dims]): + if all(d == dims[0] for d in dims): continue merged = self._merge_symbols(dims) if isinstance(merged, str): @@ -966,7 +938,7 @@ def 
_infer_ConcatFromSequence(self, node): concat_dim = str(self._new_symbolic_dim_from_output(node, 0, axis)) new_shape = seq_shape if new_axis: - new_shape = seq_shape[:axis] + [concat_dim] + seq_shape[axis:] + new_shape = [*seq_shape[:axis], concat_dim, *seq_shape[axis:]] else: new_shape[axis] = concat_dim vi = self.known_vi_[node.output[0]] @@ -993,7 +965,7 @@ def _infer_ConstantOfShape(self, node): self._update_computed_dims(sympy_shape) # update sympy data if output type is int, and shape is known if vi.type.tensor_type.elem_type == onnx.TensorProto.INT64 and all( - [is_literal(x) for x in sympy_shape] + is_literal(x) for x in sympy_shape ): self.sympy_data_[node.output[0]] = np.ones( [int(x) for x in sympy_shape], dtype=np.int64 @@ -1049,9 +1021,7 @@ def _infer_Einsum(self, node): letter = term[-i] if letter != 46: # letter != b'.' dim = shape[-i] - if letter not in letter_to_dim.keys(): - letter_to_dim[letter] = dim - elif type(dim) != sympy.Symbol: + if letter not in letter_to_dim or type(dim) != sympy.Symbol: letter_to_dim[letter] = dim num_operands = num_operands + 1 @@ -1121,7 +1091,7 @@ def _infer_Gather(self, node): if ( node.input[0] in self.sympy_data_ and len(data_shape) == 1 - and 0 == get_attribute(node, "axis", 0) + and get_attribute(node, "axis", 0) == 0 ): idx = self._try_get_value(node, 1) if idx is not None: @@ -1191,11 +1161,14 @@ def _infer_If(self, node): ) # pass on sympy data from subgraph, if cond is constant - if cond is not None and i_sub == (0 if as_scalar(cond) > 0 else 1): - if subgraph.output[i_out].name in subgraph_infer.sympy_data_: - self.sympy_data_[vi.name] = subgraph_infer.sympy_data_[ - subgraph.output[i_out].name - ] + if ( + cond is not None + and i_sub == (0 if as_scalar(cond) > 0 else 1) + and subgraph.output[i_out].name in subgraph_infer.sympy_data_ + ): + self.sympy_data_[vi.name] = subgraph_infer.sympy_data_[ + subgraph.output[i_out].name + ] def _infer_Loop(self, node): subgraph = get_attribute(node, "body") @@ -1231,7 
+1204,7 @@ def _infer_Loop(self, node): else: si = subgraph.input[i_out + 1] si_shape = get_shape_from_value_info(si) - for di, dims in enumerate(zip(si_shape, so_shape)): + for di, dims in enumerate(zip(si_shape, so_shape, strict=False)): if dims[0] != dims[1]: new_dim = onnx.TensorShapeProto.Dimension() new_dim.dim_param = str( @@ -1244,9 +1217,7 @@ def _infer_Loop(self, node): if need_second_infer: if self.verbose_ > 2: logger.debug( - "Rerun Loop: {}({}...), because of sequence in loop carried variables".format( - node.name, node.output[0] - ) + f"Rerun Loop: {node.name}({node.output[0]}...), because of sequence in loop carried variables" ) self._onnx_infer_subgraph(node, subgraph, inc_subgraph_id=False) @@ -1300,15 +1271,13 @@ def _infer_OneHot(self, node): axis = get_attribute(node, "axis", -1) axis = handle_negative_axis(axis, len(sympy_shape) + 1) new_shape = get_shape_from_sympy_shape( - sympy_shape[:axis] - + [ - ( - self._new_symbolic_dim_from_output(node) - if not is_literal(depth) - else depth - ) + [ + *sympy_shape[:axis], + self._new_symbolic_dim_from_output(node) + if not is_literal(depth) + else depth, + *sympy_shape[axis:], ] - + sympy_shape[axis:] ) vi = self.known_vi_[node.output[0]] vi.CopyFrom( @@ -1332,7 +1301,9 @@ def _infer_Pad(self, node): assert len(pads) == 2 * rank new_sympy_shape = [ d + pad_up + pad_down - for d, pad_up, pad_down in zip(sympy_shape, pads[:rank], pads[rank:]) + for d, pad_up, pad_down in zip( + sympy_shape, pads[:rank], pads[rank:], strict=False + ) ] self._update_computed_dims(new_sympy_shape) else: @@ -1414,12 +1385,8 @@ def _infer_aten_multinomial(self, node): assert rank in [1, 2] num_samples = self._try_get_value(node, 1) di = rank - 1 - last_dim = ( - num_samples - if num_samples - else str(self._new_symbolic_dim_from_output(node, 0, di)) - ) - output_shape = sympy_shape[:-1] + [last_dim] + last_dim = num_samples or str(self._new_symbolic_dim_from_output(node, 0, di)) + output_shape = [*sympy_shape[:-1], 
last_dim] vi = self.known_vi_[node.output[0]] vi.CopyFrom( helper.make_tensor_value_info( @@ -1533,7 +1500,7 @@ def _infer_BatchNormalization(self, node): def _infer_Range(self, node): vi = self.known_vi_[node.output[0]] input_data = self._get_int_values(node) - if all([i is not None for i in input_data]): + if all(i is not None for i in input_data): start = as_scalar(input_data[0]) limit = as_scalar(input_data[1]) delta = as_scalar(input_data[2]) @@ -1613,12 +1580,12 @@ def _infer_Reshape(self, node): ) else: input_sympy_shape = self._get_sympy_shape(node, 0) - total = int(1) + total = 1 for d in input_sympy_shape: total = total * d new_sympy_shape = [] deferred_dim_idx = -1 - non_deferred_size = int(1) + non_deferred_size = 1 for i, d in enumerate(shape_value): if type(d) == sympy.Symbol: new_sympy_shape.append(d) @@ -1656,7 +1623,7 @@ def _infer_Resize(self, node): if scales is not None: new_sympy_shape = [ sympy.simplify(sympy.floor(d * s)) - for d, s in zip(input_sympy_shape, scales) + for d, s in zip(input_sympy_shape, scales, strict=False) ] self._update_computed_dims(new_sympy_shape) vi.CopyFrom( @@ -1689,7 +1656,7 @@ def _infer_Resize(self, node): new_sympy_shape = [ sympy.simplify(sympy.floor(d * (end - start) * scale)) for d, start, end, scale in zip( - input_sympy_shape, roi_start, roi_end, scales + input_sympy_shape, roi_start, roi_end, scales, strict=False ) ] self._update_computed_dims(new_sympy_shape) @@ -1743,7 +1710,7 @@ def _infer_Scan(self, node): new_dim = handle_negative_axis( scan_output_axes[i - num_scan_states], len(shape) + 1 ) - shape = shape[:new_dim] + [scan_input_dim] + shape[new_dim:] + shape = [*shape[:new_dim], scan_input_dim, *shape[new_dim:]] vi.CopyFrom( helper.make_tensor_value_info( o, subgraph.output[i].type.tensor_type.elem_type, shape @@ -1824,7 +1791,7 @@ def handle_negative_index(index, bound): return index return bound + index except TypeError: - logger.warning("Cannot determine if {} < 0".format(index)) + 
logger.warning(f"Cannot determine if {index} < 0") return index if get_opset(self.out_mp_) <= 9: @@ -1840,7 +1807,7 @@ def handle_negative_index(index, bound): axes = self._try_get_value(node, 3) steps = self._try_get_value(node, 4) if axes is None and not (starts is None and ends is None): - axes = list(range(0, len(starts if starts is not None else ends))) + axes = list(range(len(starts if starts is not None else ends))) if steps is None and not (starts is None and ends is None): steps = [1] * len(starts if starts is not None else ends) axes = as_list(axes, keep_none=True) @@ -1856,7 +1823,7 @@ def handle_negative_index(index, bound): for i in axes: new_sympy_shape[i] = self._new_symbolic_dim_from_output(node, 0, i) else: - for i, s, e, t in zip(axes, starts, ends, steps): + for i, s, e, t in zip(axes, starts, ends, steps, strict=False): e = handle_negative_index(e, new_sympy_shape[i]) if is_literal(e): if e >= self.int_max_: @@ -1881,9 +1848,7 @@ def handle_negative_index(index, bound): e = new_sympy_shape[i] except Exception: logger.warning( - "Unable to determine if {} <= {}, treat as equal".format( - e, new_sympy_shape[i] - ) + f"Unable to determine if {e} <= {new_sympy_shape[i]}, treat as equal" ) e = new_sympy_shape[i] @@ -1909,7 +1874,7 @@ def handle_negative_index(index, bound): # handle sympy_data if needed, for slice in shape computation if ( node.input[0] in self.sympy_data_ - and [0] == axes + and axes == [0] and len(starts) == 1 and len(ends) == 1 and len(steps) == 1 @@ -1954,9 +1919,11 @@ def _infer_Split_Common(self, node, make_value_info_func): node.output[i_o], self.known_vi_[node.input[0]].type.tensor_type.elem_type, get_shape_from_sympy_shape( - input_sympy_shape[:axis] - + [split[i_o]] - + input_sympy_shape[axis + 1 :] + [ + *input_sympy_shape[:axis], + split[i_o], + *input_sympy_shape[axis + 1 :], + ] ), ) ) @@ -2048,10 +2015,7 @@ def _infer_TopK(self, node): else: k = self._get_int_values(node)[1] - if k is None: - k = 
self._new_symbolic_dim_from_output(node) - else: - k = as_scalar(k) + k = self._new_symbolic_dim_from_output(node) if k is None else as_scalar(k) if type(k) in [int, str]: new_shape[axis] = k @@ -2129,9 +2093,7 @@ def _infer_ZipMap(self, node): assert map_key_type is not None new_vi = onnx.ValueInfoProto() new_vi.name = node.output[0] - new_vi.type.sequence_type.elem_type.map_type.value_type.tensor_type.elem_type = ( - onnx.TensorProto.FLOAT - ) + new_vi.type.sequence_type.elem_type.map_type.value_type.tensor_type.elem_type = onnx.TensorProto.FLOAT new_vi.type.sequence_type.elem_type.map_type.key_type = map_key_type vi = self.known_vi_[node.output[0]] vi.CopyFrom(new_vi) @@ -2189,7 +2151,7 @@ def _infer_EmbedLayerNormalization(self, node): input_ids_shape = self._get_shape(node, 0) word_embedding_shape = self._get_shape(node, 2) assert len(input_ids_shape) == 2 and len(word_embedding_shape) == 2 - output_shape = input_ids_shape + [word_embedding_shape[1]] + output_shape = [*input_ids_shape, word_embedding_shape[1]] word_embedding_dtype = self.known_vi_[node.input[2]].type.tensor_type.elem_type vi = self.known_vi_[node.output[0]] @@ -2262,9 +2224,7 @@ def _is_none_dim(self, dim_value): return False if "unk__" not in dim_value: return False - if dim_value in self.symbolic_dims_.keys(): - return False - return True + return dim_value not in self.symbolic_dims_ def _is_shape_contains_none_dim(self, out_shape): for out in out_shape: @@ -2313,14 +2273,12 @@ def _infer_impl(self, start_sympy_data=None): # compute prerequesite for node for topological sort # node with subgraphs may have dependency on implicit inputs, which will affect topological sort - prereq_for_node = ( - {} - ) # map from node to all its inputs, including implicit ones in subgraph + prereq_for_node = {} # map from node to all its inputs, including implicit ones in subgraph def get_prereq(node): - names = set(i for i in node.input if i) + names = {i for i in node.input if i} subgraphs = [] - if "If" == 
node.op_type: + if node.op_type == "If": subgraphs = [ get_attribute(node, "then_branch"), get_attribute(node, "else_branch"), @@ -2343,8 +2301,7 @@ def get_prereq(node): names.update(g_prereq) # remove subgraph inputs from g_prereq since those are local-only for i in g.input: - if i.name in names: - names.remove(i.name) + names.discard(i.name) return names for n in self.tmp_mp_.graph.node: @@ -2352,43 +2309,37 @@ def get_prereq(node): # topological sort nodes, note there might be dead nodes so we check if all graph outputs are reached to terminate sorted_nodes = [] - sorted_known_vi = set( - [ - i.name - for i in list(self.out_mp_.graph.input) - + list(self.out_mp_.graph.initializer) - ] - ) - if any([o.name in sorted_known_vi for o in self.out_mp_.graph.output]): + sorted_known_vi = { + i.name + for i in list(self.out_mp_.graph.input) + + list(self.out_mp_.graph.initializer) + } + if any(o.name in sorted_known_vi for o in self.out_mp_.graph.output): # Loop/Scan will have some graph output in graph inputs, so don't do topological sort sorted_nodes = self.out_mp_.graph.node else: - while not all( - [o.name in sorted_known_vi for o in self.out_mp_.graph.output] - ): + while not all(o.name in sorted_known_vi for o in self.out_mp_.graph.output): old_sorted_nodes_len = len(sorted_nodes) for node in self.out_mp_.graph.node: if (node.output[0] not in sorted_known_vi) and all( - [ - i in sorted_known_vi - for i in prereq_for_node[node.output[0]] - if i - ] + i in sorted_known_vi + for i in prereq_for_node[node.output[0]] + if i ): sorted_known_vi.update(node.output) sorted_nodes.append(node) if old_sorted_nodes_len == len(sorted_nodes) and not all( - [o.name in sorted_known_vi for o in self.out_mp_.graph.output] + o.name in sorted_known_vi for o in self.out_mp_.graph.output ): raise Exception("Invalid model with cyclic graph") for node in sorted_nodes: - assert all([i in self.known_vi_ for i in node.input if i]) + assert all(i in self.known_vi_ for i in node.input if i) 
self._onnx_infer_single_node(node) known_aten_op = False if node.op_type in self.dispatcher_: self.dispatcher_[node.op_type](node) - elif node.op_type in ["ConvTranspose"]: + elif node.op_type == "ConvTranspose": # onnx shape inference ops like ConvTranspose may have empty shape for symbolic input # before adding symbolic compute for them # mark the output type as UNDEFINED to allow guessing of rank @@ -2463,7 +2414,7 @@ def get_prereq(node): seq_cls_type = out_type.sequence_type.elem_type.WhichOneof( "value" ) - if "tensor_type" == seq_cls_type: + if seq_cls_type == "tensor_type": logger.debug( " {}: sequence of {} {}".format( node.output[i_o], @@ -2475,14 +2426,10 @@ def get_prereq(node): ) else: logger.debug( - " {}: sequence of {}".format( - node.output[i_o], seq_cls_type - ) + f" {node.output[i_o]}: sequence of {seq_cls_type}" ) else: - logger.debug( - " {}: {}".format(node.output[i_o], out_type_kind) - ) + logger.debug(f" {node.output[i_o]}: {out_type_kind}") continue out_shape = get_shape_from_value_info(vi) @@ -2536,29 +2483,28 @@ def get_prereq(node): "MatMul", "MatMulInteger", "MatMulInteger16", - ]: - if ( - None in out_shape - or self._is_shape_contains_none_dim(out_shape) - ): - if None in out_shape: - idx = out_shape.index(None) - else: - idx = out_shape.index( - self._is_shape_contains_none_dim(out_shape) - ) - dim_idx = [ - len(s) - len(out_shape) + idx for s in shapes - ] - # only support auto merge for MatMul for dim < rank-2 when rank > 2 - assert ( - len(shapes[0]) > 2 - and dim_idx[0] < len(shapes[0]) - 2 - ) - assert ( - len(shapes[1]) > 2 - and dim_idx[1] < len(shapes[1]) - 2 + ] and ( + None in out_shape + or self._is_shape_contains_none_dim(out_shape) + ): + if None in out_shape: + idx = out_shape.index(None) + else: + idx = out_shape.index( + self._is_shape_contains_none_dim(out_shape) ) + dim_idx = [ + len(s) - len(out_shape) + idx for s in shapes + ] + # only support auto merge for MatMul for dim < rank-2 when rank > 2 + assert ( + 
len(shapes[0]) > 2 + and dim_idx[0] < len(shapes[0]) - 2 + ) + assert ( + len(shapes[1]) > 2 + and dim_idx[1] < len(shapes[1]) - 2 + ) elif node.op_type == "Expand": # auto merge for cases like Expand([min(batch, 1), min(seq, 512)], [batch, seq]) shapes = [ @@ -2583,7 +2529,9 @@ def get_prereq(node): self._add_suggested_merge( [ s[i] if is_literal(s[i]) else str(s[i]) - for s, i in zip(shapes, dim_idx) + for s, i in zip( + shapes, dim_idx, strict=False + ) if i >= 0 ] ) @@ -2635,17 +2583,11 @@ def get_prereq(node): if self.verbose_ > 0: if is_unknown_op: logger.debug( - "Possible unknown op: {} node: {}, guessing {} shape".format( - node.op_type, node.name, vi.name - ) + f"Possible unknown op: {node.op_type} node: {node.name}, guessing {vi.name} shape" ) if self.verbose_ > 2: logger.debug( - " {}: {} {}".format( - node.output[i_o], - str(new_shape), - vi.type.tensor_type.elem_type, - ) + f" {node.output[i_o]}: {new_shape!s} {vi.type.tensor_type.elem_type}" ) self.run_ = True diff --git a/tools/onnx/prune_onnx_model.py b/tools/onnx/prune_onnx_model.py index 32712093a..47c37ed68 100755 --- a/tools/onnx/prune_onnx_model.py +++ b/tools/onnx/prune_onnx_model.py @@ -43,9 +43,7 @@ def parse_arguments(): for output_name in args.output_names: if output_name not in output_tensor_names: print( - "[ERROR] Cannot find output tensor name '{}' in onnx model graph.".format( - output_name - ) + f"[ERROR] Cannot find output tensor name '{output_name}' in onnx model graph." 
) sys.exit(-1) if len(set(args.output_names)) < len(args.output_names): @@ -55,7 +53,7 @@ def parse_arguments(): sys.exit(-1) output_node_indices = set() - output_to_node = dict() + output_to_node = {} for i, node in enumerate(model.graph.node): for out in node.output: output_to_node[out] = i @@ -91,7 +89,7 @@ def parse_arguments(): del model.graph.input[idx] for out in args.output_names: model.graph.output.extend([onnx.ValueInfoProto(name=out)]) - for i in range(num_outputs): + for _i in range(num_outputs): del model.graph.output[0] from onnx_infer_shape import SymbolicShapeInference @@ -99,14 +97,10 @@ def parse_arguments(): model = SymbolicShapeInference.infer_shapes(model, 2**31 - 1, True, False, 1) onnx.checker.check_model(model) onnx.save(model, args.save_file) - print("[Finished] The new model saved in {}.".format(args.save_file)) + print(f"[Finished] The new model saved in {args.save_file}.") print( - "[DEBUG INFO] The inputs of new model: {}".format( - [x.name for x in model.graph.input] - ) + f"[DEBUG INFO] The inputs of new model: {[x.name for x in model.graph.input]}" ) print( - "[DEBUG INFO] The outputs of new model: {}".format( - [x.name for x in model.graph.output] - ) + f"[DEBUG INFO] The outputs of new model: {[x.name for x in model.graph.output]}" ) diff --git a/tools/onnx/rename_onnx_model.py b/tools/onnx/rename_onnx_model.py index 9fbc2631a..6108883b4 100755 --- a/tools/onnx/rename_onnx_model.py +++ b/tools/onnx/rename_onnx_model.py @@ -54,9 +54,7 @@ def parse_arguments(): for origin_name in args.origin_names: if origin_name not in output_tensor_names: print( - "[ERROR] Cannot find tensor name '{}' in onnx model graph.".format( - origin_name - ) + f"[ERROR] Cannot find tensor name '{origin_name}' in onnx model graph." 
) sys.exit(-1) if len(set(args.origin_names)) < len(args.origin_names): @@ -101,14 +99,10 @@ def parse_arguments(): onnx.checker.check_model(model) onnx.save(model, args.save_file) - print("[Finished] The new model saved in {}.".format(args.save_file)) + print(f"[Finished] The new model saved in {args.save_file}.") print( - "[DEBUG INFO] The inputs of new model: {}".format( - [x.name for x in model.graph.input] - ) + f"[DEBUG INFO] The inputs of new model: {[x.name for x in model.graph.input]}" ) print( - "[DEBUG INFO] The outputs of new model: {}".format( - [x.name for x in model.graph.output] - ) + f"[DEBUG INFO] The outputs of new model: {[x.name for x in model.graph.output]}" ) diff --git a/tools/paddle/infer_paddle_model_shape.py b/tools/paddle/infer_paddle_model_shape.py index 6afd45b49..cae58921a 100755 --- a/tools/paddle/infer_paddle_model_shape.py +++ b/tools/paddle/infer_paddle_model_shape.py @@ -13,6 +13,7 @@ # limitations under the License. import argparse + import paddle import paddle.base as base import paddle.static as static @@ -20,9 +21,10 @@ def process_old_ops_desc(program): for i in range(len(program.blocks[0].ops)): - if program.blocks[0].ops[i].type == "matmul": - if not program.blocks[0].ops[i].has_attr("head_number"): - program.blocks[0].ops[i]._set_attr("head_number", 1) + if program.blocks[0].ops[i].type == "matmul" and not program.blocks[0].ops[ + i + ].has_attr("head_number"): + program.blocks[0].ops[i]._set_attr("head_number", 1) def infer_shape(program, input_shape_dict): @@ -65,12 +67,10 @@ def infer_shape(program, input_shape_dict): major_ver = model_version // 1000000 minor_ver = (model_version - major_ver * 1000000) // 1000 patch_ver = model_version - major_ver * 1000000 - minor_ver * 1000 - model_version = "{}.{}.{}".format(major_ver, minor_ver, patch_ver) + model_version = f"{major_ver}.{minor_ver}.{patch_ver}" if model_version != paddle_version: print( - "[WARNING] The model is saved by paddlepaddle v{}, but now your 
paddlepaddle is version of {}, this difference may cause error, it is recommend you reinstall a same version of paddlepaddle for this model".format( - model_version, paddle_version - ) + f"[WARNING] The model is saved by paddlepaddle v{model_version}, but now your paddlepaddle is version of {paddle_version}, this difference may cause error, it is recommend you reinstall a same version of paddlepaddle for this model" ) for k, v in input_shape_dict.items(): program.blocks[0].var(k).desc.set_shape(v) diff --git a/tools/paddle/merge_params.py b/tools/paddle/merge_params.py index 80d84539a..e3ee852fb 100755 --- a/tools/paddle/merge_params.py +++ b/tools/paddle/merge_params.py @@ -12,8 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -import paddle.fluid as fluid import sys + +import paddle.fluid as fluid + import paddle paddle.enable_static() diff --git a/tools/paddle/prune_paddle_model.py b/tools/paddle/prune_paddle_model.py index f259816a7..4987ff4a3 100755 --- a/tools/paddle/prune_paddle_model.py +++ b/tools/paddle/prune_paddle_model.py @@ -13,11 +13,12 @@ # limitations under the License. import argparse +import os import sys + import paddle import paddle.base.core as core import paddle.static as static -import os def prepend_feed_ops(program, feed_target_names): @@ -32,9 +33,7 @@ def prepend_feed_ops(program, feed_target_names): for i, name in enumerate(feed_target_names): if not global_block.has_var(name): print( - "The input[{i}]: '{name}' doesn't exist in pruned inference program, which will be ignored in new saved model.".format( - i=i, name=name - ) + f"The input[{i}]: '{name}' doesn't exist in pruned inference program, which will be ignored in new saved model." 
) continue out = global_block.var(name) @@ -54,7 +53,7 @@ def append_fetch_ops(program, fetch_target_names): fetch_var = global_block.create_var( name="fetch", type=core.VarDesc.VarType.FETCH_LIST, persistable=True ) - print("the len of fetch_target_names:%d" % (len(fetch_target_names))) + print(f"the len of fetch_target_names:{len(fetch_target_names)}") for i, name in enumerate(fetch_target_names): global_block.append_op( type="fetch", @@ -66,7 +65,7 @@ def append_fetch_ops(program, fetch_target_names): def insert_by_op_type(program, op_names, op_type): global_block = program.global_block() - need_to_remove_op_index = list() + need_to_remove_op_index = [] for i, op in enumerate(global_block.ops): if op.type == op_type: need_to_remove_op_index.append(i) @@ -133,7 +132,7 @@ def parse_arguments(): program.global_block().var(out_name) for out_name in args.output_names ] else: - fetch_vars = [out_var for out_var in fetch_targets] + fetch_vars = list(fetch_targets) model_name = args.model_filename.split(".")[0] path_prefix = os.path.join(args.save_dir, model_name)