diff --git a/.github/workflows/deploy-python.yml b/.github/workflows/deploy-python.yml index feb0a948c..2dae24c7c 100644 --- a/.github/workflows/deploy-python.yml +++ b/.github/workflows/deploy-python.yml @@ -154,7 +154,7 @@ jobs: - name: Install Dependencies run: | pip install -U pip - pip install -U wheel setuptools packaging twine + pip install -U wheel setuptools packaging twine - name: Download Artifacts uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # 4.1.4 diff --git a/.github/workflows/get-envs.py b/.github/workflows/get-envs.py index e02f103ac..f5ef93693 100755 --- a/.github/workflows/get-envs.py +++ b/.github/workflows/get-envs.py @@ -25,7 +25,7 @@ def main(f): filtered_envs = environments[GROUP_NUMBER::TOTAL_GROUPS] joined_envs = ",".join(filtered_envs) - assert joined_envs, ( + assert joined_envs, ( # noqa: S101 f"No environments found.\nenvironments = {str(environments)}\nGROUP_NUMBER = {GROUP_NUMBER + 1}\nTOTAL_GROUPS = {TOTAL_GROUPS}" ) print(joined_envs) diff --git a/.github/workflows/mega-linter.yml b/.github/workflows/mega-linter.yml index 61ccac356..198bf0bad 100644 --- a/.github/workflows/mega-linter.yml +++ b/.github/workflows/mega-linter.yml @@ -28,6 +28,7 @@ jobs: contents: write issues: write pull-requests: write + statuses: write steps: # Git Checkout - name: Checkout Code @@ -47,6 +48,8 @@ jobs: VALIDATE_ALL_CODEBASE: "true" GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # ADD YOUR CUSTOM ENV VARIABLES HERE OR DEFINE THEM IN A FILE .mega-linter.yml AT THE ROOT OF YOUR REPOSITORY + GITHUB_STATUS_REPORTER: "true" + GITHUB_COMMENT_REPORTER: "true" PYTHON_RUFF_ARGUMENTS: --config pyproject.toml --config 'output-format="github"' PYTHON_RUFF_FORMAT_ARGUMENTS: --config pyproject.toml --config 'output-format="github"' diff --git a/.mega-linter.yml b/.mega-linter.yml index 790e6b5a7..8b6e79510 100644 --- a/.mega-linter.yml +++ b/.mega-linter.yml @@ -20,9 +20,7 @@ ENABLE_LINTERS: # If you use ENABLE_LINTERS variable, all other linters will be - YAML_YAMLLINT PYTHON_RUFF_CONFIG_FILE: pyproject.toml -PYTHON_RUFF_CLI_LINT_MODE: project PYTHON_RUFF_FORMAT_CONFIG_FILE: pyproject.toml -PYTHON_RUFF_FORMAT_CLI_LINT_MODE: project MARKDOWN_MARKDOWN_LINK_CHECK_FILTER_REGEX_EXCLUDE: "tests/.*" MARKDOWN_MARKDOWNLINT_FILTER_REGEX_EXCLUDE: "tests/.*" MARKDOWN_MARKDOWNLINT_ARGUMENTS: "--disable=MD041" diff --git a/newrelic/admin/run_program.py b/newrelic/admin/run_program.py index fd7e2c65c..fb6171731 100644 --- a/newrelic/admin/run_program.py +++ b/newrelic/admin/run_program.py @@ -107,4 +107,4 @@ def log_message(text, *args): log_message("program_exe_path = %r", program_exe_path) log_message("execl_arguments = %r", [program_exe_path] + args) - os.execl(program_exe_path, *args) + os.execl(program_exe_path, *args) # noqa: S606 diff --git a/newrelic/admin/run_python.py b/newrelic/admin/run_python.py index dec8d8f70..dc7acfe9b 100644 --- a/newrelic/admin/run_python.py +++ b/newrelic/admin/run_python.py @@ -105,4 +105,4 @@ def log_message(text, *args): log_message("python_exe_path = %r", python_exe_path) log_message("execl_arguments = %r", [python_exe_path, python_exe_path] + args) - os.execl(python_exe_path, python_exe_path, *args) + os.execl(python_exe_path, python_exe_path, *args) # noqa: S606 diff --git a/newrelic/admin/validate_config.py b/newrelic/admin/validate_config.py index f368f0aab..a0c80f093 100644 --- a/newrelic/admin/validate_config.py +++ b/newrelic/admin/validate_config.py @@ -37,7 +37,7 @@ def _function1(): @function_trace() def _function2(): - for i in 
range(10): + for _ in range(10): _function1() @error_trace() @@ -68,7 +68,7 @@ def _wsgi_application(environ, start_response): response_headers = [("Content-type", "text/plain"), ("Content-Length", str(len(output)))] start_response(status, response_headers) - for i in range(10): + for _ in range(10): _function1() _function2() @@ -84,7 +84,7 @@ def _wsgi_application(environ, start_response): @background_task() def _background_task(): - for i in range(10): + for _ in range(10): _function1() _function2() @@ -154,7 +154,7 @@ def validate_config(args): if len(args) >= 2: log_file = args[1] else: - log_file = "/tmp/python-agent-test.log" # nosec + log_file = "/tmp/python-agent-test.log" log_level = logging.DEBUG diff --git a/newrelic/api/application.py b/newrelic/api/application.py index 0dc5575b0..121938151 100644 --- a/newrelic/api/application.py +++ b/newrelic/api/application.py @@ -115,6 +115,7 @@ def record_exception(self, exc=None, value=None, tb=None, params=None, ignore_er warnings.warn( ("The record_exception function is deprecated. Please use the new api named notice_error instead."), DeprecationWarning, + stacklevel=2, ) self.notice_error(error=(exc, value, tb), attributes=params, ignore=ignore_errors) diff --git a/newrelic/api/asgi_application.py b/newrelic/api/asgi_application.py index 9334ffd9d..ed0675afc 100644 --- a/newrelic/api/asgi_application.py +++ b/newrelic/api/asgi_application.py @@ -156,7 +156,7 @@ async def send_inject_browser_agent(self, message): # check to see if we have to modify the content-length # header headers = self.initial_message["headers"] - for header_index, header_data in enumerate(headers): + for header_index, header_data in enumerate(headers): # noqa: B007 header_name, header_value = header_data if header_name.lower() == b"content-length": break diff --git a/newrelic/api/database_trace.py b/newrelic/api/database_trace.py index abddacd06..75fb41fb8 100644 --- a/newrelic/api/database_trace.py +++ b/newrelic/api/database_trace.py @@ -25,8 +25,11 @@ def register_database_client( - dbapi2_module, database_product, quoting_style="single", explain_query=None, explain_stmts=[], instance_info=None + dbapi2_module, database_product, quoting_style="single", explain_query=None, explain_stmts=None, instance_info=None ): + if explain_stmts is None: + explain_stmts = [] + _logger.debug( "Registering database client module %r where database " "is %r, quoting style is %r, explain query statement is %r and " diff --git a/newrelic/api/error_trace.py b/newrelic/api/error_trace.py index f93d91ced..b67261d90 100644 --- a/newrelic/api/error_trace.py +++ b/newrelic/api/error_trace.py @@ -20,7 +20,9 @@ class ErrorTrace: - def __init__(self, ignore_errors=[], ignore=None, expected=None, status_code=None, parent=None): + def __init__(self, ignore_errors=None, ignore=None, expected=None, status_code=None, parent=None): + if ignore_errors is None: + ignore_errors = [] if parent is None: parent = current_trace() @@ -33,6 +35,7 @@ def __init__(self, ignore_errors=[], ignore=None, expected=None, status_code=Non warnings.warn( ("The ignore_errors argument is deprecated. 
Please use the new ignore argument instead."), DeprecationWarning, + stacklevel=2, ) def __enter__(self): @@ -50,7 +53,10 @@ def __exit__(self, exc, value, tb): ) -def ErrorTraceWrapper(wrapped, ignore_errors=[], ignore=None, expected=None, status_code=None): +def ErrorTraceWrapper(wrapped, ignore_errors=None, ignore=None, expected=None, status_code=None): + if ignore_errors is None: + ignore_errors = [] + def wrapper(wrapped, instance, args, kwargs): parent = current_trace() @@ -63,11 +69,17 @@ def wrapper(wrapped, instance, args, kwargs): return FunctionWrapper(wrapped, wrapper) -def error_trace(ignore_errors=[], ignore=None, expected=None, status_code=None): +def error_trace(ignore_errors=None, ignore=None, expected=None, status_code=None): + if ignore_errors is None: + ignore_errors = [] + return functools.partial( ErrorTraceWrapper, ignore_errors=ignore_errors, ignore=ignore, expected=expected, status_code=status_code ) -def wrap_error_trace(module, object_path, ignore_errors=[], ignore=None, expected=None, status_code=None): +def wrap_error_trace(module, object_path, ignore_errors=None, ignore=None, expected=None, status_code=None): + if ignore_errors is None: + ignore_errors = [] + wrap_object(module, object_path, ErrorTraceWrapper, (ignore_errors, ignore, expected, status_code)) diff --git a/newrelic/api/lambda_handler.py b/newrelic/api/lambda_handler.py index 67fbed4b7..97897c6ae 100644 --- a/newrelic/api/lambda_handler.py +++ b/newrelic/api/lambda_handler.py @@ -155,6 +155,7 @@ def LambdaHandlerWrapper(*args, **kwargs): "APIs provided in the newrelic-lambda package." ), DeprecationWarning, + stacklevel=2, ) return _LambdaHandlerWrapper(*args, **kwargs) @@ -164,6 +165,7 @@ def lambda_handler(application=None, name=None, group=None): warnings.warn( ("The lambda_handler API has been deprecated. 
Please use the APIs provided in the newrelic-lambda package."), DeprecationWarning, + stacklevel=2, ) return functools.partial(_LambdaHandlerWrapper, application=application, name=name, group=group) diff --git a/newrelic/api/log.py b/newrelic/api/log.py index 3a9a0ed48..7dff13864 100644 --- a/newrelic/api/log.py +++ b/newrelic/api/log.py @@ -216,6 +216,7 @@ def __init__( "please use newrelic.api.log.NewRelicLogForwardingHandler to take advantage of all the " "features included in application log forwarding such as proper batching.", DeprecationWarning, + stacklevel=2, ) super(NewRelicLogHandler, self).__init__(level=level) self.license_key = license_key or self.settings.license_key diff --git a/newrelic/api/message_trace.py b/newrelic/api/message_trace.py index 7279b2a19..853fa94e3 100644 --- a/newrelic/api/message_trace.py +++ b/newrelic/api/message_trace.py @@ -88,8 +88,11 @@ def create_node(self): def MessageTraceWrapper( - wrapped, library, operation, destination_type, destination_name, params={}, terminal=True, async_wrapper=None + wrapped, library, operation, destination_type, destination_name, params=None, terminal=True, async_wrapper=None ): + if params is None: + params = {} + def _nr_message_trace_wrapper_(wrapped, instance, args, kwargs): wrapper = async_wrapper if async_wrapper is not None else get_async_wrapper(wrapped) if not wrapper: @@ -151,7 +154,12 @@ def _nr_message_trace_wrapper_(wrapped, instance, args, kwargs): return FunctionWrapper(wrapped, _nr_message_trace_wrapper_) -def message_trace(library, operation, destination_type, destination_name, params={}, terminal=True, async_wrapper=None): +def message_trace( + library, operation, destination_type, destination_name, params=None, terminal=True, async_wrapper=None +): + if params is None: + params = {} + return functools.partial( MessageTraceWrapper, library=library, @@ -171,10 +179,13 @@ def wrap_message_trace( operation, destination_type, destination_name, - params={}, + params=None, terminal=True, async_wrapper=None, ): + if params is None: + params = {} + wrap_object( module, object_path, diff --git a/newrelic/api/ml_model.py b/newrelic/api/ml_model.py index e9a21e673..364530162 100644 --- a/newrelic/api/ml_model.py +++ b/newrelic/api/ml_model.py @@ -47,7 +47,8 @@ def record_llm_feedback_event(trace_id, rating, category=None, message=None, met if not transaction: warnings.warn( "No message feedback events will be recorded. record_llm_feedback_event must be called within the " - "scope of a transaction." + "scope of a transaction.", + stacklevel=2, ) return diff --git a/newrelic/api/time_trace.py b/newrelic/api/time_trace.py index 29a522d03..a0e5fc6c7 100644 --- a/newrelic/api/time_trace.py +++ b/newrelic/api/time_trace.py @@ -454,6 +454,7 @@ def record_exception(self, exc_info=None, params=None, ignore_errors=None): warnings.warn( ("The record_exception function is deprecated. Please use the new api named notice_error instead."), DeprecationWarning, + stacklevel=2, ) self.notice_error(error=exc_info, attributes=params, ignore=ignore_errors) @@ -700,6 +701,7 @@ def record_exception(exc=None, value=None, tb=None, params=None, ignore_errors=N warnings.warn( ("The record_exception function is deprecated. 
Please use the new api named notice_error instead."), DeprecationWarning, + stacklevel=2, ) notice_error(error=(exc, value, tb), attributes=params, ignore=ignore_errors, application=application) diff --git a/newrelic/api/transaction.py b/newrelic/api/transaction.py index 9d8b9e3f8..9f094cf42 100644 --- a/newrelic/api/transaction.py +++ b/newrelic/api/transaction.py @@ -339,7 +339,8 @@ def __del__(self): self.__exit__(None, None, None) def __enter__(self): - assert self._state == self.STATE_PENDING + if self._state != self.STATE_PENDING: + raise RuntimeError("Attempting to start Transaction that's in an invalid state.") # Bail out if the transaction is not enabled. @@ -994,7 +995,7 @@ def user_attributes(self): def _compute_sampled_and_priority(self): if self._priority is None: # truncate priority field to 6 digits past the decimal - self._priority = float(f"{random.random():.6f}") # nosec + self._priority = float(f"{random.random():.6f}") # noqa: S311 if self._sampled is None: self._sampled = self._application.compute_sampled() @@ -1110,6 +1111,7 @@ def create_distributed_trace_payload(self): "Please use the insert_distributed_trace_headers API." ), DeprecationWarning, + stacklevel=2, ) return self._create_distributed_trace_payload() @@ -1235,6 +1237,7 @@ def accept_distributed_trace_payload(self, *args, **kwargs): "Please use the accept_distributed_trace_headers API." ), DeprecationWarning, + stacklevel=2, ) if not self._can_accept_distributed_trace_headers(): return False @@ -1591,6 +1594,7 @@ def record_exception(self, exc=None, value=None, tb=None, params=None, ignore_er warnings.warn( ("The record_exception function is deprecated. Please use the new api named notice_error instead."), DeprecationWarning, + stacklevel=2, ) self.notice_error(error=(exc, value, tb), attributes=params, ignore=ignore_errors) @@ -1779,6 +1783,7 @@ def add_custom_parameter(self, name, value): # pragma: no cover warnings.warn( ("The add_custom_parameter API has been deprecated. Please use the add_custom_attribute API."), DeprecationWarning, + stacklevel=2, ) return self.add_custom_attribute(name, value) @@ -1789,6 +1794,7 @@ def add_custom_parameters(self, items): # pragma: no cover warnings.warn( ("The add_custom_parameters API has been deprecated. Please use the add_custom_attributes API."), DeprecationWarning, + stacklevel=2, ) return self.add_custom_attributes(items) @@ -1899,6 +1905,7 @@ def add_custom_parameter(key, value): # pragma: no cover warnings.warn( ("The add_custom_parameter API has been deprecated. Please use the add_custom_attribute API."), DeprecationWarning, + stacklevel=2, ) return add_custom_attribute(key, value) @@ -1910,6 +1917,7 @@ def add_custom_parameters(items): # pragma: no cover warnings.warn( ("The add_custom_parameters API has been deprecated. Please use the add_custom_attributes API."), DeprecationWarning, + stacklevel=2, ) return add_custom_attributes(items) @@ -1946,6 +1954,7 @@ def get_browser_timing_footer(nonce=None): warnings.warn( "The get_browser_timing_footer function is deprecated. Please migrate to only using the get_browser_timing_header API instead.", DeprecationWarning, + stacklevel=2, ) return "" diff --git a/newrelic/api/web_transaction.py b/newrelic/api/web_transaction.py index 444a2ff83..e4ecb022d 100644 --- a/newrelic/api/web_transaction.py +++ b/newrelic/api/web_transaction.py @@ -489,6 +489,7 @@ def browser_timing_footer(self, nonce=None): warnings.warn( "The browser_timing_footer function is deprecated. 
Please migrate to only using the browser_timing_header api instead.", DeprecationWarning, + stacklevel=2, ) return "" diff --git a/newrelic/common/agent_http.py b/newrelic/common/agent_http.py index 92bf09115..88694d996 100644 --- a/newrelic/common/agent_http.py +++ b/newrelic/common/agent_http.py @@ -399,11 +399,11 @@ def send_request( response = self._connection.request_encode_url( method, path, fields=params, body=body, headers=merged_headers, **self._urlopen_kwargs ) - except urllib3.exceptions.HTTPError as e: + except urllib3.exceptions.HTTPError as exc: self.log_response(self._audit_log_fp, request_id, 0, None, None, connection) # All urllib3 HTTP errors should be treated as a network # interface exception. - raise NetworkInterfaceException(e) + raise NetworkInterfaceException(exc) from exc self.log_response(self._audit_log_fp, request_id, response.status, response.headers, response.data, connection) diff --git a/newrelic/common/coroutine.py b/newrelic/common/coroutine.py index 04a2ecb08..165110ba3 100644 --- a/newrelic/common/coroutine.py +++ b/newrelic/common/coroutine.py @@ -33,7 +33,7 @@ def _iscoroutinefunction_tornado(fn): def is_coroutine_callable(wrapped): - return is_coroutine_function(wrapped) or is_coroutine_function(getattr(wrapped, "__call__", None)) + return is_coroutine_function(wrapped) or is_coroutine_function(getattr(wrapped, "__call__", None)) # noqa: B004 if hasattr(inspect, "isasyncgenfunction"): diff --git a/newrelic/common/encoding_utils.py b/newrelic/common/encoding_utils.py index f1eb72fc7..fd62ebcad 100644 --- a/newrelic/common/encoding_utils.py +++ b/newrelic/common/encoding_utils.py @@ -241,7 +241,7 @@ def generate_path_hash(name, seed): if not isinstance(name, bytes): name = name.encode("UTF-8") - path_hash = rotated ^ int(hashlib.md5(name).hexdigest()[-8:], base=16) # nosec + path_hash = rotated ^ int(hashlib.md5(name).hexdigest()[-8:], base=16) # noqa: S324 return f"{path_hash:08x}" diff --git a/newrelic/common/object_names.py b/newrelic/common/object_names.py index 54078f602..19b42a0a1 100644 --- a/newrelic/common/object_names.py +++ b/newrelic/common/object_names.py @@ -122,7 +122,7 @@ def _object_context(object): # noqa: A002 # If it's not a class method, it must be an instance method. if cname is None: - cname = getattr(object.__self__.__class__, "__qualname__") + cname = object.__self__.__class__.__qualname__ path = f"{cname}.{object.__name__}" @@ -140,7 +140,7 @@ def _object_context(object): # noqa: A002 # qualified name. if path is None and hasattr(object, "__class__"): - path = getattr(object.__class__, "__qualname__") + path = object.__class__.__qualname__ # Now calculate the name of the module object is defined in. diff --git a/newrelic/common/object_wrapper.py b/newrelic/common/object_wrapper.py index 7a009bfdf..ec099041e 100644 --- a/newrelic/common/object_wrapper.py +++ b/newrelic/common/object_wrapper.py @@ -130,6 +130,7 @@ def __init__(self, wrapped, instance, wrapper): "The ObjectWrapper API is deprecated. Please use one of ObjectProxy, FunctionWrapper, or CallableObjectProxy instead." 
), DeprecationWarning, + stacklevel=2, ) super(ObjectWrapper, self).__init__(wrapped, wrapper) diff --git a/newrelic/common/package_version_utils.py b/newrelic/common/package_version_utils.py index 278e51368..18dbfcf0d 100644 --- a/newrelic/common/package_version_utils.py +++ b/newrelic/common/package_version_utils.py @@ -17,8 +17,8 @@ from functools import lru_cache # Need to account for 4 possible variations of version declaration specified in (rejected) PEP 396 -VERSION_ATTRS = ("__version__", "version", "__version_tuple__", "version_tuple") # nosec -NULL_VERSIONS = frozenset((None, "", "0", "0.0", "0.0.0", "0.0.0.0", (0,), (0, 0), (0, 0, 0), (0, 0, 0, 0))) # nosec +VERSION_ATTRS = ("__version__", "version", "__version_tuple__", "version_tuple") +NULL_VERSIONS = frozenset((None, "", "0", "0.0", "0.0.0", "0.0.0.0", (0,), (0, 0), (0, 0, 0), (0, 0, 0, 0))) # noqa: S104 def get_package_version(name): diff --git a/newrelic/common/system_info.py b/newrelic/common/system_info.py index 155989757..4a7636c05 100644 --- a/newrelic/common/system_info.py +++ b/newrelic/common/system_info.py @@ -38,7 +38,7 @@ [ "localhost", "127.0.0.1", - "0.0.0.0", # nosec + "0.0.0.0", # noqa: S104 "0:0:0:0:0:0:0:0", "0:0:0:0:0:0:0:1", "::1", @@ -173,14 +173,14 @@ def _darwin_physical_processor_count(): physical_processor_cmd = ["/usr/sbin/sysctl", "-n", "hw.packages"] try: - num_physical_processors = int(_execute_program(physical_processor_cmd, stderr=subprocess.PIPE)) # nosec + num_physical_processors = int(_execute_program(physical_processor_cmd, stderr=subprocess.PIPE)) # noqa: S603 except (subprocess.CalledProcessError, ValueError): num_physical_processors = None physical_core_cmd = ["/usr/sbin/sysctl", "-n", "hw.physicalcpu"] try: - num_physical_cores = int(_execute_program(physical_core_cmd, stderr=subprocess.PIPE)) # nosec + num_physical_cores = int(_execute_program(physical_core_cmd, stderr=subprocess.PIPE)) # noqa: S603 except (subprocess.CalledProcessError, ValueError): num_physical_cores = None @@ -233,7 +233,7 @@ def _darwin_total_physical_memory(): command = ["/usr/sbin/sysctl", "-n", "hw.memsize"] try: - return float(_execute_program(command, stderr=subprocess.PIPE)) / (1024 * 1024) # nosec + return float(_execute_program(command, stderr=subprocess.PIPE)) / (1024 * 1024) # noqa: S603 except subprocess.CalledProcessError: pass except ValueError: diff --git a/newrelic/common/utilization.py b/newrelic/common/utilization.py index 826270fcd..e4f4ac0f6 100644 --- a/newrelic/common/utilization.py +++ b/newrelic/common/utilization.py @@ -157,7 +157,7 @@ class AWSUtilization(CommonUtilization): EXPECTED_KEYS = ("availabilityZone", "instanceId", "instanceType") METADATA_HOST = "169.254.169.254" METADATA_PATH = "/latest/dynamic/instance-identity/document" - METADATA_TOKEN_PATH = "/latest/api/token" + METADATA_TOKEN_PATH = "/latest/api/token" # noqa: S105 HEADERS = {"X-aws-ec2-metadata-token-ttl-seconds": "21600"} VENDOR_NAME = "aws" diff --git a/newrelic/config.py b/newrelic/config.py index 1bebd7918..8b46d7d7a 100644 --- a/newrelic/config.py +++ b/newrelic/config.py @@ -927,10 +927,10 @@ def _load_configuration(config_file=None, environment=None, ignore_errors=True, if config_file.endswith(".toml"): try: import tomllib - except ImportError: + except ImportError as exc: raise newrelic.api.exceptions.ConfigurationError( "TOML configuration file can only be used if tomllib is available (Python 3.11+)." 
- ) + ) from exc with open(config_file, "rb") as f: content = tomllib.load(f) newrelic_section = content.get("tool", {}).get("newrelic") @@ -1290,7 +1290,7 @@ def _process_background_task_configuration(): if name and name.startswith("lambda "): callable_vars = {"callable_name": callable_name} - name = eval(name, callable_vars) # nosec, pylint: disable=W0123 + name = eval(name, callable_vars) # noqa: S307 _logger.debug("register background-task %s", ((module, object_path, application, name, group),)) @@ -1340,7 +1340,7 @@ def _process_database_trace_configuration(): if sql.startswith("lambda "): callable_vars = {"callable_name": callable_name} - sql = eval(sql, callable_vars) # nosec, pylint: disable=W0123 + sql = eval(sql, callable_vars) # noqa: S307 _logger.debug("register database-trace %s", ((module, object_path, sql),)) @@ -1395,11 +1395,11 @@ def _process_external_trace_configuration(): if url.startswith("lambda "): callable_vars = {"callable_name": callable_name} - url = eval(url, callable_vars) # nosec, pylint: disable=W0123 + url = eval(url, callable_vars) # noqa: S307 if method and method.startswith("lambda "): callable_vars = {"callable_name": callable_name} - method = eval(method, callable_vars) # nosec, pylint: disable=W0123 + method = eval(method, callable_vars) # noqa: S307 _logger.debug("register external-trace %s", ((module, object_path, library, url, method),)) @@ -1467,7 +1467,7 @@ def _process_function_trace_configuration(): if name and name.startswith("lambda "): callable_vars = {"callable_name": callable_name} - name = eval(name, callable_vars) # nosec, pylint: disable=W0123 + name = eval(name, callable_vars) # noqa: S307 _logger.debug( "register function-trace %s", ((module, object_path, name, group, label, params, terminal, rollup),) @@ -1525,7 +1525,7 @@ def _process_generator_trace_configuration(): if name and name.startswith("lambda "): callable_vars = {"callable_name": callable_name} - name = eval(name, callable_vars) # nosec, pylint: disable=W0123 + name = eval(name, callable_vars) # noqa: S307 _logger.debug("register generator-trace %s", ((module, object_path, name, group),)) @@ -1584,7 +1584,7 @@ def _process_profile_trace_configuration(): if name and name.startswith("lambda "): callable_vars = {"callable_name": callable_name} - name = eval(name, callable_vars) # nosec, pylint: disable=W0123 + name = eval(name, callable_vars) # noqa: S307 _logger.debug("register profile-trace %s", ((module, object_path, name, group, depth),)) @@ -1634,7 +1634,7 @@ def _process_memcache_trace_configuration(): if command.startswith("lambda "): callable_vars = {"callable_name": callable_name} - command = eval(command, callable_vars) # nosec, pylint: disable=W0123 + command = eval(command, callable_vars) # noqa: S307 _logger.debug("register memcache-trace %s", (module, object_path, command)) @@ -1694,7 +1694,7 @@ def _process_transaction_name_configuration(): if name and name.startswith("lambda "): callable_vars = {"callable_name": callable_name} - name = eval(name, callable_vars) # nosec, pylint: disable=W0123 + name = eval(name, callable_vars) # noqa: S307 _logger.debug("register transaction-name %s", ((module, object_path, name, group, priority),)) diff --git a/newrelic/core/adaptive_sampler.py b/newrelic/core/adaptive_sampler.py index 925055b59..5c58c3648 100644 --- a/newrelic/core/adaptive_sampler.py +++ b/newrelic/core/adaptive_sampler.py @@ -50,11 +50,11 @@ def compute_sampled(self): return False elif self.sampled_count < self.sampling_target: - sampled = 
random.randrange(self.computed_count_last) < self.sampling_target + sampled = random.randrange(self.computed_count_last) < self.sampling_target # noqa: S311 if sampled: self.sampled_count += 1 else: - sampled = random.randrange(self.computed_count) < self.adaptive_target + sampled = random.randrange(self.computed_count) < self.adaptive_target # noqa: S311 if sampled: self.sampled_count += 1 diff --git a/newrelic/core/agent.py b/newrelic/core/agent.py index 8edacac9d..b712b129e 100644 --- a/newrelic/core/agent.py +++ b/newrelic/core/agent.py @@ -478,6 +478,7 @@ def record_exception(self, app_name, exc=None, value=None, tb=None, params=None, warnings.warn( ("The record_exception function is deprecated. Please use the new api named notice_error instead."), DeprecationWarning, + stacklevel=2, ) self.notice_error(app_name, error=(exc, value, tb), attributes=params, ignore=ignore_errors) diff --git a/newrelic/core/agent_protocol.py b/newrelic/core/agent_protocol.py index 1ca57cf8a..b51d6f09b 100644 --- a/newrelic/core/agent_protocol.py +++ b/newrelic/core/agent_protocol.py @@ -218,7 +218,7 @@ def send(self, method, payload=(), path="/agent_listener/invoke_raw_method"): response = self.client.send_request(path=path, params=params, headers=headers, payload=payload) except NetworkInterfaceException: # All HTTP errors are currently retried - raise RetryDataForRequest + raise RetryDataForRequest # noqa: B904 status, data = response diff --git a/newrelic/core/application.py b/newrelic/core/application.py index c057f9f0a..301c4962d 100644 --- a/newrelic/core/application.py +++ b/newrelic/core/application.py @@ -807,6 +807,7 @@ def record_exception(self, exc=None, value=None, tb=None, params=None, ignore_er warnings.warn( ("The record_exception function is deprecated. Please use the new api named notice_error instead."), DeprecationWarning, + stacklevel=2, ) self.notice_error(error=(exc, value, tb), attributes=params, ignore=ignore_errors) diff --git a/newrelic/core/attribute.py b/newrelic/core/attribute.py index d1ecdd462..59726c306 100644 --- a/newrelic/core/attribute.py +++ b/newrelic/core/attribute.py @@ -371,8 +371,8 @@ def sanitize(value): try: value = str(value) - except Exception: - raise CastingFailureException + except Exception as exc: + raise CastingFailureException from exc else: _logger.debug("Attribute value is of type: %r. 
Casting %r to string: %s", type(original), original, value) diff --git a/newrelic/core/code_level_metrics.py b/newrelic/core/code_level_metrics.py index 0543a2bee..303ef09a9 100644 --- a/newrelic/core/code_level_metrics.py +++ b/newrelic/core/code_level_metrics.py @@ -66,7 +66,7 @@ def extract_code_from_callable(func): if inspect.isclass(func): # For class types don't change anything pass - elif hasattr(func, "__call__"): + elif hasattr(func, "__call__"): # noqa: B004 # For callable object, use the __call__ attribute func = func.__call__ module_name, func_path = object_context(func) diff --git a/newrelic/core/config.py b/newrelic/core/config.py index 65af62a82..475145385 100644 --- a/newrelic/core/config.py +++ b/newrelic/core/config.py @@ -1295,7 +1295,7 @@ def apply_server_side_settings(server_side_config=None, settings=_settings): derived_vals = settings_snapshot.cross_process_id.split("#") if len(derived_vals) == 2: - for idx, val in enumerate(derived_vals): + for idx, _val in enumerate(derived_vals): # only override the value if the server side does not provide # the value specifically if vals[idx] is None: diff --git a/newrelic/core/database_utils.py b/newrelic/core/database_utils.py index be08d7519..0f04a0968 100644 --- a/newrelic/core/database_utils.py +++ b/newrelic/core/database_utils.py @@ -531,7 +531,10 @@ def __init__(self, database, connection): self.connection = connection self.cursors = {} - def cursor(self, args=(), kwargs={}): + def cursor(self, args=(), kwargs=None): + if kwargs is None: + kwargs = {} + key = (args, frozenset(kwargs.items())) cursor = self.cursors.get(key) @@ -625,7 +628,7 @@ def cleanup(self): if settings.debug.log_explain_plan_queries: _logger.debug("Cleaning up SQL connections cache %r.", self) - for key, connection in self.connections: + for _key, connection in self.connections: connection.cleanup() self.connections = [] diff --git a/newrelic/core/environment.py b/newrelic/core/environment.py index 7a945b048..be6e59bf6 100644 --- a/newrelic/core/environment.py +++ b/newrelic/core/environment.py @@ -217,7 +217,7 @@ def plugins(): try: if not module: continue - except Exception: # nosec B112 + except Exception: # noqa: S112 # if the application uses generalimport to manage optional depedencies, # it's possible that generalimport.MissingOptionalDependency is raised. # In this case, we should not report the module as it is not actually loaded and diff --git a/newrelic/core/profile_sessions.py b/newrelic/core/profile_sessions.py index f3dead8a0..dd16cb452 100644 --- a/newrelic/core/profile_sessions.py +++ b/newrelic/core/profile_sessions.py @@ -92,7 +92,7 @@ def collect_stack_traces(include_nr_threads=False): python threads. """ - for txn, thread_id, thread_category, frame in trace_cache().active_threads(): + for _txn, _thread_id, thread_category, frame in trace_cache().active_threads(): # Skip NR Threads unless explicitly requested. 
if (thread_category == "AGENT") and (not include_nr_threads): diff --git a/newrelic/core/stats_engine.py b/newrelic/core/stats_engine.py index 1f47cac3e..fe5cb9f9e 100644 --- a/newrelic/core/stats_engine.py +++ b/newrelic/core/stats_engine.py @@ -419,7 +419,7 @@ def add(self, sample, priority=None): # pylint: disable=E0202 self.num_seen += 1 if priority is None: - priority = random.random() # nosec + priority = random.random() # noqa: S311 entry = (priority, self.num_seen, sample) if self.num_seen == self.capacity: @@ -437,7 +437,7 @@ def merge(self, other_data_set, priority=None): if priority is None: priority = -1 - for original_priority, seen_at, sample in other_data_set.pq: + for original_priority, seen_at, sample in other_data_set.pq: # noqa: B007 self.add(sample, max(priority, original_priority)) # Merge the num_seen from the other_data_set, but take care not to @@ -681,6 +681,7 @@ def record_exception(self, exc=None, value=None, tb=None, params=None, ignore_er warnings.warn( ("The record_exception function is deprecated. Please use the new api named notice_error instead."), DeprecationWarning, + stacklevel=2, ) self.notice_error(error=(exc, value, tb), attributes=params, ignore=ignore_errors) @@ -1280,7 +1281,7 @@ def record_log_event(self, message, level=None, timestamp=None, attributes=None, if priority is None: # Base priority for log events outside transactions is below those inside transactions - priority = random.random() - 1 # nosec + priority = random.random() - 1 # noqa: S311 self._log_events.add(event, priority=priority) diff --git a/newrelic/hooks/adapter_gunicorn.py b/newrelic/hooks/adapter_gunicorn.py index c3840ba87..a8a55f0f4 100644 --- a/newrelic/hooks/adapter_gunicorn.py +++ b/newrelic/hooks/adapter_gunicorn.py @@ -40,7 +40,7 @@ def _nr_wrapper_Application_wsgi_(application): if is_coroutine(application): return application - elif hasattr(application, "__call__") and is_coroutine(application.__call__): + elif hasattr(application, "__call__") and is_coroutine(application.__call__): # noqa: B004 return application else: return WSGIApplicationWrapper(application) diff --git a/newrelic/hooks/database_aiomysql.py b/newrelic/hooks/database_aiomysql.py index 6d366995a..1f0ced3ab 100644 --- a/newrelic/hooks/database_aiomysql.py +++ b/newrelic/hooks/database_aiomysql.py @@ -57,7 +57,9 @@ def __await__(self): except StopIteration as e: # Catch the StopIteration and wrap the return value. cursor = e.value - wrapped_cursor = self.__cursor_wrapper__(cursor, self._nr_dbapi2_module, self._nr_connect_params, self._nr_cursor_args) + wrapped_cursor = self.__cursor_wrapper__( + cursor, self._nr_dbapi2_module, self._nr_connect_params, self._nr_cursor_args + ) return wrapped_cursor # Return here instead of raising StopIteration to properly follow generator protocol diff --git a/newrelic/hooks/datastore_aioredis.py b/newrelic/hooks/datastore_aioredis.py index 7954062b4..660a9ee50 100644 --- a/newrelic/hooks/datastore_aioredis.py +++ b/newrelic/hooks/datastore_aioredis.py @@ -199,7 +199,7 @@ def wrap_RedisConnection_execute(wrapped, instance, args, kwargs): # pragma: no def instrument_aioredis_client(module): # StrictRedis is just an alias of Redis, no need to wrap it as well. 
if hasattr(module, "Redis"): - class_ = getattr(module, "Redis") + class_ = module.Redis for operation in _redis_client_methods: if hasattr(class_, operation): _wrap_AioRedis_method_wrapper(module, "Redis", operation) diff --git a/newrelic/hooks/datastore_redis.py b/newrelic/hooks/datastore_redis.py index 9b849e1af..ec97d05b3 100644 --- a/newrelic/hooks/datastore_redis.py +++ b/newrelic/hooks/datastore_redis.py @@ -664,7 +664,7 @@ def instrument_redis_client(module): def instrument_asyncio_redis_client(module): if hasattr(module, "Redis"): - class_ = getattr(module, "Redis") + class_ = module.Redis for operation in _redis_client_async_methods: if hasattr(class_, operation): _wrap_asyncio_Redis_method_wrapper(module, "Redis", operation) diff --git a/newrelic/hooks/datastore_valkey.py b/newrelic/hooks/datastore_valkey.py index c90e53ab3..2bc76f569 100644 --- a/newrelic/hooks/datastore_valkey.py +++ b/newrelic/hooks/datastore_valkey.py @@ -664,7 +664,7 @@ def instrument_valkey_client(module): def instrument_asyncio_valkey_client(module): if hasattr(module, "Valkey"): - class_ = getattr(module, "Valkey") + class_ = module.Valkey for operation in _valkey_client_async_methods: if hasattr(class_, operation): _wrap_asyncio_Valkey_method_wrapper(module, "Valkey", operation) diff --git a/newrelic/hooks/external_aiobotocore.py b/newrelic/hooks/external_aiobotocore.py index af861f9a3..abed73271 100644 --- a/newrelic/hooks/external_aiobotocore.py +++ b/newrelic/hooks/external_aiobotocore.py @@ -13,20 +13,21 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -import traceback import sys -from aiobotocore.response import StreamingBody +import traceback from io import BytesIO +from aiobotocore.response import StreamingBody + from newrelic.api.external_trace import ExternalTrace from newrelic.common.object_wrapper import wrap_function_wrapper from newrelic.hooks.external_botocore import ( + EMBEDDING_STREAMING_UNSUPPORTED_LOG_MESSAGE, + RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE, AsyncEventStreamWrapper, handle_bedrock_exception, - run_bedrock_response_extractor, run_bedrock_request_extractor, - EMBEDDING_STREAMING_UNSUPPORTED_LOG_MESSAGE, - RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE, + run_bedrock_response_extractor, ) _logger = logging.getLogger(__name__) @@ -160,7 +161,7 @@ async def wrap_client__make_api_call(wrapped, instance, args, kwargs): run_bedrock_response_extractor(response_extractor, response_body, bedrock_attrs, is_embedding, transaction) except Exception: - _logger.warning(RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + _logger.warning(RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE, traceback.format_exception(*sys.exc_info())) return response diff --git a/newrelic/hooks/external_botocore.py b/newrelic/hooks/external_botocore.py index 52dbbaf58..01e10a234 100644 --- a/newrelic/hooks/external_botocore.py +++ b/newrelic/hooks/external_botocore.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import json import inspect +import json import logging import re import sys @@ -30,11 +30,7 @@ from newrelic.api.time_trace import current_trace, get_trace_linking_metadata from newrelic.api.transaction import current_transaction from newrelic.common.async_wrapper import async_wrapper as get_async_wrapper -from newrelic.common.object_wrapper import ( - ObjectProxy, - function_wrapper, - wrap_function_wrapper, -) +from newrelic.common.object_wrapper import ObjectProxy, function_wrapper, wrap_function_wrapper from newrelic.common.package_version_utils import get_package_version from newrelic.common.signature import bind_args from newrelic.core.config import global_settings @@ -561,7 +557,7 @@ def handle_bedrock_exception( except json.decoder.JSONDecodeError: pass except Exception: - _logger.warning(REQUEST_EXTACTOR_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + _logger.warning(REQUEST_EXTACTOR_FAILURE_LOG_MESSAGE, traceback.format_exception(*sys.exc_info())) error_attributes = bedrock_error_attributes(exc, bedrock_attrs) notice_error_attributes = { @@ -588,7 +584,7 @@ def handle_bedrock_exception( else: handle_chat_completion_event(transaction, error_attributes) except Exception: - _logger.warning(EXCEPTION_HANDLING_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + _logger.warning(EXCEPTION_HANDLING_FAILURE_LOG_MESSAGE, traceback.format_exception(*sys.exc_info())) raise @@ -598,7 +594,7 @@ def run_bedrock_response_extractor(response_extractor, response_body, bedrock_at try: response_extractor(response_body, bedrock_attrs) except Exception: - _logger.warning(RESPONSE_EXTRACTOR_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + _logger.warning(RESPONSE_EXTRACTOR_FAILURE_LOG_MESSAGE, traceback.format_exception(*sys.exc_info())) if is_embedding: handle_embedding_event(transaction, bedrock_attrs) @@ -612,7 +608,7 @@ def run_bedrock_request_extractor(request_extractor, request_body, bedrock_attrs except json.decoder.JSONDecodeError: pass except Exception: - _logger.warning(REQUEST_EXTACTOR_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + _logger.warning(REQUEST_EXTACTOR_FAILURE_LOG_MESSAGE, traceback.format_exception(*sys.exc_info())) def wrap_bedrock_runtime_invoke_model(response_streaming=False): @@ -644,7 +640,7 @@ def _wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): is_embedding = "embed" in model # Determine extractor by model type - for extractor_name, request_extractor, response_extractor, stream_extractor in MODEL_EXTRACTORS: + for extractor_name, request_extractor, response_extractor, stream_extractor in MODEL_EXTRACTORS: # noqa: B007 if model.startswith(extractor_name): break else: @@ -739,7 +735,7 @@ def _wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): run_bedrock_response_extractor(response_extractor, response_body, bedrock_attrs, is_embedding, transaction) except Exception: - _logger.warning(RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + _logger.warning(RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE, traceback.format_exception(*sys.exc_info())) return response @@ -830,7 +826,7 @@ def record_stream_chunk(self, return_val, transaction): if _type == "content_block_stop": record_events_on_stop_iteration(self, transaction) except Exception: - _logger.warning(RESPONSE_EXTRACTOR_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + _logger.warning(RESPONSE_EXTRACTOR_FAILURE_LOG_MESSAGE, 
traceback.format_exception(*sys.exc_info())) def record_events_on_stop_iteration(self, transaction): @@ -846,7 +842,7 @@ def record_events_on_stop_iteration(self, transaction): bedrock_attrs["duration"] = self._nr_ft.duration * 1000 handle_chat_completion_event(transaction, bedrock_attrs) except Exception: - _logger.warning(RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + _logger.warning(RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE, traceback.format_exception(*sys.exc_info())) # Clear cached data as this can be very large. self._nr_bedrock_attrs.clear() @@ -882,7 +878,7 @@ def record_error(self, transaction, exc): # Clear cached data as this can be very large. error_attributes.clear() except Exception: - _logger.warning(EXCEPTION_HANDLING_FAILURE_LOG_MESSAGE % traceback.format_exception(*sys.exc_info())) + _logger.warning(EXCEPTION_HANDLING_FAILURE_LOG_MESSAGE, traceback.format_exception(*sys.exc_info())) def handle_embedding_event(transaction, bedrock_attrs): @@ -898,7 +894,7 @@ def handle_embedding_event(transaction, bedrock_attrs): trace_id = bedrock_attrs.get("trace_id", None) request_id = bedrock_attrs.get("request_id", None) model = bedrock_attrs.get("model", None) - input = bedrock_attrs.get("input") + input_ = bedrock_attrs.get("input") embedding_dict = { "vendor": "bedrock", @@ -907,7 +903,7 @@ def handle_embedding_event(transaction, bedrock_attrs): "span_id": span_id, "trace_id": trace_id, "token_count": ( - settings.ai_monitoring.llm_token_count_callback(model, input) + settings.ai_monitoring.llm_token_count_callback(model, input_) if settings.ai_monitoring.llm_token_count_callback else None ), @@ -920,7 +916,7 @@ def handle_embedding_event(transaction, bedrock_attrs): embedding_dict.update(llm_metadata_dict) if settings.ai_monitoring.record_content.enabled: - embedding_dict["input"] = input + embedding_dict["input"] = input_ embedding_dict = {k: v for k, v in embedding_dict.items() if v is not None} transaction.record_custom_event("LlmEmbedding", embedding_dict) @@ -1101,12 +1097,15 @@ def _nr_dynamodb_datastore_trace_wrapper_(wrapped, instance, args, kwargs): def aws_function_trace( operation, destination_name=None, - params={}, + params=None, terminal=False, async_wrapper=None, extract_agent_attrs=None, library=None, ): + if params is None: + params = {} + @function_wrapper def _nr_aws_function_trace_wrapper_(wrapped, instance, args, kwargs): wrapper = async_wrapper if async_wrapper is not None else get_async_wrapper(wrapped) @@ -1146,12 +1145,15 @@ def aws_message_trace( operation, destination_type, destination_name, - params={}, + params=None, terminal=True, async_wrapper=None, extract_agent_attrs=None, library=None, ): + if params is None: + params = {} + @function_wrapper def _nr_aws_message_trace_wrapper_(wrapped, instance, args, kwargs): wrapper = async_wrapper if async_wrapper is not None else get_async_wrapper(wrapped) diff --git a/newrelic/hooks/framework_aiohttp.py b/newrelic/hooks/framework_aiohttp.py index 9d45a9e28..69bb3b54b 100644 --- a/newrelic/hooks/framework_aiohttp.py +++ b/newrelic/hooks/framework_aiohttp.py @@ -113,7 +113,7 @@ def _nr_aiohttp_wrap_view_(wrapped, instance, args, kwargs): if inspect.isclass(instance._handler): try: - init = getattr(instance._handler, "__init__") + init = instance._handler.__init__ except AttributeError: def init(*args, **kwargs): diff --git a/newrelic/hooks/framework_bottle.py b/newrelic/hooks/framework_bottle.py index 0bb4c6760..355c745db 100644 --- a/newrelic/hooks/framework_bottle.py 
+++ b/newrelic/hooks/framework_bottle.py @@ -140,12 +140,12 @@ def instrument_bottle(module): global module_bottle module_bottle = module - framework_details = ("Bottle", getattr(module, "__version__")) + framework_details = ("Bottle", getattr(module, "__version__", None)) # version >= 0.9 if hasattr(module.Bottle, "wsgi"): # pragma: no cover wrap_wsgi_application(module, "Bottle.wsgi", framework=framework_details) # version < 0.9 - elif hasattr(module.Bottle, "__call__"): # pragma: no cover + elif hasattr(module.Bottle, "__call__"): # noqa: B004 # pragma: no cover wrap_wsgi_application(module, "Bottle.__call__", framework=framework_details) # version >= 0.10 if hasattr(module, "Route") and hasattr(module.Route, "_make_callback"): # pragma: no cover diff --git a/newrelic/hooks/framework_django.py b/newrelic/hooks/framework_django.py index 1c5b29035..dd4f7e42f 100644 --- a/newrelic/hooks/framework_django.py +++ b/newrelic/hooks/framework_django.py @@ -184,15 +184,16 @@ def newrelic_browser_timing_header(): from django.utils.safestring import mark_safe transaction = current_transaction() - return transaction and mark_safe(transaction.browser_timing_header()) or "" # nosec + return transaction and mark_safe(transaction.browser_timing_header()) or "" # noqa: S308 def newrelic_browser_timing_footer(): warnings.warn( "The newrelic_browser_timing_footer function is deprecated. Please migrate to only using the newrelic_browser_timing_header API instead.", DeprecationWarning, + stacklevel=2, ) - return "" # nosec + return "" # Addition of instrumentation for middleware. Can only do this diff --git a/newrelic/hooks/framework_pyramid.py b/newrelic/hooks/framework_pyramid.py index 4a743ff32..4a95ce956 100644 --- a/newrelic/hooks/framework_pyramid.py +++ b/newrelic/hooks/framework_pyramid.py @@ -157,7 +157,7 @@ def _wrapper(context, request): tracer.name = name tracer.add_code_level_metrics(handler) else: - method = getattr(inst, "__call__") + method = inst.__call__ if method: name = callable_name(method) transaction.set_transaction_name(name, priority=2) diff --git a/newrelic/hooks/framework_sanic.py b/newrelic/hooks/framework_sanic.py index 4941751c2..7b6543e5e 100644 --- a/newrelic/hooks/framework_sanic.py +++ b/newrelic/hooks/framework_sanic.py @@ -144,13 +144,13 @@ def error_response(wrapped, instance, args, kwargs): def _sanic_app_init(wrapped, instance, args, kwargs): result = wrapped(*args, **kwargs) - error_handler = getattr(instance, "error_handler") + error_handler = instance.error_handler if hasattr(error_handler, "response"): instance.error_handler.response = error_response(error_handler.response) if hasattr(error_handler, "add"): error_handler.add = _nr_sanic_error_handlers(error_handler.add) - router = getattr(instance, "router") + router = instance.router if hasattr(router, "add"): router.add = _nr_sanic_router_add(router.add) if hasattr(router, "get"): diff --git a/newrelic/hooks/framework_tornado.py b/newrelic/hooks/framework_tornado.py index 751d51bfa..245565fd7 100644 --- a/newrelic/hooks/framework_tornado.py +++ b/newrelic/hooks/framework_tornado.py @@ -240,8 +240,6 @@ def _prepare_request(request, raise_error=True, **kwargs): def create_client_wrapper(wrapped, trace): - values = {"wrapper": None, "wrapped": wrapped, "trace": trace, "functools": functools} - wrapper = textwrap.dedent(""" @functools.wraps(wrapped) async def wrapper(req, raise_error): with trace: @@ -249,15 +247,14 @@ async def wrapper(req, raise_error): try: response = await wrapped(req, raise_error=raise_error) 
except Exception as e: - response = getattr(e, 'response', None) + response = getattr(e, "response", None) raise finally: if response: trace.process_response_headers(response.headers.get_all()) return response - """) - exec(wrapper, values) - return values["wrapper"] + + return wrapper def wrap_httpclient_fetch(wrapped, instance, args, kwargs): diff --git a/newrelic/hooks/logger_loguru.py b/newrelic/hooks/logger_loguru.py index 71466ebbb..1457131c1 100644 --- a/newrelic/hooks/logger_loguru.py +++ b/newrelic/hooks/logger_loguru.py @@ -78,7 +78,8 @@ def wrap_log(wrapped, instance, args, kwargs): try: bound_args = bind_args(wrapped, args, kwargs) options = bound_args["options"] = list(bound_args["options"]) - assert len(options) in ALLOWED_LOGURU_OPTIONS_LENGTHS # Assert the options signature we expect + if len(options) not in ALLOWED_LOGURU_OPTIONS_LENGTHS: # Assert the options signature we expect + raise RuntimeError("Unexpected number of options in loguru call. Instrumentation may be out of date.") options[-2] = nr_log_patcher(options[-2]) # Loguru looks into the stack trace to find the caller's module and function names. diff --git a/newrelic/hooks/messagebroker_confluentkafka.py b/newrelic/hooks/messagebroker_confluentkafka.py index a98f57282..312f30498 100644 --- a/newrelic/hooks/messagebroker_confluentkafka.py +++ b/newrelic/hooks/messagebroker_confluentkafka.py @@ -72,7 +72,7 @@ def wrap_Producer_produce(wrapped, instance, args, kwargs): dt_headers.update(dict(headers)) try: - return wrapped(topic, headers=dt_headers, *args, **kwargs) + return wrapped(topic, *args, **kwargs, headers=dt_headers) except Exception as error: # Unwrap kafka errors while hasattr(error, "exception"): diff --git a/newrelic/hooks/mlmodel_langchain.py b/newrelic/hooks/mlmodel_langchain.py index 0209d82cd..f4f338bfc 100644 --- a/newrelic/hooks/mlmodel_langchain.py +++ b/newrelic/hooks/mlmodel_langchain.py @@ -265,8 +265,8 @@ def _record_vector_search_success(transaction, linking_metadata, ft, search_id, for index, doc in enumerate(response): sequence = index - page_content = getattr(doc, "page_content") - metadata = getattr(doc, "metadata") or {} + page_content = doc.page_content + metadata = doc.metadata or {} metadata_dict = {f"metadata.{key}": value for key, value in metadata.items()} @@ -839,16 +839,16 @@ def _capture_chain_run_id(transaction, run_manager, completion_id): def instrument_langchain_runnables_chains_base(module): - if hasattr(getattr(module, "RunnableSequence"), "invoke"): + if hasattr(module.RunnableSequence, "invoke"): wrap_function_wrapper(module, "RunnableSequence.invoke", wrap_chain_sync_run) - if hasattr(getattr(module, "RunnableSequence"), "ainvoke"): + if hasattr(module.RunnableSequence, "ainvoke"): wrap_function_wrapper(module, "RunnableSequence.ainvoke", wrap_chain_async_run) def instrument_langchain_chains_base(module): - if hasattr(getattr(module, "Chain"), "invoke"): + if hasattr(module.Chain, "invoke"): wrap_function_wrapper(module, "Chain.invoke", wrap_chain_sync_run) - if hasattr(getattr(module, "Chain"), "ainvoke"): + if hasattr(module.Chain, "ainvoke"): wrap_function_wrapper(module, "Chain.ainvoke", wrap_chain_async_run) @@ -870,20 +870,20 @@ def _instrument_class(module, vector_class): def instrument_langchain_core_tools(module): - if hasattr(getattr(module, "BaseTool"), "run"): + if hasattr(module.BaseTool, "run"): wrap_function_wrapper(module, "BaseTool.run", wrap_tool_sync_run) - if hasattr(getattr(module, "BaseTool"), "arun"): + if hasattr(module.BaseTool, "arun"): 
wrap_function_wrapper(module, "BaseTool.arun", wrap_tool_async_run) def instrument_langchain_callbacks_manager(module): - if hasattr(getattr(module, "CallbackManager"), "on_tool_start"): + if hasattr(module.CallbackManager, "on_tool_start"): wrap_function_wrapper(module, "CallbackManager.on_tool_start", wrap_on_tool_start_sync) - if hasattr(getattr(module, "AsyncCallbackManager"), "on_tool_start"): + if hasattr(module.AsyncCallbackManager, "on_tool_start"): wrap_function_wrapper(module, "AsyncCallbackManager.on_tool_start", wrap_on_tool_start_async) - if hasattr(getattr(module, "CallbackManager"), "on_chain_start"): + if hasattr(module.CallbackManager, "on_chain_start"): wrap_function_wrapper(module, "CallbackManager.on_chain_start", wrap_on_chain_start) - if hasattr(getattr(module, "AsyncCallbackManager"), "on_chain_start"): + if hasattr(module.AsyncCallbackManager, "on_chain_start"): wrap_function_wrapper(module, "AsyncCallbackManager.on_chain_start", wrap_async_on_chain_start) diff --git a/newrelic/hooks/mlmodel_openai.py b/newrelic/hooks/mlmodel_openai.py index 14fa25a22..8c32a575f 100644 --- a/newrelic/hooks/mlmodel_openai.py +++ b/newrelic/hooks/mlmodel_openai.py @@ -446,8 +446,8 @@ def _handle_completion_success(transaction, linking_metadata, completion_id, kwa try: # The function trace will be exited when in the final iteration of the response # generator. - setattr(return_val, "_nr_ft", ft) - setattr(return_val, "_nr_openai_attrs", getattr(return_val, "_nr_openai_attrs", {})) + return_val._nr_ft = ft + return_val._nr_openai_attrs = getattr(return_val, "_nr_openai_attrs", {}) return_val._nr_openai_attrs["messages"] = kwargs.get("messages", []) return_val._nr_openai_attrs["temperature"] = kwargs.get("temperature") return_val._nr_openai_attrs["max_tokens"] = kwargs.get("max_tokens") @@ -676,7 +676,7 @@ def wrap_convert_to_openai_object(wrapped, instance, args, kwargs): if isinstance(returned_response, openai.openai_object.OpenAIObject) and isinstance( resp, openai.openai_response.OpenAIResponse ): - setattr(returned_response, "_nr_response_headers", getattr(resp, "_headers", {})) + returned_response._nr_response_headers = getattr(resp, "_headers", {}) return returned_response @@ -940,7 +940,7 @@ def instrument_openai_api_resources_embedding(module): wrap_function_wrapper(module, "Embedding.acreate", wrap_embedding_async) # This is to mark where we instrument so the SDK knows not to instrument them # again. - setattr(module.Embedding, "_nr_wrapped", True) + module.Embedding._nr_wrapped = True def instrument_openai_api_resources_chat_completion(module): @@ -951,7 +951,7 @@ def instrument_openai_api_resources_chat_completion(module): wrap_function_wrapper(module, "ChatCompletion.acreate", wrap_chat_completion_async) # This is to mark where we instrument so the SDK knows not to instrument them # again. - setattr(module.ChatCompletion, "_nr_wrapped", True) + module.ChatCompletion._nr_wrapped = True def instrument_openai_resources_chat_completions(module): @@ -976,7 +976,7 @@ def instrument_openai_util(module): wrap_function_wrapper(module, "convert_to_openai_object", wrap_convert_to_openai_object) # This is to mark where we instrument so the SDK knows not to instrument them # again. 
- setattr(module.convert_to_openai_object, "_nr_wrapped", True) + module.convert_to_openai_object._nr_wrapped = True def instrument_openai_base_client(module): diff --git a/newrelic/hooks/mlmodel_sklearn.py b/newrelic/hooks/mlmodel_sklearn.py index 437832ff5..ed76448ff 100644 --- a/newrelic/hooks/mlmodel_sklearn.py +++ b/newrelic/hooks/mlmodel_sklearn.py @@ -89,7 +89,7 @@ def _nr_wrapper_method(wrapped, instance, args, kwargs): # If this is the fit method, increment the training_step counter. if method in ("fit", "fit_predict"): training_step = getattr(instance, "_nr_wrapped_training_step", -1) - setattr(instance, "_nr_wrapped_training_step", training_step + 1) + instance._nr_wrapped_training_step = training_step + 1 # If this is the predict method, wrap the return type in an nr type with # _nr_wrapped attrs that will attach model info to the data. diff --git a/pyproject.toml b/pyproject.toml index 7ba49b11a..c87f95fa3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,6 +2,7 @@ output-format = "grouped" line-length = 120 target-version = "py37" +force-exclude = true # Fixes issue with megalinter config preventing exclusion of files extend-exclude = [ "newrelic/packages/", "setup.py", @@ -26,10 +27,10 @@ select = [ # "UP", # pyupgrade "YTT", # flake8-2020 "ASYNC", # flake8-async - # "S", # flake8-bandit + "S", # flake8-bandit # "BLE", # flake8-blind-except # "FBT", # flake8-boolean-trap - # "B", # flake8-bugbear + "B", # flake8-bugbear "A", # flake8-builtins "COM", # flake8-commas # "C4", # flake8-comprehensions @@ -88,6 +89,7 @@ ignore = [ # Temporarily disabled rules "RUF100", # unused-noqa (TODO: remove this once all linters are enabled) "PERF203", # try-except-in-loop (most of these are unavoidable) + "S110", # try-except-pass (Bandit wants us to log the exception, which is usually pointless. Spot check these later) # Permanently disabled rules "D203", # incorrect-blank-line-before-class "D213", # multi-line-summary-second-line @@ -111,11 +113,16 @@ ignore = [ [tool.ruff.lint.per-file-ignores] "tests/*" = [ # Disabled rules in tests + "S", # flake8-bandit (security checks are not necessary in tests) "F401", # unused-import "F811", # redefined-while-unused (pytest fixtures trigger this) "PLR2004", # magic-value-comparison (comparing to constant values) - "S101", # assert (acceptable in tests) "ASYNC251", # blocking-sleep-in-async-function (acceptable in tests) + "B904", # raise-without-from-inside-except (not necessary in tests) +] +"newrelic/admin/*" = [ + # Disabled rules in admin scripts + "S108", # flake8-bandit (hardcoded log files are never used as input) ] # Alternate linters and formatters diff --git a/tests/adapter_gevent/test_pywsgi.py b/tests/adapter_gevent/test_pywsgi.py index 1e911eb6b..f435756c1 100644 --- a/tests/adapter_gevent/test_pywsgi.py +++ b/tests/adapter_gevent/test_pywsgi.py @@ -14,13 +14,13 @@ def test_pywsgi_application_index(target_application): - for i in range(3): + for _ in range(3): response = target_application.get("/pywsgi") response.mustcontain("WSGI RESPONSE") def test_pywsgi_request_timeout_application(target_application): - for i in range(3): + for _ in range(3): response = target_application.get("/request-timeout-application/pywsgi", status=500) @@ -35,7 +35,7 @@ def test_pywsgi_request_timeout_response(target_application): # as expected. It may all be done to some issue with how coroutines # are schedule when done within the one process. 
- for i in range(3): + for _ in range(3): response = target_application.get("/request-timeout-response/pywsgi", status=500) # response.mustcontain('WSGI') @@ -45,21 +45,21 @@ def test_pywsgi_request_timeout_finalize(target_application): # where the results isn't what we expect when things run in the same # process. - for i in range(3): + for _ in range(3): response = target_application.get("/request-timeout-finalize/pywsgi", status=500) # response.mustcontain('WSGI RESPONSE') def test_pywsgi_raise_exception_application(target_application): - for i in range(3): + for _ in range(3): response = target_application.get("/raise-exception-application/pywsgi", status=500) def test_pywsgi_raise_exception_response(target_application): - for i in range(3): + for _ in range(3): response = target_application.get("/raise-exception-response/pywsgi", status=500) def test_pywsgi_raise_exception_finalize(target_application): - for i in range(3): + for _ in range(3): response = target_application.get("/raise-exception-finalize/pywsgi", status=500) diff --git a/tests/adapter_gunicorn/test_aiohttp_app_factory.py b/tests/adapter_gunicorn/test_aiohttp_app_factory.py index 0479adfce..39e3f84de 100644 --- a/tests/adapter_gunicorn/test_aiohttp_app_factory.py +++ b/tests/adapter_gunicorn/test_aiohttp_app_factory.py @@ -84,4 +84,4 @@ def test_aiohttp_app_factory(nr_enabled): # test passed break else: - assert False, "Gunicorn test did not run" + raise AssertionError("Gunicorn test did not run") diff --git a/tests/adapter_gunicorn/test_asgi_app.py b/tests/adapter_gunicorn/test_asgi_app.py index 324f711ea..a2fa332c6 100644 --- a/tests/adapter_gunicorn/test_asgi_app.py +++ b/tests/adapter_gunicorn/test_asgi_app.py @@ -74,4 +74,4 @@ def test_asgi_app(nr_enabled): # test passed break else: - assert False, "Gunicorn test did not run" + raise AssertionError("Gunicorn test did not run") diff --git a/tests/adapter_gunicorn/test_gaiohttp.py b/tests/adapter_gunicorn/test_gaiohttp.py index 33b5e3592..d6a624478 100644 --- a/tests/adapter_gunicorn/test_gaiohttp.py +++ b/tests/adapter_gunicorn/test_gaiohttp.py @@ -75,4 +75,4 @@ def test_gunicorn_gaiohttp_worker(nr_enabled): # test passed break else: - assert False, "Gunicorn test did not run" + raise AssertionError("Gunicorn test did not run") diff --git a/tests/agent_features/test_asgi_browser.py b/tests/agent_features/test_asgi_browser.py index ce007cab4..cf5ac7053 100644 --- a/tests/agent_features/test_asgi_browser.py +++ b/tests/agent_features/test_asgi_browser.py @@ -66,7 +66,7 @@ def test_header_attributes(): assert settings.beacon assert settings.error_beacon - token = "0123456789ABCDEF" # nosec + token = "0123456789ABCDEF" headers = {"Cookie": f"NRAGENT=tk={token}"} response = target_application_manual_rum.get("/", headers=headers) diff --git a/tests/agent_features/test_asgi_distributed_tracing.py b/tests/agent_features/test_asgi_distributed_tracing.py index 85371eb89..c7ff8acb5 100644 --- a/tests/agent_features/test_asgi_distributed_tracing.py +++ b/tests/agent_features/test_asgi_distributed_tracing.py @@ -184,7 +184,7 @@ def _test(): if gen_error: try: - 1 / 0 + 1 / 0 # noqa: B018 except ZeroDivisionError: transaction.notice_error() diff --git a/tests/agent_features/test_asgi_w3c_trace_context.py b/tests/agent_features/test_asgi_w3c_trace_context.py index 5000ef7dc..a3bd9a003 100644 --- a/tests/agent_features/test_asgi_w3c_trace_context.py +++ b/tests/agent_features/test_asgi_w3c_trace_context.py @@ -94,11 +94,11 @@ def _test(): response = _test() - for header_name, 
header_value in response.headers.items(): + for header_name, header_value in response.headers.items(): # noqa: B007 if header_name == "tracestate": break else: - assert False, "tracestate header not propagated" + raise AssertionError("tracestate header not propagated") header_value = header_value.split(",", 1)[0] key, value = header_value.split("=", 2) @@ -136,11 +136,11 @@ def _test(): def test_tracestate_propagation(inbound_tracestate, expected): headers = {"traceparent": INBOUND_TRACEPARENT, "tracestate": inbound_tracestate} response = test_asgi_application.make_request("GET", "/", headers=headers) - for header_name, header_value in response.headers.items(): + for header_name, header_value in response.headers.items(): # noqa: B007 if header_name == "tracestate": break else: - assert False, "tracestate header not propagated" + raise AssertionError("tracestate header not propagated") assert not header_value.endswith(",") if inbound_tracestate: @@ -162,11 +162,11 @@ def _test(): return test_asgi_application.make_request("GET", "/", headers=headers) response = _test() - for header_name, header_value in response.headers.items(): + for header_name, header_value in response.headers.items(): # noqa: B007 if header_name == "traceparent": break else: - assert False, "traceparent header not present" + raise AssertionError("traceparent header not present") assert len(header_value) == 55 assert header_value.startswith("00-") diff --git a/tests/agent_features/test_browser.py b/tests/agent_features/test_browser.py index 71f6e7dc8..92f0d74f7 100644 --- a/tests/agent_features/test_browser.py +++ b/tests/agent_features/test_browser.py @@ -66,7 +66,7 @@ def test_header_attributes(): assert settings.beacon assert settings.error_beacon - token = "0123456789ABCDEF" # nosec + token = "0123456789ABCDEF" headers = {"Cookie": f"NRAGENT=tk={token}"} response = target_application_manual_rum.get("/", headers=headers) diff --git a/tests/agent_features/test_browser_middleware.py b/tests/agent_features/test_browser_middleware.py index 66589f455..d1be90743 100644 --- a/tests/agent_features/test_browser_middleware.py +++ b/tests/agent_features/test_browser_middleware.py @@ -63,7 +63,7 @@ def _app_list_exc_1(environ, start_response): status = "200 OK" response_headers = [("Content-type", "text/plain")] start_response(status, response_headers) - 1 / 0 + 1 / 0 # noqa: B018 return [PAGE_CONTENTS] @@ -74,7 +74,7 @@ def _app_list_exc_1(environ, start_response): def _app_list_exc_2(environ, start_response): status = "200 OK" response_headers = [("Content-type", "text/plain")] - 1 / 0 + 1 / 0 # noqa: B018 start_response(status, response_headers) return [PAGE_CONTENTS] @@ -87,7 +87,7 @@ def _app_iter_exc_1(environ, start_response): status = "200 OK" response_headers = [("Content-type", "text/plain")] start_response(status, response_headers) - 1 / 0 + 1 / 0 # noqa: B018 yield PAGE_CONTENTS @@ -98,7 +98,7 @@ def _app_iter_exc_1(environ, start_response): def _app_iter_exc_2(environ, start_response): status = "200 OK" response_headers = [("Content-type", "text/plain")] - 1 / 0 + 1 / 0 # noqa: B018 start_response(status, response_headers) yield PAGE_CONTENTS diff --git a/tests/agent_features/test_distributed_tracing.py b/tests/agent_features/test_distributed_tracing.py index 6a943d992..cbf336eed 100644 --- a/tests/agent_features/test_distributed_tracing.py +++ b/tests/agent_features/test_distributed_tracing.py @@ -264,7 +264,7 @@ def _test(): if gen_error: try: - 1 / 0 + 1 / 0 # noqa: B018 except ZeroDivisionError: 
transaction.notice_error() diff --git a/tests/agent_features/test_supportability_metrics.py b/tests/agent_features/test_supportability_metrics.py index 6d4ed1120..1d5234fd4 100644 --- a/tests/agent_features/test_supportability_metrics.py +++ b/tests/agent_features/test_supportability_metrics.py @@ -62,7 +62,7 @@ def test_uses_api_twice(): @newrelic.agent.background_task() def test_notice_error(): try: - 1 / 0 + 1 / 0 # noqa: B018 except ZeroDivisionError: newrelic.agent.notice_error(sys.exc_info()) diff --git a/tests/agent_features/test_synthetics.py b/tests/agent_features/test_synthetics.py index 0bcb4b126..f584a1223 100644 --- a/tests/agent_features/test_synthetics.py +++ b/tests/agent_features/test_synthetics.py @@ -266,7 +266,7 @@ def _synthetics_limit_test(num_requests, num_events, num_transactions): # Send requests headers = _make_synthetics_headers() - for i in range(num_requests): + for _ in range(num_requests): response = target_application.get("/", headers=headers) # Check that we've saved the right number events and traces diff --git a/tests/agent_features/test_transaction_event_data_and_some_browser_stuff_too.py b/tests/agent_features/test_transaction_event_data_and_some_browser_stuff_too.py index 99caa2612..93cd1cefe 100644 --- a/tests/agent_features/test_transaction_event_data_and_some_browser_stuff_too.py +++ b/tests/agent_features/test_transaction_event_data_and_some_browser_stuff_too.py @@ -265,7 +265,7 @@ def test_capture_attributes_disabled(): @transient_function_wrapper("newrelic.core.stats_engine", "SampledDataSet.add") def validate_no_analytics_sample_data(wrapped, instance, args, kwargs): - assert False, "Should not be recording analytic event." + raise AssertionError("Should not be recording analytic event.") return wrapped(*args, **kwargs) diff --git a/tests/agent_features/test_w3c_trace_context.py b/tests/agent_features/test_w3c_trace_context.py index c5e78a02a..2cd22a48d 100644 --- a/tests/agent_features/test_w3c_trace_context.py +++ b/tests/agent_features/test_w3c_trace_context.py @@ -85,11 +85,11 @@ def _test(): return test_application.get("/", headers=headers) response = _test() - for header_name, header_value in response.json: + for header_name, header_value in response.json: # noqa: B007 if header_name == "tracestate": break else: - assert False, "tracestate header not propagated" + raise AssertionError("tracestate header not propagated") header_value = header_value.split(",", 1)[0] key, value = header_value.split("=", 2) @@ -127,11 +127,11 @@ def _test(): def test_tracestate_propagation(inbound_tracestate, expected): headers = {"traceparent": INBOUND_TRACEPARENT, "tracestate": inbound_tracestate} response = test_application.get("/", headers=headers) - for header_name, header_value in response.json: + for header_name, header_value in response.json: # noqa: B007 if header_name == "tracestate": break else: - assert False, "tracestate header not propagated" + raise AssertionError("tracestate header not propagated") assert not header_value.endswith(",") if inbound_tracestate: @@ -153,11 +153,11 @@ def _test(): return test_application.get("/", headers=headers) response = _test() - for header_name, header_value in response.json: + for header_name, header_value in response.json: # noqa: B007 if header_name == "traceparent": break else: - assert False, "traceparent header not present" + raise AssertionError("traceparent header not present") assert len(header_value) == 55 assert header_value.startswith("00-") diff --git a/tests/agent_unittests/test_agent_protocol.py 
b/tests/agent_unittests/test_agent_protocol.py index cc168ed9e..867026fe0 100644 --- a/tests/agent_unittests/test_agent_protocol.py +++ b/tests/agent_unittests/test_agent_protocol.py @@ -111,7 +111,7 @@ def detect(cls): elif name.startswith("Kubernetes"): output = KUBERNETES else: - assert False, "Unknown utilization class" + raise AssertionError("Unknown utilization class") if output is Exception: raise Exception diff --git a/tests/agent_unittests/test_environment.py b/tests/agent_unittests/test_environment.py index 2a3d51719..7705a5c8b 100644 --- a/tests/agent_unittests/test_environment.py +++ b/tests/agent_unittests/test_environment.py @@ -38,7 +38,7 @@ def test_plugin_list(): for name, version, _ in plugins(): if name == "newrelic.hooks.newrelic": - assert False, "Bogus plugin found" + raise AssertionError("Bogus plugin found") if name == "pytest": # Check that plugin that should get reported has version info. assert version == pytest.__version__ diff --git a/tests/agent_unittests/test_harvest_loop.py b/tests/agent_unittests/test_harvest_loop.py index f3e71d24e..dca02db20 100644 --- a/tests/agent_unittests/test_harvest_loop.py +++ b/tests/agent_unittests/test_harvest_loop.py @@ -168,7 +168,11 @@ def transaction_node(request): return node -def validate_metric_payload(metrics=[], endpoints_called=[]): +def validate_metric_payload(metrics=None, endpoints_called=None): + if endpoints_called is None: + endpoints_called = [] + if metrics is None: + metrics = [] sent_metrics = {} @transient_function_wrapper("newrelic.core.agent_protocol", "AgentProtocol.send") @@ -236,7 +240,10 @@ def _bind_params(method, payload=(), *args, **kwargs): return _wrapper -def validate_error_event_sampling(events_seen, reservoir_size, endpoints_called=[]): +def validate_error_event_sampling(events_seen, reservoir_size, endpoints_called=None): + if endpoints_called is None: + endpoints_called = [] + @transient_function_wrapper("newrelic.core.data_collector", "AgentProtocol.send") def send_request_wrapper(wrapped, instance, args, kwargs): def _bind_params(method, payload=(), *args, **kwargs): diff --git a/tests/agent_unittests/test_http_client.py b/tests/agent_unittests/test_http_client.py index 4a752f690..2c64d08e2 100644 --- a/tests/agent_unittests/test_http_client.py +++ b/tests/agent_unittests/test_http_client.py @@ -307,7 +307,7 @@ def test_http_payload_compression(server, client_cls, method, threshold): content_encoding = content_encoding.strip() break else: - assert False, "Missing content-encoding header" + raise AssertionError("Missing content-encoding header") assert content_encoding == expected_content_encoding assert sent_payload == payload diff --git a/tests/agent_unittests/test_package_version_utils.py b/tests/agent_unittests/test_package_version_utils.py index f83294fd4..32934923b 100644 --- a/tests/agent_unittests/test_package_version_utils.py +++ b/tests/agent_unittests/test_package_version_utils.py @@ -124,7 +124,7 @@ def test_pkg_resources_metadata(): def _getattr_deprecation_warning(attr): if attr == "__version__": - warnings.warn("Testing deprecation warnings.", DeprecationWarning) + warnings.warn("Testing deprecation warnings.", DeprecationWarning, stacklevel=2) return "3.2.1" else: raise NotImplementedError diff --git a/tests/agent_unittests/test_trace_cache.py b/tests/agent_unittests/test_trace_cache.py index 4a53d2a6e..9c2d0ebbc 100644 --- a/tests/agent_unittests/test_trace_cache.py +++ b/tests/agent_unittests/test_trace_cache.py @@ -60,11 +60,11 @@ def _iterate_trace_cache(shutdown): while 
True: if shutdown.is_set(): return - for k, v in trace_cache.items(): + for _, _ in trace_cache.items(): pass - for v in trace_cache.values(): + for _ in trace_cache.values(): pass - for v in trace_cache.keys(): + for _ in trace_cache.keys(): pass return _iterate_trace_cache diff --git a/tests/agent_unittests/test_utilization_settings.py b/tests/agent_unittests/test_utilization_settings.py index 634830bf0..0e5fbdacf 100644 --- a/tests/agent_unittests/test_utilization_settings.py +++ b/tests/agent_unittests/test_utilization_settings.py @@ -17,6 +17,7 @@ from importlib import reload import pytest +from testing_support.fixtures import Environ # these will be reloaded for each test import newrelic.config @@ -65,25 +66,9 @@ "NEW_RELIC_HEROKU_DYNO_NAME_PREFIXES_TO_SHORTEN": "meow wruff", } -INITIAL_ENV = os.environ # Tests for loading settings and testing for values precedence - -class Environ: - def __init__(self, env_dict): - self.env_dict = {} - for key in env_dict.keys(): - self.env_dict[key] = str(env_dict[key]) - - def __enter__(self): - os.environ.update(self.env_dict) - - def __exit__(self, *args, **kwargs): - os.environ.clear() - os.environ = INITIAL_ENV - - def reset_agent_config(ini_contents, env_dict): @function_wrapper def reset(wrapped, instance, args, kwargs): diff --git a/tests/component_flask_rest/test_application.py b/tests/component_flask_rest/test_application.py index be846d80a..b29e74a04 100644 --- a/tests/component_flask_rest/test_application.py +++ b/tests/component_flask_rest/test_application.py @@ -31,7 +31,7 @@ def application(request): elif request.param == "flask_restx": import flask_restx as module else: - assert False + raise AssertionError if "propagate_exceptions" in request.fixturenames: propagate_exceptions = request.getfixturevalue("propagate_exceptions") diff --git a/tests/component_tastypie/api.py b/tests/component_tastypie/api.py index ce2101f09..6ebe72e02 100644 --- a/tests/component_tastypie/api.py +++ b/tests/component_tastypie/api.py @@ -28,6 +28,6 @@ def obj_get(self, *args, **kwargs): elif pk == "ObjectDoesNotExist": raise ObjectDoesNotExist("Object does not exist.") elif pk == "ZeroDivisionError": - 1 / 0 + 1 / 0 # noqa: B018 else: raise NotImplemented diff --git a/tests/cross_agent/test_cat_map.py b/tests/cross_agent/test_cat_map.py index afe1926bf..e52e40320 100644 --- a/tests/cross_agent/test_cat_map.py +++ b/tests/cross_agent/test_cat_map.py @@ -113,7 +113,7 @@ def target_wsgi_application(environ, start_response): else: if "X-NewRelic-Transaction" in generated_outbound_header: status = "500 Outbound Headers Check Failed." - r = urlopen(environ["server_url"]) # nosec B310 + r = urlopen(environ["server_url"]) r.read(10) # Set the final transaction name. 
diff --git a/tests/cross_agent/test_distributed_tracing.py b/tests/cross_agent/test_distributed_tracing.py index aabcec1d9..ce84594f3 100644 --- a/tests/cross_agent/test_distributed_tracing.py +++ b/tests/cross_agent/test_distributed_tracing.py @@ -114,7 +114,7 @@ def target_wsgi_application(environ, start_response): if test_settings["raises_exception"]: try: - 1 / 0 + 1 / 0 # noqa: B018 except ZeroDivisionError: txn.notice_error() diff --git a/tests/cross_agent/test_pcf_utilization_data.py b/tests/cross_agent/test_pcf_utilization_data.py index 94696c1b5..ffa18adb6 100644 --- a/tests/cross_agent/test_pcf_utilization_data.py +++ b/tests/cross_agent/test_pcf_utilization_data.py @@ -16,12 +16,12 @@ import os import pytest +from testing_support.fixtures import Environ from testing_support.validators.validate_internal_metrics import validate_internal_metrics from newrelic.common.utilization import PCFUtilization CURRENT_DIR = os.path.dirname(os.path.realpath(__file__)) -INITIAL_ENV = os.environ FIXTURE = os.path.normpath(os.path.join(CURRENT_DIR, "fixtures", "utilization_vendor_specific", "pcf.json")) _parameters_list = ["testname", "env_vars", "expected_vendors_hash", "expected_metrics"] @@ -42,27 +42,6 @@ def _parametrize_test(test): _pcf_tests = [_parametrize_test(t) for t in _load_tests()] -class Environ: - def __init__(self, env_dict): - env_dict = env_dict or {} - cleaned_env_dict = {} - for key, val in env_dict.items(): - if val is None: - continue - elif not isinstance(val, str): - cleaned_env_dict[key] = val.encode("utf-8") - else: - cleaned_env_dict[key] = val - self.env_dict = cleaned_env_dict - - def __enter__(self): - os.environ.update(self.env_dict) - - def __exit__(self, *args, **kwargs): - os.environ.clear() - os.environ = INITIAL_ENV - - class MockResponse: def __init__(self, code, body): self.code = code diff --git a/tests/datastore_aiomysql/test_sqlalchemy.py b/tests/datastore_aiomysql/test_sqlalchemy.py index aaec893f7..5583df628 100644 --- a/tests/datastore_aiomysql/test_sqlalchemy.py +++ b/tests/datastore_aiomysql/test_sqlalchemy.py @@ -13,18 +13,13 @@ # limitations under the License. 
from aiomysql.sa import create_engine +from sqlalchemy import Column, Float, Integer, String from sqlalchemy.orm import declarative_base -from sqlalchemy import Integer, String, Column, Float from sqlalchemy.schema import CreateTable, DropTable - from testing_support.db_settings import mysql_settings from testing_support.util import instance_hostname -from testing_support.validators.validate_database_trace_inputs import ( - validate_database_trace_inputs, -) -from testing_support.validators.validate_transaction_metrics import ( - validate_transaction_metrics, -) +from testing_support.validators.validate_database_trace_inputs import validate_database_trace_inputs +from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics from newrelic.api.background_task import background_task @@ -38,13 +33,14 @@ Base = declarative_base() + class ABCModel(Base): __tablename__ = TABLE_NAME a = Column(Integer, primary_key=True) b = Column(Float) c = Column(String(100)) - + ABCTable = ABCModel.__table__ @@ -59,9 +55,7 @@ async def exercise(engine): await conn.execute(ABCTable.insert().values(input_rows)) cursor = await conn.execute(ABCTable.select()) - rows = [] - async for row in cursor: - rows.append(row) + rows = [row async for row in cursor] assert rows == input_rows, f"Expected: {input_rows}, Got: {rows}" @@ -101,6 +95,7 @@ async def exercise(engine): (f"Datastore/instance/MySQL/{HOST}/{PORT}", 8), ] + @validate_transaction_metrics( "test_sqlalchemy:test_execute_via_engine", scoped_metrics=SCOPED_METRICS, diff --git a/tests/datastore_aioredis/test_instance_info.py b/tests/datastore_aioredis/test_instance_info.py index 1cbc783a6..ed257e6e0 100644 --- a/tests/datastore_aioredis/test_instance_info.py +++ b/tests/datastore_aioredis/test_instance_info.py @@ -117,7 +117,7 @@ def test_strict_redis_connection_from_url(client_cls, args, kwargs, expected, lo elif r.connection_pool.connection_class is aioredis.UnixDomainSocketConnection: r.connection_pool.connection_class = DisabledUnixConnection else: - assert False, r.connection_pool.connection_class + raise AssertionError(r.connection_pool.connection_class) connection = loop.run_until_complete(r.connection_pool.get_connection("SELECT")) try: diff --git a/tests/datastore_aredis/test_instance_info.py b/tests/datastore_aredis/test_instance_info.py index 432af84f2..aa432351e 100644 --- a/tests/datastore_aredis/test_instance_info.py +++ b/tests/datastore_aredis/test_instance_info.py @@ -108,7 +108,7 @@ def test_strict_redis_connection_from_url(args, kwargs, expected): elif r.connection_pool.connection_class is aredis.UnixDomainSocketConnection: r.connection_pool.connection_class = DisabledUnixConnection else: - assert False, r.connection_pool.connection_class + raise AssertionError(r.connection_pool.connection_class) connection = r.connection_pool.get_connection("SELECT") try: conn_kwargs = _conn_attrs_to_dict(connection) diff --git a/tests/datastore_asyncpg/test_query.py b/tests/datastore_asyncpg/test_query.py index af8f19174..0e7e5ef72 100644 --- a/tests/datastore_asyncpg/test_query.py +++ b/tests/datastore_asyncpg/test_query.py @@ -166,7 +166,7 @@ async def amain(): def test_cursor(event_loop, conn): async def amain(): async with conn.transaction(): - async for record in conn.cursor("SELECT generate_series(0, 0)", prefetch=1): + async for _record in conn.cursor("SELECT generate_series(0, 0)", prefetch=1): pass await conn.cursor("SELECT 0") diff --git a/tests/datastore_memcache/conftest.py b/tests/datastore_memcache/conftest.py 
index 06fa79cdf..458da40f5 100644 --- a/tests/datastore_memcache/conftest.py +++ b/tests/datastore_memcache/conftest.py @@ -46,9 +46,9 @@ def memcached_multi(): client_all = memcache.Client(db_servers) num_servers = len(db_servers) - for try_num in range(10 * num_servers): + for _try_num in range(10 * num_servers): multi_dict = {} - for i in range(num_servers): + for _ in range(num_servers): random_chars = (random.choice(string.ascii_uppercase) for _ in range(10)) key_candidate = "".join(random_chars) multi_dict[key_candidate] = key_candidate @@ -65,6 +65,6 @@ def memcached_multi(): if all(server_hit): break else: - assert False, "memcached_multi failed to map keys to multiple servers." + raise AssertionError("memcached_multi failed to map keys to multiple servers.") return multi_dict diff --git a/tests/datastore_mysql/test_database.py b/tests/datastore_mysql/test_database.py index 57eb54251..786a3a5ce 100644 --- a/tests/datastore_mysql/test_database.py +++ b/tests/datastore_mysql/test_database.py @@ -106,7 +106,7 @@ def test_execute_via_cursor(table_name): cursor.execute(f"""select * from {table_name}""") - for row in cursor: + for _row in cursor: pass cursor.execute( @@ -198,7 +198,7 @@ def test_connect_using_alias(table_name): cursor.execute(f"""select * from {table_name}""") - for row in cursor: + for _row in cursor: pass cursor.execute( diff --git a/tests/datastore_psycopg2cffi/test_database.py b/tests/datastore_psycopg2cffi/test_database.py index bc248553f..377cae367 100644 --- a/tests/datastore_psycopg2cffi/test_database.py +++ b/tests/datastore_psycopg2cffi/test_database.py @@ -101,7 +101,7 @@ def test_execute_via_cursor(): cursor.execute(f"""select * from {DB_SETTINGS["table_name"]}""") - for row in cursor: + for _row in cursor: pass cursor.execute(f"update {DB_SETTINGS['table_name']} set a=%s, b=%s, c=%s where a=%s", (4, 4.0, "4.0", 1)) diff --git a/tests/datastore_pymssql/test_database.py b/tests/datastore_pymssql/test_database.py index f18503162..de2c9d330 100644 --- a/tests/datastore_pymssql/test_database.py +++ b/tests/datastore_pymssql/test_database.py @@ -35,7 +35,7 @@ def execute_db_calls_with_cursor(cursor): cursor.execute(f"""select * from {TABLE_NAME}""") - for row in cursor: + for _row in cursor: pass cursor.execute(f"update {TABLE_NAME} set a=%s, b=%s, c=%s where a=%s", (4, 4.0, "4.0", 1)) diff --git a/tests/datastore_pymysql/test_database.py b/tests/datastore_pymysql/test_database.py index 7bef2e34a..6be867808 100644 --- a/tests/datastore_pymysql/test_database.py +++ b/tests/datastore_pymysql/test_database.py @@ -39,7 +39,7 @@ def execute_db_calls_with_cursor(cursor): cursor.execute(f"""select * from {TABLE_NAME}""") - for row in cursor: + for _row in cursor: pass cursor.execute(f"update {TABLE_NAME} set a=%s, b=%s, c=%s where a=%s", (4, 4.0, "4.0", 1)) diff --git a/tests/datastore_pyodbc/test_pyodbc.py b/tests/datastore_pyodbc/test_pyodbc.py index 899717ce8..f391c5166 100644 --- a/tests/datastore_pyodbc/test_pyodbc.py +++ b/tests/datastore_pyodbc/test_pyodbc.py @@ -57,7 +57,7 @@ def test_execute_via_cursor(pyodbc_driver): [(1, 1.0, "1.0"), (2, 2.2, "2.2"), (3, 3.3, "3.3")], ) cursor.execute(f"""select * from {DB_SETTINGS["table_name"]}""") - for row in cursor: + for _row in cursor: pass cursor.execute(f"update {DB_SETTINGS['table_name']} set a=?, b=?, c=? 
where a=?", (4, 4.0, "4.0", 1)) cursor.execute(f"""delete from {DB_SETTINGS["table_name"]} where a=2""") diff --git a/tests/datastore_redis/test_instance_info.py b/tests/datastore_redis/test_instance_info.py index 211e96169..e841072ac 100644 --- a/tests/datastore_redis/test_instance_info.py +++ b/tests/datastore_redis/test_instance_info.py @@ -144,7 +144,7 @@ def test_redis_connection_from_url(args, kwargs, expected): elif r.connection_pool.connection_class is redis.SSLConnection: r.connection_pool.connection_class = DisabledSSLConnection else: - assert False, r.connection_pool.connection_class + raise AssertionError(r.connection_pool.connection_class) connection = r.connection_pool.get_connection("SELECT") try: conn_kwargs = _conn_attrs_to_dict(connection) @@ -164,7 +164,7 @@ def test_strict_redis_connection_from_url(args, kwargs, expected): elif r.connection_pool.connection_class is redis.SSLConnection: r.connection_pool.connection_class = DisabledSSLConnection else: - assert False, r.connection_pool.connection_class + raise AssertionError(r.connection_pool.connection_class) connection = r.connection_pool.get_connection("SELECT") try: conn_kwargs = _conn_attrs_to_dict(connection) diff --git a/tests/datastore_redis/test_uninstrumented_methods.py b/tests/datastore_redis/test_uninstrumented_methods.py index 8c95bdedc..283bf4e21 100644 --- a/tests/datastore_redis/test_uninstrumented_methods.py +++ b/tests/datastore_redis/test_uninstrumented_methods.py @@ -105,7 +105,7 @@ def test_uninstrumented_methods(client): if hasattr(client, module): module_client = getattr(client, module)() module_methods = {m for m in dir(module_client) if not m[0] == "_"} - is_wrapped = lambda m: hasattr(getattr(module_client, m), "__wrapped__") + is_wrapped = lambda m: hasattr(getattr(module_client, m), "__wrapped__") # noqa: B023 uninstrumented |= {m for m in module_methods - IGNORED_METHODS if not is_wrapped(m)} assert not uninstrumented, f"Uninstrumented methods: {sorted(uninstrumented)}" diff --git a/tests/datastore_rediscluster/test_uninstrumented_rediscluster_methods.py b/tests/datastore_rediscluster/test_uninstrumented_rediscluster_methods.py index 7414d3ecc..afa4677cf 100644 --- a/tests/datastore_rediscluster/test_uninstrumented_rediscluster_methods.py +++ b/tests/datastore_rediscluster/test_uninstrumented_rediscluster_methods.py @@ -152,7 +152,7 @@ def test_uninstrumented_methods(): if hasattr(client, module): module_client = getattr(client, module)() module_methods = {m for m in dir(module_client) if not m[0] == "_"} - is_wrapped = lambda m: hasattr(getattr(module_client, m), "__wrapped__") + is_wrapped = lambda m: hasattr(getattr(module_client, m), "__wrapped__") # noqa: B023 uninstrumented |= {m for m in module_methods - IGNORED_METHODS if not is_wrapped(m)} assert not uninstrumented, f"Uninstrumented methods: {sorted(uninstrumented)}" diff --git a/tests/datastore_solrpy/test_solr.py b/tests/datastore_solrpy/test_solr.py index d5a3b7230..61244391f 100644 --- a/tests/datastore_solrpy/test_solr.py +++ b/tests/datastore_solrpy/test_solr.py @@ -32,7 +32,7 @@ def _exercise_solr(solr): solr.add_many([{"id": x} for x in documents]) solr.commit() - solr.query(f"id:{documents[0]}").results + solr.query(f"id:{documents[0]}").results # noqa: B018 solr.delete(f"id:*_{DB_SETTINGS['namespace']}") solr.commit() diff --git a/tests/datastore_valkey/test_instance_info.py b/tests/datastore_valkey/test_instance_info.py index 7ce1b2204..01f90f819 100644 --- a/tests/datastore_valkey/test_instance_info.py +++ 
b/tests/datastore_valkey/test_instance_info.py @@ -131,7 +131,7 @@ def test_valkey_connection_from_url(args, kwargs, expected): elif r.connection_pool.connection_class is valkey.SSLConnection: r.connection_pool.connection_class = DisabledSSLConnection else: - assert False, r.connection_pool.connection_class + raise AssertionError(r.connection_pool.connection_class) connection = r.connection_pool.get_connection("SELECT") try: conn_kwargs = _conn_attrs_to_dict(connection) @@ -150,7 +150,7 @@ def test_strict_valkey_connection_from_url(args, kwargs, expected): elif r.connection_pool.connection_class is valkey.SSLConnection: r.connection_pool.connection_class = DisabledSSLConnection else: - assert False, r.connection_pool.connection_class + raise AssertionError(r.connection_pool.connection_class) connection = r.connection_pool.get_connection("SELECT") try: conn_kwargs = _conn_attrs_to_dict(connection) diff --git a/tests/datastore_valkey/test_uninstrumented_methods.py b/tests/datastore_valkey/test_uninstrumented_methods.py index d6166a68c..5e7316adf 100644 --- a/tests/datastore_valkey/test_uninstrumented_methods.py +++ b/tests/datastore_valkey/test_uninstrumented_methods.py @@ -108,7 +108,7 @@ def test_uninstrumented_methods(client): if hasattr(client, module): module_client = getattr(client, module)() module_methods = {m for m in dir(module_client) if not m[0] == "_"} - is_wrapped = lambda m: hasattr(getattr(module_client, m), "__wrapped__") + is_wrapped = lambda m: hasattr(getattr(module_client, m), "__wrapped__") # noqa: B023 uninstrumented |= {m for m in module_methods - IGNORED_METHODS if not is_wrapped(m)} assert not uninstrumented, f"Uninstrumented methods: {sorted(uninstrumented)}" diff --git a/tests/external_aiobotocore/conftest.py b/tests/external_aiobotocore/conftest.py index 672b9766f..b7fddfc4d 100644 --- a/tests/external_aiobotocore/conftest.py +++ b/tests/external_aiobotocore/conftest.py @@ -17,23 +17,20 @@ import socket import threading -import pytest import moto.server +import pytest import werkzeug.serving +from external_botocore._mock_external_bedrock_server import MockExternalBedrockServer from testing_support.fixture.event_loop import event_loop as loop from testing_support.fixtures import collector_agent_registration_fixture, collector_available_fixture -from newrelic.common.package_version_utils import ( - get_package_version, - get_package_version_tuple, -) -from external_botocore._mock_external_bedrock_server import MockExternalBedrockServer +from newrelic.common.package_version_utils import get_package_version, get_package_version_tuple BOTOCORE_VERSION = get_package_version("botocore") PORT = 4443 AWS_ACCESS_KEY_ID = "AAAAAAAAAAAACCESSKEY" -AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" # nosec +AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" HOST = "127.0.0.1" diff --git a/tests/external_aiobotocore/test_aiobotocore_s3.py b/tests/external_aiobotocore/test_aiobotocore_s3.py index aa45f6622..fbc7e5c47 100644 --- a/tests/external_aiobotocore/test_aiobotocore_s3.py +++ b/tests/external_aiobotocore/test_aiobotocore_s3.py @@ -65,7 +65,7 @@ async def _test(): async with MotoService("s3", port=PORT): session = aiobotocore.session.get_session() - async with session.create_client( # nosec + async with session.create_client( "s3", region_name="us-east-1", endpoint_url=f"http://localhost:{PORT}", diff --git a/tests/external_aiobotocore/test_bedrock_chat_completion.py b/tests/external_aiobotocore/test_bedrock_chat_completion.py index cddbbae06..dc1904914 100644 --- 
a/tests/external_aiobotocore/test_bedrock_chat_completion.py +++ b/tests/external_aiobotocore/test_bedrock_chat_completion.py @@ -19,6 +19,7 @@ import botocore.eventstream import botocore.exceptions import pytest +from conftest import BOTOCORE_VERSION # pylint: disable=E0611 from external_botocore._test_bedrock_chat_completion import ( chat_completion_expected_events, chat_completion_expected_malformed_request_body_events, @@ -31,12 +32,7 @@ chat_completion_payload_templates, chat_completion_streaming_expected_events, ) -from conftest import BOTOCORE_VERSION # pylint: disable=E0611 -from testing_support.fixtures import ( - override_llm_token_callback_settings, - reset_core_stats_engine, - validate_attributes, -) +from testing_support.fixtures import override_llm_token_callback_settings, reset_core_stats_engine, validate_attributes from testing_support.ml_testing_utils import ( # noqa: F401 add_token_count_to_events, disabled_ai_monitoring_record_content_settings, @@ -50,12 +46,8 @@ ) from testing_support.validators.validate_custom_event import validate_custom_event_count from testing_support.validators.validate_custom_events import validate_custom_events -from testing_support.validators.validate_error_trace_attributes import ( - validate_error_trace_attributes, -) -from testing_support.validators.validate_transaction_metrics import ( - validate_transaction_metrics, -) +from testing_support.validators.validate_error_trace_attributes import validate_error_trace_attributes +from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics from newrelic.api.background_task import background_task from newrelic.api.llm_custom_attributes import WithLlmCustomAttributes diff --git a/tests/external_aiobotocore/test_bedrock_embeddings.py b/tests/external_aiobotocore/test_bedrock_embeddings.py index e61cd34e5..fd566f2ca 100644 --- a/tests/external_aiobotocore/test_bedrock_embeddings.py +++ b/tests/external_aiobotocore/test_bedrock_embeddings.py @@ -17,6 +17,7 @@ import botocore.exceptions import pytest +from conftest import BOTOCORE_VERSION # pylint: disable=E0611 from external_botocore._test_bedrock_embeddings import ( embedding_expected_events, embedding_expected_malformed_request_body_events, @@ -24,12 +25,7 @@ embedding_invalid_access_key_error_events, embedding_payload_templates, ) -from conftest import BOTOCORE_VERSION # pylint: disable=E0611 -from testing_support.fixtures import ( - override_llm_token_callback_settings, - reset_core_stats_engine, - validate_attributes, -) +from testing_support.fixtures import override_llm_token_callback_settings, reset_core_stats_engine, validate_attributes from testing_support.ml_testing_utils import ( # noqa: F401 add_token_count_to_events, disabled_ai_monitoring_record_content_settings, @@ -41,12 +37,8 @@ ) from testing_support.validators.validate_custom_event import validate_custom_event_count from testing_support.validators.validate_custom_events import validate_custom_events -from testing_support.validators.validate_error_trace_attributes import ( - validate_error_trace_attributes, -) -from testing_support.validators.validate_transaction_metrics import ( - validate_transaction_metrics, -) +from testing_support.validators.validate_error_trace_attributes import validate_error_trace_attributes +from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics from newrelic.api.background_task import background_task from newrelic.api.transaction import add_custom_attribute diff --git 
a/tests/external_botocore/_mock_external_bedrock_server.py b/tests/external_botocore/_mock_external_bedrock_server.py index 806054a26..f190e323a 100644 --- a/tests/external_botocore/_mock_external_bedrock_server.py +++ b/tests/external_botocore/_mock_external_bedrock_server.py @@ -6743,7 +6743,7 @@ class MockExternalBedrockServer(MockExternalHTTPServer): # calls. def __init__(self, handler=simple_get, port=None, *args, **kwargs): - super(MockExternalBedrockServer, self).__init__(handler=handler, port=port, *args, **kwargs) + super(MockExternalBedrockServer, self).__init__(handler=handler, port=port, *args, **kwargs) # noqa: B026 if __name__ == "__main__": diff --git a/tests/external_botocore/conftest.py b/tests/external_botocore/conftest.py index 2df833461..97d81d39f 100644 --- a/tests/external_botocore/conftest.py +++ b/tests/external_botocore/conftest.py @@ -72,7 +72,7 @@ def bedrock_server(): if not _environ_as_bool("NEW_RELIC_TESTING_RECORD_BEDROCK_RESPONSES", False): # Use mocked Bedrock backend and prerecorded responses with MockExternalBedrockServer() as server: - client = boto3.client( # nosec + client = boto3.client( "bedrock-runtime", "us-east-1", endpoint_url=f"http://localhost:{server.port}", diff --git a/tests/external_botocore/test_boto3_firehose.py b/tests/external_botocore/test_boto3_firehose.py index f20975d6c..f7e59f536 100644 --- a/tests/external_botocore/test_boto3_firehose.py +++ b/tests/external_botocore/test_boto3_firehose.py @@ -41,7 +41,7 @@ } AWS_ACCESS_KEY_ID = "AAAAAAAAAAAACCESSKEY" -AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" # nosec +AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" AWS_REGION = "us-east-1" _firehose_scoped_metrics = [ diff --git a/tests/external_botocore/test_boto3_iam.py b/tests/external_botocore/test_boto3_iam.py index a4bf26425..b611f0efa 100644 --- a/tests/external_botocore/test_boto3_iam.py +++ b/tests/external_botocore/test_boto3_iam.py @@ -27,7 +27,7 @@ MOTO_VERSION = get_package_version_tuple("moto") AWS_ACCESS_KEY_ID = "AAAAAAAAAAAACCESSKEY" -AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" # nosec (This is fine for testing purposes) +AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" TEST_USER = f"python-agent-test-{uuid.uuid4()}" diff --git a/tests/external_botocore/test_boto3_kinesis.py b/tests/external_botocore/test_boto3_kinesis.py index 5ac1fb14f..bacfb4cb6 100644 --- a/tests/external_botocore/test_boto3_kinesis.py +++ b/tests/external_botocore/test_boto3_kinesis.py @@ -31,7 +31,7 @@ BOTOCORE_VERSION = get_package_version_tuple("boto3") AWS_ACCESS_KEY_ID = "AAAAAAAAAAAACCESSKEY" -AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" # nosec +AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" AWS_REGION = "us-east-1" AWS_ACCOUNT_ID = 123456789012 diff --git a/tests/external_botocore/test_boto3_lambda.py b/tests/external_botocore/test_boto3_lambda.py index ee5278b97..bd39da28b 100644 --- a/tests/external_botocore/test_boto3_lambda.py +++ b/tests/external_botocore/test_boto3_lambda.py @@ -30,7 +30,7 @@ BOTOCORE_VERSION = get_package_version_tuple("botocore") AWS_ACCESS_KEY_ID = "AAAAAAAAAAAACCESSKEY" -AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" # nosec +AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" AWS_REGION_NAME = "us-west-2" LAMBDA_URL = "lambda.us-west-2.amazonaws.com" diff --git a/tests/external_botocore/test_boto3_s3.py b/tests/external_botocore/test_boto3_s3.py index 90aafa2a8..d8b0b02b1 100644 --- a/tests/external_botocore/test_boto3_s3.py +++ b/tests/external_botocore/test_boto3_s3.py @@ -28,7 +28,7 @@ BOTOCORE_VERSION = get_package_version_tuple("botocore") 
AWS_ACCESS_KEY_ID = "AAAAAAAAAAAACCESSKEY" -AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" # nosec +AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" AWS_REGION_NAME = "us-west-2" TEST_BUCKET = f"python-agent-test-{uuid.uuid4()}" diff --git a/tests/external_botocore/test_boto3_sns.py b/tests/external_botocore/test_boto3_sns.py index ba83e7988..453289c61 100644 --- a/tests/external_botocore/test_boto3_sns.py +++ b/tests/external_botocore/test_boto3_sns.py @@ -25,7 +25,7 @@ MOTO_VERSION = get_package_version_tuple("moto") AWS_ACCESS_KEY_ID = "AAAAAAAAAAAACCESSKEY" -AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" # nosec (This is fine for testing purposes) +AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" AWS_REGION_NAME = "us-east-1" SNS_URL = "sns-us-east-1.amazonaws.com" TOPIC = "arn:aws:sns:us-east-1:123456789012:some-topic" diff --git a/tests/external_botocore/test_botocore_dynamodb.py b/tests/external_botocore/test_botocore_dynamodb.py index cdb70911d..2820edb5a 100644 --- a/tests/external_botocore/test_botocore_dynamodb.py +++ b/tests/external_botocore/test_botocore_dynamodb.py @@ -27,7 +27,7 @@ MOTO_VERSION = get_package_version_tuple("moto") AWS_ACCESS_KEY_ID = "AAAAAAAAAAAACCESSKEY" -AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" # nosec (This is fine for testing purposes) +AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" AWS_REGION = "us-east-1" TEST_TABLE = f"python-agent-test-{uuid.uuid4()}" diff --git a/tests/external_botocore/test_botocore_ec2.py b/tests/external_botocore/test_botocore_ec2.py index 68ca90032..a29c1fb29 100644 --- a/tests/external_botocore/test_botocore_ec2.py +++ b/tests/external_botocore/test_botocore_ec2.py @@ -26,7 +26,7 @@ MOTO_VERSION = get_package_version_tuple("moto") AWS_ACCESS_KEY_ID = "AAAAAAAAAAAACCESSKEY" -AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" # nosec (This is fine for testing purposes) +AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" AWS_REGION = "us-east-1" UBUNTU_14_04_PARAVIRTUAL_AMI = "ami-c65be9ae" diff --git a/tests/external_botocore/test_botocore_s3.py b/tests/external_botocore/test_botocore_s3.py index 6b6edb8ff..b676e8c92 100644 --- a/tests/external_botocore/test_botocore_s3.py +++ b/tests/external_botocore/test_botocore_s3.py @@ -28,7 +28,7 @@ BOTOCORE_VERSION = get_package_version_tuple("botocore") AWS_ACCESS_KEY_ID = "AAAAAAAAAAAACCESSKEY" -AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" # nosec +AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" AWS_REGION = "us-east-1" TEST_BUCKET = f"python-agent-test-{uuid.uuid4()}" diff --git a/tests/external_botocore/test_botocore_sqs.py b/tests/external_botocore/test_botocore_sqs.py index 2fb89efb5..9575f9fa2 100644 --- a/tests/external_botocore/test_botocore_sqs.py +++ b/tests/external_botocore/test_botocore_sqs.py @@ -63,7 +63,7 @@ EXPECTED_SEND_MESSAGE_BATCH_AGENT_ATTRS = {"exact_agents": {"aws.operation": "SendMessageBatch"}} AWS_ACCESS_KEY_ID = "AAAAAAAAAAAACCESSKEY" -AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" # nosec +AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" AWS_REGION = "us-east-1" TEST_QUEUE = f"python-agent-test-{uuid.uuid4()}" diff --git a/tests/external_botocore/test_s3transfer.py b/tests/external_botocore/test_s3transfer.py index a6aebcf4f..bd77101c1 100644 --- a/tests/external_botocore/test_s3transfer.py +++ b/tests/external_botocore/test_s3transfer.py @@ -29,7 +29,7 @@ BOTOCORE_VERSION = get_package_version_tuple("botocore") AWS_ACCESS_KEY_ID = "AAAAAAAAAAAACCESSKEY" -AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" # nosec +AWS_SECRET_ACCESS_KEY = "AAAAAASECRETKEY" AWS_REGION_NAME = "us-west-2" TEST_BUCKET = 
f"python-agent-test-{uuid.uuid4()}" diff --git a/tests/external_httplib/test_httplib.py b/tests/external_httplib/test_httplib.py index 722d818d3..64b7c94f7 100644 --- a/tests/external_httplib/test_httplib.py +++ b/tests/external_httplib/test_httplib.py @@ -70,7 +70,7 @@ def test_httplib_https_request(server): @background_task(name="test_httplib:test_httplib_https_request") def _test(): # fix HTTPSConnection: https://wiki.openstack.org/wiki/OSSN/OSSN-0033 - connection = httplib.HTTPSConnection("localhost", server.port) # nosec + connection = httplib.HTTPSConnection("localhost", server.port) # It doesn't matter that a SSL exception is raised here because the # agent still records this as an external request try: diff --git a/tests/external_requests/test_requests.py b/tests/external_requests/test_requests.py index 892d7667e..bb6769145 100644 --- a/tests/external_requests/test_requests.py +++ b/tests/external_requests/test_requests.py @@ -75,7 +75,7 @@ def test_https_request_get(server, metrics): @background_task(name="test_requests:test_https_request_get") def _test(): try: - requests.get(f"https://localhost:{server.port}/", verify=False) # nosec + requests.get(f"https://localhost:{server.port}/", verify=False) except Exception: pass diff --git a/tests/framework_aiohttp/_target_application.py b/tests/framework_aiohttp/_target_application.py index 610b9cc0c..96279e58f 100644 --- a/tests/framework_aiohttp/_target_application.py +++ b/tests/framework_aiohttp/_target_application.py @@ -93,8 +93,8 @@ async def _respond(self): await asyncio.sleep(0) except KnownException: pass - finally: - return web.Response(text="Hello Aiohttp!") + + return web.Response(text="Hello Aiohttp!") get = _respond post = _respond diff --git a/tests/framework_aiohttp/test_server.py b/tests/framework_aiohttp/test_server.py index 544ecb454..1f27d2081 100644 --- a/tests/framework_aiohttp/test_server.py +++ b/tests/framework_aiohttp/test_server.py @@ -198,7 +198,7 @@ async def fetch(): await asyncio.sleep(0) return - assert False, "Request did not time out" + raise AssertionError("Request did not time out") transactions = [] @@ -217,7 +217,7 @@ def test_work_after_request_not_recorded(aiohttp_app): async def timeout(): await asyncio.sleep(1) aiohttp_app.loop.stop() - assert False + raise AssertionError task = aiohttp_app.loop.create_task(timeout()) aiohttp_app.loop.run_forever() diff --git a/tests/framework_grpc/_test_common.py b/tests/framework_grpc/_test_common.py index a71bb2f50..bf2cbdaf9 100644 --- a/tests/framework_grpc/_test_common.py +++ b/tests/framework_grpc/_test_common.py @@ -22,7 +22,7 @@ def create_request(streaming_request, count=1, timesout=False): from sample_application import Message def _message_stream(): - for i in range(count): + for _ in range(count): yield Message(text="Hello World", count=count, timesout=timesout) if streaming_request: diff --git a/tests/framework_grpc/sample_application/__init__.py b/tests/framework_grpc/sample_application/__init__.py index e9e397681..388222ae1 100644 --- a/tests/framework_grpc/sample_application/__init__.py +++ b/tests/framework_grpc/sample_application/__init__.py @@ -52,7 +52,7 @@ def DoUnaryStream(self, request, context): if request.timesout: while context.is_active(): time.sleep(0.1) - for i in range(request.count): + for _ in range(request.count): yield Message(text=f"unary_stream: {request.text}") def DoStreamUnary(self, request_iter, context): @@ -162,5 +162,5 @@ def DtNoTxnStreamUnary(self, request_iter, context): def DtNoTxnStreamStream(self, request_iter, 
context): current_transaction().ignore_transaction = True - for request in request_iter: + for _request in request_iter: yield self.extract_dt_value(context.invocation_metadata()) diff --git a/tests/framework_grpc/test_clients.py b/tests/framework_grpc/test_clients.py index f2006f9f1..dcf18ac61 100644 --- a/tests/framework_grpc/test_clients.py +++ b/tests/framework_grpc/test_clients.py @@ -269,7 +269,7 @@ def _test_future_cancel(): request = create_request(streaming_request, count=3, timesout=False) reply = service_method_method(request) - for result in reply: + for _result in reply: reply.cancel() break diff --git a/tests/framework_grpc/test_server.py b/tests/framework_grpc/test_server.py index 65f119ad7..311cedc65 100644 --- a/tests/framework_grpc/test_server.py +++ b/tests/framework_grpc/test_server.py @@ -151,7 +151,7 @@ def test_no_exception_client_close(mock_grpc_server): with channel: request = create_request(False, timesout=True) - method = getattr(stub, "DoUnaryUnary") + method = stub.DoUnaryUnary @validate_transaction_errors(errors=[]) @wait_for_transaction_completion @@ -170,7 +170,7 @@ def test_newrelic_disabled_no_transaction(mock_grpc_server, stub): port = mock_grpc_server request = create_request(False) - method = getattr(stub, "DoUnaryUnary") + method = stub.DoUnaryUnary @override_generic_settings(global_settings(), {"enabled": False}) @function_not_called("newrelic.core.stats_engine", "StatsEngine.record_transaction") diff --git a/tests/framework_sanic/_target_application.py b/tests/framework_sanic/_target_application.py index ead7a6513..0c057d568 100644 --- a/tests/framework_sanic/_target_application.py +++ b/tests/framework_sanic/_target_application.py @@ -127,7 +127,7 @@ async def not_found(request): @app.route("/zero") async def zero_division_error(request): - 1 / 0 + 1 / 0 # noqa: B018 @app.middleware("request") @@ -169,7 +169,7 @@ async def streaming(request): # Fake websocket endpoint to enable websockets on the server @app.websocket("/socket") async def socket(request, ws): - assert False + raise AssertionError @app.route("/custom-header//") diff --git a/tests/framework_sanic/test_cross_application.py b/tests/framework_sanic/test_cross_application.py index c1879f1d3..a32716641 100644 --- a/tests/framework_sanic/test_cross_application.py +++ b/tests/framework_sanic/test_cross_application.py @@ -64,7 +64,7 @@ def test_inbound_distributed_trace(app): assert response.status == 200 -ENCODING_KEY = "".join(random.choice(string.ascii_lowercase) for _ in range(40)) # nosec +ENCODING_KEY = "".join(random.choice(string.ascii_lowercase) for _ in range(40)) _cat_response_header_urls_to_test = ( ("/", "_target_application:index"), ("/streaming", "_target_application:streaming"), diff --git a/tests/messagebroker_kafkapython/test_serialization.py b/tests/messagebroker_kafkapython/test_serialization.py index 11f676363..3561fd4b4 100644 --- a/tests/messagebroker_kafkapython/test_serialization.py +++ b/tests/messagebroker_kafkapython/test_serialization.py @@ -79,7 +79,7 @@ def test(): attempts = 0 record = None while not record and attempts < timeout: - for record in consumer: + for record in consumer: # noqa: B007 pass attempts += 1 diff --git a/tests/messagebroker_pika/test_pika_async_connection_consume.py b/tests/messagebroker_pika/test_pika_async_connection_consume.py index e35831417..478efd653 100644 --- a/tests/messagebroker_pika/test_pika_async_connection_consume.py +++ b/tests/messagebroker_pika/test_pika_async_connection_consume.py @@ -215,7 +215,7 @@ def
test_async_connection_basic_get_empty(ConnectionClass, callback_as_partial): QUEUE = "test_async_empty" def on_message(channel, method_frame, header_frame, body): - assert False, body.decode("UTF-8") + raise AssertionError(body.decode("UTF-8")) if callback_as_partial: on_message = functools.partial(on_message) diff --git a/tests/messagebroker_pika/test_pika_blocking_connection_consume_generator.py b/tests/messagebroker_pika/test_pika_blocking_connection_consume_generator.py index 12e4aab11..465c1758a 100644 --- a/tests/messagebroker_pika/test_pika_blocking_connection_consume_generator.py +++ b/tests/messagebroker_pika/test_pika_blocking_connection_consume_generator.py @@ -48,7 +48,7 @@ def test_blocking_connection_consume_break(producer): with pika.BlockingConnection(pika.ConnectionParameters(DB_SETTINGS["host"])) as connection: channel = connection.channel() - for method_frame, properties, body in channel.consume(QUEUE): + for method_frame, _properties, body in channel.consume(QUEUE): assert hasattr(method_frame, "_nr_start_time") assert body == BODY break @@ -67,7 +67,7 @@ def test_blocking_connection_consume_connection_close(producer): channel = connection.channel() try: - for method_frame, properties, body in channel.consume(QUEUE): + for method_frame, _properties, body in channel.consume(QUEUE): assert hasattr(method_frame, "_nr_start_time") assert body == BODY channel.close() @@ -117,15 +117,15 @@ def test_blocking_connection_consume_exception_in_for_loop(producer): try: # We should still create the metric in this case even if there is # an exception - for result in channel.consume(QUEUE): - 1 / 0 + for _result in channel.consume(QUEUE): + 1 / 0 # noqa: B018 except ZeroDivisionError: # Expected error pass except Exception as e: - assert False, f"Wrong exception was raised: {e}" + raise AssertionError(f"Wrong exception was raised: {e}") else: - assert False, "No exception was raised!" + raise AssertionError("No exception was raised!") _test_blocking_connection_consume_empty_metrics = [ @@ -149,15 +149,15 @@ def test_blocking_connection_consume_exception_in_generator(): try: # Since the pytest fixture is not used, the QUEUE will not exist - for result in channel.consume(QUEUE): + for _result in channel.consume(QUEUE): pass except pika.exceptions.ChannelClosed: # Expected error pass except Exception as e: - assert False, f"Wrong exception was raised: {e}" + raise AssertionError(f"Wrong exception was raised: {e}") else: - assert False, "No exception was raised!" + raise AssertionError("No exception was raised!") _test_blocking_connection_consume_many_metrics = [ @@ -216,7 +216,7 @@ def test_blocking_connection_consume_using_methods(producer): pass else: # this is not - assert False, "No exception was raised!" + raise AssertionError("No exception was raised!") result = consumer.close() assert result is None @@ -236,7 +236,7 @@ def test_blocking_connection_consume_outside_txn(producer): consumer = channel.consume(QUEUE) try: - for method_frame, properties, body in consumer: + for method_frame, _properties, body in consumer: assert hasattr(method_frame, "_nr_start_time") assert body == BODY break @@ -268,7 +268,7 @@ def consume_it(consumer, up_next=None): consumer = channel.consume(QUEUE) up_next = None - for i in range(6): + for _ in range(6): try: up_next = consume_it(consumer, up_next=up_next) except StopIteration: @@ -305,7 +305,7 @@ def test_blocking_connection_consume_using_methods_outside_txn(producer): pass else: # this is not - assert False, "No exception was raised!" 
+ raise AssertionError("No exception was raised!") result = consumer.close() assert result is None @@ -329,4 +329,4 @@ def test_blocking_connection_consume_exception_on_creation(): pass else: # this is not - assert False, "TypeError was not raised" + raise AssertionError("TypeError was not raised") diff --git a/tests/mlmodel_langchain/_mock_external_openai_server.py b/tests/mlmodel_langchain/_mock_external_openai_server.py index 4b16b2d10..f56269b46 100644 --- a/tests/mlmodel_langchain/_mock_external_openai_server.py +++ b/tests/mlmodel_langchain/_mock_external_openai_server.py @@ -501,7 +501,7 @@ class _MockExternalOpenAIServer(MockExternalHTTPServer): # calls. def __init__(self, handler=simple_get, port=None, *args, **kwargs): - super(_MockExternalOpenAIServer, self).__init__(handler=handler, port=port, *args, **kwargs) + super(_MockExternalOpenAIServer, self).__init__(handler=handler, port=port, *args, **kwargs) # noqa: B026 return _MockExternalOpenAIServer diff --git a/tests/mlmodel_langchain/conftest.py b/tests/mlmodel_langchain/conftest.py index 6224511a4..9c72b0992 100644 --- a/tests/mlmodel_langchain/conftest.py +++ b/tests/mlmodel_langchain/conftest.py @@ -139,7 +139,7 @@ def _wrap_httpx_client_send(wrapped, instance, args, kwargs): if response.status_code >= 400 or response.status_code < 200: prompt = "error" - rheaders = getattr(response, "headers") + rheaders = response.headers headers = dict( filter( diff --git a/tests/mlmodel_langchain/test_vectorstore.py b/tests/mlmodel_langchain/test_vectorstore.py index 54690d3bb..11b78e5a3 100644 --- a/tests/mlmodel_langchain/test_vectorstore.py +++ b/tests/mlmodel_langchain/test_vectorstore.py @@ -116,9 +116,9 @@ def test_vectorstore_modules_instrumented(): # also be found, so separate logic is not necessary to check this. continue - if not hasattr(getattr(class_, "similarity_search"), "__wrapped__"): + if not hasattr(class_.similarity_search, "__wrapped__"): uninstrumented_sync_classes.append(class_name) - if not hasattr(getattr(class_, "asimilarity_search"), "__wrapped__"): + if not hasattr(class_.asimilarity_search, "__wrapped__"): uninstrumented_async_classes.append(class_name) assert not uninstrumented_sync_classes, f"Uninstrumented sync classes found: {str(uninstrumented_sync_classes)}" diff --git a/tests/mlmodel_openai/_mock_external_openai_server.py b/tests/mlmodel_openai/_mock_external_openai_server.py index 93f70cab1..f33c65fa6 100644 --- a/tests/mlmodel_openai/_mock_external_openai_server.py +++ b/tests/mlmodel_openai/_mock_external_openai_server.py @@ -726,7 +726,7 @@ class _MockExternalOpenAIServer(MockExternalHTTPServer): # calls. 
def __init__(self, handler=simple_get, port=None, *args, **kwargs): - super(_MockExternalOpenAIServer, self).__init__(handler=handler, port=port, *args, **kwargs) + super(_MockExternalOpenAIServer, self).__init__(handler=handler, port=port, *args, **kwargs) # noqa: B026 return _MockExternalOpenAIServer diff --git a/tests/mlmodel_openai/conftest.py b/tests/mlmodel_openai/conftest.py index c9410ec5f..6d0e27332 100644 --- a/tests/mlmodel_openai/conftest.py +++ b/tests/mlmodel_openai/conftest.py @@ -188,7 +188,7 @@ def _wrap_httpx_client_send(wrapped, instance, args, kwargs): if response.status_code >= 500 or response.status_code < 200: prompt = "error" - rheaders = getattr(response, "headers") + rheaders = response.headers headers = dict( filter( diff --git a/tests/mlmodel_sklearn/test_inference_events.py b/tests/mlmodel_sklearn/test_inference_events.py index e169562e7..d1fc0762b 100644 --- a/tests/mlmodel_sklearn/test_inference_events.py +++ b/tests/mlmodel_sklearn/test_inference_events.py @@ -47,7 +47,7 @@ def test_pandas_df_categorical_feature_event(): def _test(): import sklearn.tree - clf = getattr(sklearn.tree, "DecisionTreeClassifier")(random_state=0) + clf = sklearn.tree.DecisionTreeClassifier(random_state=0) model = clf.fit( pd.DataFrame({"col1": [27.0, 24.0], "col2": [23.0, 25.0]}, dtype="category"), pd.DataFrame({"label": [27.0, 28.0]}), @@ -92,7 +92,7 @@ def _test(): y_train = pd.DataFrame({"label": [True, False]}, dtype=dtype_name) x_test = pd.DataFrame({"col1": [True], "col2": [True]}, dtype=dtype_name) - clf = getattr(sklearn.tree, "DecisionTreeClassifier")(random_state=0) + clf = sklearn.tree.DecisionTreeClassifier(random_state=0) model = clf.fit(x_train, y_train) labels = model.predict(x_test) @@ -130,7 +130,7 @@ def _test(): y_train = pd.DataFrame({"label": [345.6, 456.7]}, dtype="float64") x_test = pd.DataFrame({"col1": [100.0], "col2": [300.0]}, dtype="float64") - clf = getattr(sklearn.tree, "DecisionTreeRegressor")(random_state=0) + clf = sklearn.tree.DecisionTreeRegressor(random_state=0) model = clf.fit(x_train, y_train) labels = model.predict(x_test) @@ -169,7 +169,7 @@ def _test(): y_train = [0, 1] x_test = [[1, 2]] - clf = getattr(sklearn.tree, "ExtraTreeRegressor")(random_state=0) + clf = sklearn.tree.ExtraTreeRegressor(random_state=0) model = clf.fit(x_train, y_train) labels = model.predict(x_test) @@ -207,7 +207,7 @@ def _test(): y_train = np.array([10, 11], dtype="int") x_test = np.array([[12, 13]], dtype="int") - clf = getattr(sklearn.tree, "ExtraTreeRegressor")(random_state=0) + clf = sklearn.tree.ExtraTreeRegressor(random_state=0) model = clf.fit(x_train, y_train) labels = model.predict(x_test) @@ -257,7 +257,7 @@ def _test(): x_train = np.array([[20, 20], [21, 21]], dtype="