From bd67c219ad0da9dca3a8bbd02a9921d63c776ae5 Mon Sep 17 00:00:00 2001 From: Riccardo Magliocchetti Date: Mon, 30 Dec 2024 14:50:58 +0100 Subject: [PATCH 1/8] botocore: add extension for bedrock runtime api --- .../botocore/extensions/__init__.py | 1 + .../botocore/extensions/bedrock.py | 384 ++++++++++++++++++ 2 files changed, 385 insertions(+) create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/__init__.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/__init__.py index 85a4904022..c4624ababd 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/__init__.py @@ -32,6 +32,7 @@ def loader(): _KNOWN_EXTENSIONS = { + "bedrock-runtime": _lazy_load(".bedrock", "_BedrockRuntimeExtension"), "dynamodb": _lazy_load(".dynamodb", "_DynamoDbExtension"), "lambda": _lazy_load(".lmbd", "_LambdaExtension"), "sns": _lazy_load(".sns", "_SnsExtension"), diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py new file mode 100644 index 0000000000..098572fb08 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py @@ -0,0 +1,384 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Includes work from: +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import annotations + +import io +import json +import logging +import math +from typing import Any + +from botocore.response import StreamingBody + +from opentelemetry.instrumentation.botocore.extensions.types import ( + _AttributeMapT, + _AwsSdkExtension, +) +from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import ( + GEN_AI_OPERATION_NAME, + GEN_AI_REQUEST_MAX_TOKENS, + GEN_AI_REQUEST_MODEL, + GEN_AI_REQUEST_TEMPERATURE, + GEN_AI_REQUEST_TOP_P, + GEN_AI_RESPONSE_FINISH_REASONS, + GEN_AI_SYSTEM, + GEN_AI_USAGE_INPUT_TOKENS, + GEN_AI_USAGE_OUTPUT_TOKENS, + GenAiOperationNameValues, + GenAiSystemValues, +) +from opentelemetry.trace.span import Span + +_logger = logging.getLogger(__name__) + +_MODEL_ID_KEY: str = "modelId" + + +class _BedrockRuntimeExtension(_AwsSdkExtension): + """ + This class is an extension for + Amazon Bedrock Runtime. 
+ """ + + def extract_attributes(self, attributes: _AttributeMapT): + attributes[GEN_AI_SYSTEM] = GenAiSystemValues.AWS_BEDROCK + attributes[GEN_AI_OPERATION_NAME] = GenAiOperationNameValues.CHAT + + model_id = self._call_context.params.get(_MODEL_ID_KEY) + if model_id: + attributes[GEN_AI_REQUEST_MODEL] = model_id + + # Get the request body if it exists + body = self._call_context.params.get("body") + if body: + try: + request_body = json.loads(body) + + if "amazon.titan" in model_id: + self._extract_titan_attributes( + attributes, request_body + ) + if "amazon.nova" in model_id: + self._extract_nova_attributes(attributes, request_body) + elif "anthropic.claude" in model_id: + self._extract_claude_attributes( + attributes, request_body + ) + elif "meta.llama" in model_id: + self._extract_llama_attributes( + attributes, request_body + ) + elif "cohere.command" in model_id: + self._extract_cohere_attributes( + attributes, request_body + ) + elif "ai21.jamba" in model_id: + self._extract_ai21_attributes(attributes, request_body) + elif "mistral" in model_id: + self._extract_mistral_attributes( + attributes, request_body + ) + + except json.JSONDecodeError: + _logger.debug("Error: Unable to parse the body as JSON") + + def _extract_titan_attributes(self, attributes, request_body): + config = request_body.get("textGenerationConfig", {}) + self._set_if_not_none( + attributes, GEN_AI_REQUEST_TEMPERATURE, config.get("temperature") + ) + self._set_if_not_none( + attributes, GEN_AI_REQUEST_TOP_P, config.get("topP") + ) + self._set_if_not_none( + attributes, GEN_AI_REQUEST_MAX_TOKENS, config.get("maxTokenCount") + ) + + def _extract_nova_attributes(self, attributes, request_body): + config = request_body.get("inferenceConfig", {}) + self._set_if_not_none( + attributes, GEN_AI_REQUEST_TEMPERATURE, config.get("temperature") + ) + self._set_if_not_none( + attributes, GEN_AI_REQUEST_TOP_P, config.get("top_p") + ) + self._set_if_not_none( + attributes, GEN_AI_REQUEST_MAX_TOKENS, config.get("max_new_tokens") + ) + + def _extract_claude_attributes(self, attributes, request_body): + self._set_if_not_none( + attributes, + GEN_AI_REQUEST_MAX_TOKENS, + request_body.get("max_tokens"), + ) + self._set_if_not_none( + attributes, + GEN_AI_REQUEST_TEMPERATURE, + request_body.get("temperature"), + ) + self._set_if_not_none( + attributes, GEN_AI_REQUEST_TOP_P, request_body.get("top_p") + ) + + def _extract_cohere_attributes(self, attributes, request_body): + prompt = request_body.get("message") + if prompt: + attributes[GEN_AI_USAGE_INPUT_TOKENS] = math.ceil(len(prompt) / 6) + self._set_if_not_none( + attributes, + GEN_AI_REQUEST_MAX_TOKENS, + request_body.get("max_tokens"), + ) + self._set_if_not_none( + attributes, + GEN_AI_REQUEST_TEMPERATURE, + request_body.get("temperature"), + ) + self._set_if_not_none( + attributes, GEN_AI_REQUEST_TOP_P, request_body.get("p") + ) + + def _extract_ai21_attributes(self, attributes, request_body): + self._set_if_not_none( + attributes, + GEN_AI_REQUEST_MAX_TOKENS, + request_body.get("max_tokens"), + ) + self._set_if_not_none( + attributes, + GEN_AI_REQUEST_TEMPERATURE, + request_body.get("temperature"), + ) + self._set_if_not_none( + attributes, GEN_AI_REQUEST_TOP_P, request_body.get("top_p") + ) + + def _extract_llama_attributes(self, attributes, request_body): + self._set_if_not_none( + attributes, + GEN_AI_REQUEST_MAX_TOKENS, + request_body.get("max_gen_len"), + ) + self._set_if_not_none( + attributes, + GEN_AI_REQUEST_TEMPERATURE, + request_body.get("temperature"), + ) + 
self._set_if_not_none( + attributes, GEN_AI_REQUEST_TOP_P, request_body.get("top_p") + ) + + def _extract_mistral_attributes(self, attributes, request_body): + prompt = request_body.get("prompt") + if prompt: + attributes[GEN_AI_USAGE_INPUT_TOKENS] = math.ceil(len(prompt) / 6) + self._set_if_not_none( + attributes, + GEN_AI_REQUEST_MAX_TOKENS, + request_body.get("max_tokens"), + ) + self._set_if_not_none( + attributes, + GEN_AI_REQUEST_TEMPERATURE, + request_body.get("temperature"), + ) + self._set_if_not_none( + attributes, GEN_AI_REQUEST_TOP_P, request_body.get("top_p") + ) + + @staticmethod + def _set_if_not_none(attributes, key, value): + if value is not None: + attributes[key] = value + + # pylint: disable=too-many-branches + def on_success(self, span: Span, result: dict[str, Any]): + model_id = self._call_context.params.get(_MODEL_ID_KEY) + + if not model_id: + return + + if "body" in result and isinstance(result["body"], StreamingBody): + original_body = None + try: + original_body = result["body"] + body_content = original_body.read() + + # Use one stream for telemetry + stream = io.BytesIO(body_content) + telemetry_content = stream.read() + response_body = json.loads(telemetry_content.decode("utf-8")) + if "amazon.titan" in model_id: + self._handle_amazon_titan_response(span, response_body) + if "amazon.nova" in model_id: + self._handle_amazon_nova_response(span, response_body) + elif "anthropic.claude" in model_id: + self._handle_anthropic_claude_response(span, response_body) + elif "meta.llama" in model_id: + self._handle_meta_llama_response(span, response_body) + elif "cohere.command" in model_id: + self._handle_cohere_command_response(span, response_body) + elif "ai21.jamba" in model_id: + self._handle_ai21_jamba_response(span, response_body) + elif "mistral" in model_id: + self._handle_mistral_mistral_response(span, response_body) + # Replenish stream for downstream application use + new_stream = io.BytesIO(body_content) + result["body"] = StreamingBody(new_stream, len(body_content)) + + except json.JSONDecodeError: + _logger.debug( + "Error: Unable to parse the response body as JSON" + ) + except Exception as e: # pylint: disable=broad-exception-caught, invalid-name + _logger.debug("Error processing response: %s", e) + finally: + if original_body is not None: + original_body.close() + + # pylint: disable=no-self-use + def _handle_amazon_titan_response( + self, span: Span, response_body: dict[str, Any] + ): + if "inputTextTokenCount" in response_body: + span.set_attribute( + GEN_AI_USAGE_INPUT_TOKENS, response_body["inputTextTokenCount"] + ) + if "results" in response_body and response_body["results"]: + result = response_body["results"][0] + if "tokenCount" in result: + span.set_attribute( + GEN_AI_USAGE_OUTPUT_TOKENS, result["tokenCount"] + ) + if "completionReason" in result: + span.set_attribute( + GEN_AI_RESPONSE_FINISH_REASONS, + [result["completionReason"]], + ) + + # pylint: disable=no-self-use + def _handle_amazon_nova_response( + self, span: Span, response_body: dict[str, Any] + ): + if "usage" in response_body: + usage = response_body["usage"] + if "inputTokens" in usage: + span.set_attribute( + GEN_AI_USAGE_INPUT_TOKENS, usage["inputTokens"] + ) + if "outputTokens" in usage: + span.set_attribute( + GEN_AI_USAGE_OUTPUT_TOKENS, usage["outputTokens"] + ) + if "stopReason" in response_body: + span.set_attribute( + GEN_AI_RESPONSE_FINISH_REASONS, [response_body["stopReason"]] + ) + + # pylint: disable=no-self-use + def _handle_anthropic_claude_response( + self, span: 
Span, response_body: dict[str, Any] + ): + if "usage" in response_body: + usage = response_body["usage"] + if "input_tokens" in usage: + span.set_attribute( + GEN_AI_USAGE_INPUT_TOKENS, usage["input_tokens"] + ) + if "output_tokens" in usage: + span.set_attribute( + GEN_AI_USAGE_OUTPUT_TOKENS, usage["output_tokens"] + ) + if "stop_reason" in response_body: + span.set_attribute( + GEN_AI_RESPONSE_FINISH_REASONS, [response_body["stop_reason"]] + ) + + # pylint: disable=no-self-use + def _handle_cohere_command_response( + self, span: Span, response_body: dict[str, Any] + ): + # Output tokens: Approximate from the response text + if "text" in response_body: + span.set_attribute( + GEN_AI_USAGE_OUTPUT_TOKENS, + math.ceil(len(response_body["text"]) / 6), + ) + if "finish_reason" in response_body: + span.set_attribute( + GEN_AI_RESPONSE_FINISH_REASONS, + [response_body["finish_reason"]], + ) + + # pylint: disable=no-self-use + def _handle_ai21_jamba_response( + self, span: Span, response_body: dict[str, Any] + ): + if "usage" in response_body: + usage = response_body["usage"] + if "prompt_tokens" in usage: + span.set_attribute( + GEN_AI_USAGE_INPUT_TOKENS, usage["prompt_tokens"] + ) + if "completion_tokens" in usage: + span.set_attribute( + GEN_AI_USAGE_OUTPUT_TOKENS, usage["completion_tokens"] + ) + if "choices" in response_body: + choices = response_body["choices"][0] + if "finish_reason" in choices: + span.set_attribute( + GEN_AI_RESPONSE_FINISH_REASONS, [choices["finish_reason"]] + ) + + # pylint: disable=no-self-use + def _handle_meta_llama_response( + self, span: Span, response_body: dict[str, Any] + ): + if "prompt_token_count" in response_body: + span.set_attribute( + GEN_AI_USAGE_INPUT_TOKENS, response_body["prompt_token_count"] + ) + if "generation_token_count" in response_body: + span.set_attribute( + GEN_AI_USAGE_OUTPUT_TOKENS, + response_body["generation_token_count"], + ) + if "stop_reason" in response_body: + span.set_attribute( + GEN_AI_RESPONSE_FINISH_REASONS, [response_body["stop_reason"]] + ) + + # pylint: disable=no-self-use + def _handle_mistral_mistral_response( + self, span: Span, response_body: dict[str, Any] + ): + if "outputs" in response_body: + outputs = response_body["outputs"][0] + if "text" in outputs: + span.set_attribute( + GEN_AI_USAGE_OUTPUT_TOKENS, + math.ceil(len(outputs["text"]) / 6), + ) + if "stop_reason" in outputs: + span.set_attribute( + GEN_AI_RESPONSE_FINISH_REASONS, [outputs["stop_reason"]] + ) From 633613dbb8a3adacce022658c7fbb72826680540 Mon Sep 17 00:00:00 2001 From: Riccardo Magliocchetti Date: Mon, 30 Dec 2024 15:39:02 +0100 Subject: [PATCH 2/8] Add tests and handle only non streaming responses --- .../botocore/environment_variables.py | 3 + .../botocore/extensions/bedrock.py | 353 +++--------------- ...quirements.txt => test-requirements-0.txt} | 1 + .../test-requirements-1.txt | 39 ++ ...t_converse_with_content[amazon.titan].yaml | 93 +++++ ...nverse_with_content[anthropic.claude].yaml | 93 +++++ .../tests/conftest.py | 190 ++++++++++ .../tests/test_botocore_bedrock.py | 155 ++++++++ tox.ini | 11 +- 9 files changed, 633 insertions(+), 305 deletions(-) create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/environment_variables.py rename instrumentation/opentelemetry-instrumentation-botocore/{test-requirements.txt => test-requirements-0.txt} (97%) create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/test-requirements-1.txt create mode 100644 
instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content[amazon.titan].yaml create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content[anthropic.claude].yaml create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/tests/conftest.py create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/environment_variables.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/environment_variables.py new file mode 100644 index 0000000000..02bdfe68af --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/environment_variables.py @@ -0,0 +1,3 @@ +OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT = ( + "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT" +) diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py index 098572fb08..6798e372b9 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py @@ -18,14 +18,9 @@ from __future__ import annotations -import io -import json import logging -import math from typing import Any -from botocore.response import StreamingBody - from opentelemetry.instrumentation.botocore.extensions.types import ( _AttributeMapT, _AwsSdkExtension, @@ -34,6 +29,7 @@ GEN_AI_OPERATION_NAME, GEN_AI_REQUEST_MAX_TOKENS, GEN_AI_REQUEST_MODEL, + GEN_AI_REQUEST_STOP_SEQUENCES, GEN_AI_REQUEST_TEMPERATURE, GEN_AI_REQUEST_TOP_P, GEN_AI_RESPONSE_FINISH_REASONS, @@ -58,327 +54,78 @@ class _BedrockRuntimeExtension(_AwsSdkExtension): """ def extract_attributes(self, attributes: _AttributeMapT): - attributes[GEN_AI_SYSTEM] = GenAiSystemValues.AWS_BEDROCK - attributes[GEN_AI_OPERATION_NAME] = GenAiOperationNameValues.CHAT + attributes[GEN_AI_SYSTEM] = GenAiSystemValues.AWS_BEDROCK.value model_id = self._call_context.params.get(_MODEL_ID_KEY) if model_id: attributes[GEN_AI_REQUEST_MODEL] = model_id - # Get the request body if it exists - body = self._call_context.params.get("body") - if body: - try: - request_body = json.loads(body) - - if "amazon.titan" in model_id: - self._extract_titan_attributes( - attributes, request_body - ) - if "amazon.nova" in model_id: - self._extract_nova_attributes(attributes, request_body) - elif "anthropic.claude" in model_id: - self._extract_claude_attributes( - attributes, request_body - ) - elif "meta.llama" in model_id: - self._extract_llama_attributes( - attributes, request_body - ) - elif "cohere.command" in model_id: - self._extract_cohere_attributes( - attributes, request_body - ) - elif "ai21.jamba" in model_id: - self._extract_ai21_attributes(attributes, request_body) - elif "mistral" in model_id: - self._extract_mistral_attributes( - attributes, request_body - ) - - except json.JSONDecodeError: - _logger.debug("Error: Unable to parse the body as JSON") - - def _extract_titan_attributes(self, attributes, request_body): - config = request_body.get("textGenerationConfig", {}) - 
self._set_if_not_none( - attributes, GEN_AI_REQUEST_TEMPERATURE, config.get("temperature") - ) - self._set_if_not_none( - attributes, GEN_AI_REQUEST_TOP_P, config.get("topP") - ) - self._set_if_not_none( - attributes, GEN_AI_REQUEST_MAX_TOKENS, config.get("maxTokenCount") - ) - - def _extract_nova_attributes(self, attributes, request_body): - config = request_body.get("inferenceConfig", {}) - self._set_if_not_none( - attributes, GEN_AI_REQUEST_TEMPERATURE, config.get("temperature") - ) - self._set_if_not_none( - attributes, GEN_AI_REQUEST_TOP_P, config.get("top_p") - ) - self._set_if_not_none( - attributes, GEN_AI_REQUEST_MAX_TOKENS, config.get("max_new_tokens") - ) - - def _extract_claude_attributes(self, attributes, request_body): - self._set_if_not_none( - attributes, - GEN_AI_REQUEST_MAX_TOKENS, - request_body.get("max_tokens"), - ) - self._set_if_not_none( - attributes, - GEN_AI_REQUEST_TEMPERATURE, - request_body.get("temperature"), - ) - self._set_if_not_none( - attributes, GEN_AI_REQUEST_TOP_P, request_body.get("top_p") - ) - - def _extract_cohere_attributes(self, attributes, request_body): - prompt = request_body.get("message") - if prompt: - attributes[GEN_AI_USAGE_INPUT_TOKENS] = math.ceil(len(prompt) / 6) - self._set_if_not_none( - attributes, - GEN_AI_REQUEST_MAX_TOKENS, - request_body.get("max_tokens"), - ) - self._set_if_not_none( - attributes, - GEN_AI_REQUEST_TEMPERATURE, - request_body.get("temperature"), - ) - self._set_if_not_none( - attributes, GEN_AI_REQUEST_TOP_P, request_body.get("p") - ) - - def _extract_ai21_attributes(self, attributes, request_body): - self._set_if_not_none( - attributes, - GEN_AI_REQUEST_MAX_TOKENS, - request_body.get("max_tokens"), - ) - self._set_if_not_none( - attributes, - GEN_AI_REQUEST_TEMPERATURE, - request_body.get("temperature"), - ) - self._set_if_not_none( - attributes, GEN_AI_REQUEST_TOP_P, request_body.get("top_p") - ) - - def _extract_llama_attributes(self, attributes, request_body): - self._set_if_not_none( - attributes, - GEN_AI_REQUEST_MAX_TOKENS, - request_body.get("max_gen_len"), - ) - self._set_if_not_none( - attributes, - GEN_AI_REQUEST_TEMPERATURE, - request_body.get("temperature"), - ) - self._set_if_not_none( - attributes, GEN_AI_REQUEST_TOP_P, request_body.get("top_p") - ) + # FIXME: add other model patterns + text_model_patterns = ["amazon.titan-text", "anthropic.claude"] + if any(pattern in model_id for pattern in text_model_patterns): + attributes[GEN_AI_OPERATION_NAME] = ( + GenAiOperationNameValues.CHAT.value + ) - def _extract_mistral_attributes(self, attributes, request_body): - prompt = request_body.get("prompt") - if prompt: - attributes[GEN_AI_USAGE_INPUT_TOKENS] = math.ceil(len(prompt) / 6) - self._set_if_not_none( - attributes, - GEN_AI_REQUEST_MAX_TOKENS, - request_body.get("max_tokens"), - ) - self._set_if_not_none( - attributes, - GEN_AI_REQUEST_TEMPERATURE, - request_body.get("temperature"), - ) - self._set_if_not_none( - attributes, GEN_AI_REQUEST_TOP_P, request_body.get("top_p") - ) + if inference_config := self._call_context.params.get( + "inferenceConfig" + ): + self._set_if_not_none( + attributes, + GEN_AI_REQUEST_TEMPERATURE, + inference_config.get("temperature"), + ) + self._set_if_not_none( + attributes, + GEN_AI_REQUEST_TOP_P, + inference_config.get("topP"), + ) + self._set_if_not_none( + attributes, + GEN_AI_REQUEST_MAX_TOKENS, + inference_config.get("maxTokens"), + ) + self._set_if_not_none( + attributes, + GEN_AI_REQUEST_STOP_SEQUENCES, + inference_config.get("stopSequences"), + ) 
@staticmethod def _set_if_not_none(attributes, key, value): if value is not None: attributes[key] = value - # pylint: disable=too-many-branches + def before_service_call(self, span: Span): + if not span.is_recording(): + return + + operation_name = span.attributes.get(GEN_AI_OPERATION_NAME, "") + request_model = span.attributes.get(GEN_AI_REQUEST_MODEL, "") + # avoid setting to an empty string if are not available + if operation_name and request_model: + span.update_name(f"{operation_name} {request_model}") + def on_success(self, span: Span, result: dict[str, Any]): model_id = self._call_context.params.get(_MODEL_ID_KEY) if not model_id: return - if "body" in result and isinstance(result["body"], StreamingBody): - original_body = None - try: - original_body = result["body"] - body_content = original_body.read() - - # Use one stream for telemetry - stream = io.BytesIO(body_content) - telemetry_content = stream.read() - response_body = json.loads(telemetry_content.decode("utf-8")) - if "amazon.titan" in model_id: - self._handle_amazon_titan_response(span, response_body) - if "amazon.nova" in model_id: - self._handle_amazon_nova_response(span, response_body) - elif "anthropic.claude" in model_id: - self._handle_anthropic_claude_response(span, response_body) - elif "meta.llama" in model_id: - self._handle_meta_llama_response(span, response_body) - elif "cohere.command" in model_id: - self._handle_cohere_command_response(span, response_body) - elif "ai21.jamba" in model_id: - self._handle_ai21_jamba_response(span, response_body) - elif "mistral" in model_id: - self._handle_mistral_mistral_response(span, response_body) - # Replenish stream for downstream application use - new_stream = io.BytesIO(body_content) - result["body"] = StreamingBody(new_stream, len(body_content)) - - except json.JSONDecodeError: - _logger.debug( - "Error: Unable to parse the response body as JSON" - ) - except Exception as e: # pylint: disable=broad-exception-caught, invalid-name - _logger.debug("Error processing response: %s", e) - finally: - if original_body is not None: - original_body.close() - - # pylint: disable=no-self-use - def _handle_amazon_titan_response( - self, span: Span, response_body: dict[str, Any] - ): - if "inputTextTokenCount" in response_body: - span.set_attribute( - GEN_AI_USAGE_INPUT_TOKENS, response_body["inputTextTokenCount"] - ) - if "results" in response_body and response_body["results"]: - result = response_body["results"][0] - if "tokenCount" in result: - span.set_attribute( - GEN_AI_USAGE_OUTPUT_TOKENS, result["tokenCount"] - ) - if "completionReason" in result: - span.set_attribute( - GEN_AI_RESPONSE_FINISH_REASONS, - [result["completionReason"]], - ) - - # pylint: disable=no-self-use - def _handle_amazon_nova_response( - self, span: Span, response_body: dict[str, Any] - ): - if "usage" in response_body: - usage = response_body["usage"] - if "inputTokens" in usage: - span.set_attribute( - GEN_AI_USAGE_INPUT_TOKENS, usage["inputTokens"] - ) - if "outputTokens" in usage: - span.set_attribute( - GEN_AI_USAGE_OUTPUT_TOKENS, usage["outputTokens"] - ) - if "stopReason" in response_body: - span.set_attribute( - GEN_AI_RESPONSE_FINISH_REASONS, [response_body["stopReason"]] - ) - - # pylint: disable=no-self-use - def _handle_anthropic_claude_response( - self, span: Span, response_body: dict[str, Any] - ): - if "usage" in response_body: - usage = response_body["usage"] - if "input_tokens" in usage: + if usage := result.get("usage"): + if input_tokens := usage.get("inputTokens"): span.set_attribute( 
- GEN_AI_USAGE_INPUT_TOKENS, usage["input_tokens"] + GEN_AI_USAGE_INPUT_TOKENS, + input_tokens, ) - if "output_tokens" in usage: + if output_tokens := usage.get("outputTokens"): span.set_attribute( - GEN_AI_USAGE_OUTPUT_TOKENS, usage["output_tokens"] + GEN_AI_USAGE_OUTPUT_TOKENS, + output_tokens, ) - if "stop_reason" in response_body: - span.set_attribute( - GEN_AI_RESPONSE_FINISH_REASONS, [response_body["stop_reason"]] - ) - # pylint: disable=no-self-use - def _handle_cohere_command_response( - self, span: Span, response_body: dict[str, Any] - ): - # Output tokens: Approximate from the response text - if "text" in response_body: - span.set_attribute( - GEN_AI_USAGE_OUTPUT_TOKENS, - math.ceil(len(response_body["text"]) / 6), - ) - if "finish_reason" in response_body: + if stop_reason := result.get("stopReason"): span.set_attribute( GEN_AI_RESPONSE_FINISH_REASONS, - [response_body["finish_reason"]], - ) - - # pylint: disable=no-self-use - def _handle_ai21_jamba_response( - self, span: Span, response_body: dict[str, Any] - ): - if "usage" in response_body: - usage = response_body["usage"] - if "prompt_tokens" in usage: - span.set_attribute( - GEN_AI_USAGE_INPUT_TOKENS, usage["prompt_tokens"] - ) - if "completion_tokens" in usage: - span.set_attribute( - GEN_AI_USAGE_OUTPUT_TOKENS, usage["completion_tokens"] - ) - if "choices" in response_body: - choices = response_body["choices"][0] - if "finish_reason" in choices: - span.set_attribute( - GEN_AI_RESPONSE_FINISH_REASONS, [choices["finish_reason"]] - ) - - # pylint: disable=no-self-use - def _handle_meta_llama_response( - self, span: Span, response_body: dict[str, Any] - ): - if "prompt_token_count" in response_body: - span.set_attribute( - GEN_AI_USAGE_INPUT_TOKENS, response_body["prompt_token_count"] - ) - if "generation_token_count" in response_body: - span.set_attribute( - GEN_AI_USAGE_OUTPUT_TOKENS, - response_body["generation_token_count"], - ) - if "stop_reason" in response_body: - span.set_attribute( - GEN_AI_RESPONSE_FINISH_REASONS, [response_body["stop_reason"]] - ) - - # pylint: disable=no-self-use - def _handle_mistral_mistral_response( - self, span: Span, response_body: dict[str, Any] - ): - if "outputs" in response_body: - outputs = response_body["outputs"][0] - if "text" in outputs: - span.set_attribute( - GEN_AI_USAGE_OUTPUT_TOKENS, - math.ceil(len(outputs["text"]) / 6), - ) - if "stop_reason" in outputs: - span.set_attribute( - GEN_AI_RESPONSE_FINISH_REASONS, [outputs["stop_reason"]] + [stop_reason], ) diff --git a/instrumentation/opentelemetry-instrumentation-botocore/test-requirements.txt b/instrumentation/opentelemetry-instrumentation-botocore/test-requirements-0.txt similarity index 97% rename from instrumentation/opentelemetry-instrumentation-botocore/test-requirements.txt rename to instrumentation/opentelemetry-instrumentation-botocore/test-requirements-0.txt index aa5f89859f..ee28a1f2ba 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/test-requirements.txt +++ b/instrumentation/opentelemetry-instrumentation-botocore/test-requirements-0.txt @@ -19,6 +19,7 @@ pluggy==1.5.0 py-cpuinfo==9.0.0 pycparser==2.21 pytest==7.4.4 +pytest-vcr==1.0.2 python-dateutil==2.8.2 pytz==2024.1 PyYAML==6.0.1 diff --git a/instrumentation/opentelemetry-instrumentation-botocore/test-requirements-1.txt b/instrumentation/opentelemetry-instrumentation-botocore/test-requirements-1.txt new file mode 100644 index 0000000000..c4695ff27c --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/test-requirements-1.txt 
@@ -0,0 +1,39 @@ +asgiref==3.8.1 +aws-xray-sdk==2.12.1 +boto3==1.35.56 +botocore==1.35.56 +certifi==2024.7.4 +cffi==1.17.0 +charset-normalizer==3.3.2 +cryptography==43.0.1 +Deprecated==1.2.14 +docker==7.0.0 +idna==3.7 +iniconfig==2.0.0 +Jinja2==3.1.4 +jmespath==1.0.1 +MarkupSafe==2.1.5 +moto==5.0.9 +packaging==24.0 +pluggy==1.5.0 +py-cpuinfo==9.0.0 +pycparser==2.21 +pytest==7.4.4 +pytest-vcr==1.0.2 +python-dateutil==2.8.2 +pytz==2024.1 +PyYAML==6.0.1 +requests==2.32.3 +responses==0.25.0 +s3transfer==0.10.0 +six==1.16.0 +tomli==2.0.1 +typing_extensions==4.12.2 +urllib3==1.26.19 +Werkzeug==3.0.6 +wrapt==1.16.0 +xmltodict==0.13.0 +zipp==3.19.2 +-e opentelemetry-instrumentation +-e propagator/opentelemetry-propagator-aws-xray +-e instrumentation/opentelemetry-instrumentation-botocore diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content[amazon.titan].yaml b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content[amazon.titan].yaml new file mode 100644 index 0000000000..38b1357819 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content[amazon.titan].yaml @@ -0,0 +1,93 @@ +interactions: +- request: + body: |- + { + "messages": [ + { + "role": "user", + "content": [ + { + "text": "Say this is a test" + } + ] + } + ], + "inferenceConfig": { + "maxTokens": 10, + "temperature": 0.8, + "topP": 1, + "stopSequences": [ + "|" + ] + } + } + headers: + Content-Length: + - '170' + Content-Type: + - !!binary | + YXBwbGljYXRpb24vanNvbg== + User-Agent: + - !!binary | + Qm90bzMvMS4zNS41NiBtZC9Cb3RvY29yZSMxLjM1LjU2IHVhLzIuMCBvcy9saW51eCM2LjEuMC0x + MDM0LW9lbSBtZC9hcmNoI3g4Nl82NCBsYW5nL3B5dGhvbiMzLjEwLjEyIG1kL3B5aW1wbCNDUHl0 + aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2 + X-Amz-Date: + - !!binary | + MjAyNDEyMzFUMTAwMjI1Wg== + X-Amz-Security-Token: + - test_aws_security_token + X-Amzn-Trace-Id: + - !!binary | + Um9vdD0xLTg5Y2FhMmUwLTUzY2NiZTE0MzQ0MGE4MWRjMTgyYzUwNjtQYXJlbnQ9ODdiZTZmMGI4 + YjlhNzQxZDtTYW1wbGVkPTE= + amz-sdk-invocation-id: + - !!binary | + YjE5MzViZmEtOGI3Ni00ODMwLTkzNDktMjZhODJjNTQ3Mjg0 + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + authorization: + - Bearer test_aws_authorization + method: POST + uri: https://bedrock-runtime.eu-central-1.amazonaws.com/model/amazon.titan-text-lite-v1/converse + response: + body: + string: |- + { + "metrics": { + "latencyMs": 527 + }, + "output": { + "message": { + "content": [ + { + "text": "" + } + ], + "role": "assistant" + } + }, + "stopReason": "end_turn", + "usage": { + "inputTokens": 8, + "outputTokens": 5, + "totalTokens": 13 + } + } + headers: + Connection: + - keep-alive + Content-Length: + - '179' + Content-Type: + - application/json + Date: + - Tue, 31 Dec 2024 10:02:26 GMT + Set-Cookie: test_set_cookie + x-amzn-RequestId: + - 8e183234-2be0-4d6f-8a0e-ff065d47ea60 + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content[anthropic.claude].yaml b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content[anthropic.claude].yaml new file mode 100644 index 0000000000..756c09453c --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content[anthropic.claude].yaml @@ -0,0 +1,93 @@ +interactions: +- request: + body: |- + { + "messages": [ + { + "role": "user", + "content": [ + { + "text": "Say 
this is a test" + } + ] + } + ], + "inferenceConfig": { + "maxTokens": 10, + "temperature": 0.8, + "topP": 1, + "stopSequences": [ + "|" + ] + } + } + headers: + Content-Length: + - '170' + Content-Type: + - !!binary | + YXBwbGljYXRpb24vanNvbg== + User-Agent: + - !!binary | + Qm90bzMvMS4zNS41NiBtZC9Cb3RvY29yZSMxLjM1LjU2IHVhLzIuMCBvcy9saW51eCM2LjEuMC0x + MDM0LW9lbSBtZC9hcmNoI3g4Nl82NCBsYW5nL3B5dGhvbiMzLjEwLjEyIG1kL3B5aW1wbCNDUHl0 + aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2 + X-Amz-Date: + - !!binary | + MjAyNDEyMzFUMTAwMjI2Wg== + X-Amz-Security-Token: + - test_aws_security_token + X-Amzn-Trace-Id: + - !!binary | + Um9vdD0xLTc5MDNlZTI5LWFjNTViNDljOWVkZmExNDhhNjVjMDgxNjtQYXJlbnQ9OGZjODcxYmIw + NjI1ZTEwNDtTYW1wbGVkPTE= + amz-sdk-invocation-id: + - !!binary | + OTU5ZDg1MWItNzg3Zi00NjI3LTk0MGQtNzk2MjJmYjE0ZjQ4 + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + authorization: + - Bearer test_aws_authorization + method: POST + uri: https://bedrock-runtime.eu-central-1.amazonaws.com/model/anthropic.claude-3-haiku-20240307-v1%3A0/converse + response: + body: + string: |- + { + "metrics": { + "latencyMs": 890 + }, + "output": { + "message": { + "content": [ + { + "text": "This is a test." + } + ], + "role": "assistant" + } + }, + "stopReason": "end_turn", + "usage": { + "inputTokens": 12, + "outputTokens": 8, + "totalTokens": 20 + } + } + headers: + Connection: + - keep-alive + Content-Length: + - '195' + Content-Type: + - application/json + Date: + - Tue, 31 Dec 2024 10:02:27 GMT + Set-Cookie: test_set_cookie + x-amzn-RequestId: + - 3bc52e82-cf3a-42a6-a5d6-e4a4fe3dfbbf + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/conftest.py b/instrumentation/opentelemetry-instrumentation-botocore/tests/conftest.py new file mode 100644 index 0000000000..271c540da7 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/conftest.py @@ -0,0 +1,190 @@ +"""Unit tests configuration module.""" + +import json +import os + +import boto3 +import pytest +import yaml + +from opentelemetry.instrumentation.botocore import BotocoreInstrumentor +from opentelemetry.instrumentation.botocore.environment_variables import ( + OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, +) +from opentelemetry.sdk._events import EventLoggerProvider +from opentelemetry.sdk._logs import LoggerProvider +from opentelemetry.sdk._logs.export import ( + InMemoryLogExporter, + SimpleLogRecordProcessor, +) +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import SimpleSpanProcessor +from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( + InMemorySpanExporter, +) + + +@pytest.fixture(scope="function", name="span_exporter") +def fixture_span_exporter(): + exporter = InMemorySpanExporter() + yield exporter + + +@pytest.fixture(scope="function", name="log_exporter") +def fixture_log_exporter(): + exporter = InMemoryLogExporter() + yield exporter + + +@pytest.fixture(scope="function", name="tracer_provider") +def fixture_tracer_provider(span_exporter): + provider = TracerProvider() + provider.add_span_processor(SimpleSpanProcessor(span_exporter)) + return provider + + +@pytest.fixture(scope="function", name="event_logger_provider") +def fixture_event_logger_provider(log_exporter): + provider = LoggerProvider() + provider.add_log_record_processor(SimpleLogRecordProcessor(log_exporter)) + event_logger_provider = EventLoggerProvider(provider) + + return event_logger_provider + + +@pytest.fixture 
+def bedrock_runtime_client(): + return boto3.client("bedrock-runtime") + + +@pytest.fixture(autouse=True) +def environment(): + if not os.getenv("AWS_ACCESS_KEY_ID"): + os.environ["AWS_ACCESS_KEY_ID"] = "test_aws_access_key_id" + if not os.getenv("AWS_SECRET_ACCESS_KEY"): + os.environ["AWS_SECRET_ACCESS_KEY"] = "test_aws_secret_key" + if not os.getenv("AWS_SESSION_TOKEN"): + os.environ["AWS_SESSION_TOKEN"] = "test_aws_session_token" + if not os.getenv("AWS_DEFAULT_REGION"): + os.environ["AWS_DEFAULT_REGION"] = "eu-central-1" + + +@pytest.fixture(scope="module") +def vcr_config(): + return { + "filter_headers": [ + ("cookie", "test_cookie"), + ("authorization", "Bearer test_aws_authorization"), + ("X-Amz-Security-Token", "test_aws_security_token"), + ], + "decode_compressed_response": True, + "before_record_response": scrub_response_headers, + } + + +@pytest.fixture(scope="function") +def instrument_no_content(tracer_provider, event_logger_provider): + os.environ.update( + {OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT: "False"} + ) + + instrumentor = BotocoreInstrumentor() + instrumentor.instrument( + tracer_provider=tracer_provider, + event_logger_provider=event_logger_provider, + ) + + yield instrumentor + os.environ.pop(OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, None) + instrumentor.uninstrument() + + +@pytest.fixture(scope="function") +def instrument_with_content(tracer_provider, event_logger_provider): + os.environ.update( + {OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT: "True"} + ) + instrumentor = BotocoreInstrumentor() + instrumentor.instrument( + tracer_provider=tracer_provider, + event_logger_provider=event_logger_provider, + ) + + yield instrumentor + os.environ.pop(OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, None) + instrumentor.uninstrument() + + +class LiteralBlockScalar(str): + """Formats the string as a literal block scalar, preserving whitespace and + without interpreting escape characters""" + + +def literal_block_scalar_presenter(dumper, data): + """Represents a scalar string as a literal block, via '|' syntax""" + return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|") + + +yaml.add_representer(LiteralBlockScalar, literal_block_scalar_presenter) + + +def process_string_value(string_value): + """Pretty-prints JSON or returns long strings as a LiteralBlockScalar""" + try: + json_data = json.loads(string_value) + return LiteralBlockScalar(json.dumps(json_data, indent=2)) + except (ValueError, TypeError): + if len(string_value) > 80: + return LiteralBlockScalar(string_value) + return string_value + + +def convert_body_to_literal(data): + """Searches the data for body strings, attempting to pretty-print JSON""" + if isinstance(data, dict): + for key, value in data.items(): + # Handle response body case (e.g., response.body.string) + if key == "body" and isinstance(value, dict) and "string" in value: + value["string"] = process_string_value(value["string"]) + + # Handle request body case (e.g., request.body) + elif key == "body" and isinstance(value, str): + data[key] = process_string_value(value) + + else: + convert_body_to_literal(value) + + elif isinstance(data, list): + for idx, choice in enumerate(data): + data[idx] = convert_body_to_literal(choice) + + return data + + +class PrettyPrintJSONBody: + """This makes request and response body recordings more readable.""" + + @staticmethod + def serialize(cassette_dict): + cassette_dict = convert_body_to_literal(cassette_dict) + return yaml.dump( + cassette_dict, 
default_flow_style=False, allow_unicode=True + ) + + @staticmethod + def deserialize(cassette_string): + return yaml.load(cassette_string, Loader=yaml.Loader) + + +@pytest.fixture(scope="module", autouse=True) +def fixture_vcr(vcr): + vcr.register_serializer("yaml", PrettyPrintJSONBody) + return vcr + + +def scrub_response_headers(response): + """ + This scrubs sensitive response headers. Note they are case-sensitive! + """ + response["headers"]["Set-Cookie"] = "test_set_cookie" + return response diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py b/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py new file mode 100644 index 0000000000..3de3223af6 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py @@ -0,0 +1,155 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Any + +import boto3 +import pytest + +from opentelemetry.sdk.trace import ReadableSpan +from opentelemetry.semconv._incubating.attributes import ( + gen_ai_attributes as GenAIAttributes, +) + +BOTO3_VERSION = tuple(int(x) for x in boto3.__version__.split(".")) + + +@pytest.mark.skipif( + BOTO3_VERSION < (1, 35, 56), reason="Converse API not available" +) +@pytest.mark.vcr() +@pytest.mark.parametrize("llm_model", ["amazon.titan", "anthropic.claude"]) +def test_converse_with_content( + llm_model, + span_exporter, + log_exporter, + bedrock_runtime_client, + instrument_with_content, +): + llm_model_id = { + "amazon.titan": "amazon.titan-text-lite-v1", + "anthropic.claude": "anthropic.claude-3-haiku-20240307-v1:0", + } + messages = [{"role": "user", "content": [{"text": "Say this is a test"}]}] + + llm_model_value = llm_model_id[llm_model] + max_tokens, temperature, top_p, stop_sequences = 10, 0.8, 1, ["|"] + response = bedrock_runtime_client.converse( + messages=messages, + modelId=llm_model_value, + inferenceConfig={ + "maxTokens": max_tokens, + "temperature": temperature, + "topP": top_p, + "stopSequences": stop_sequences, + }, + ) + + (span,) = span_exporter.get_finished_spans() + assert_completion_attributes( + span, + llm_model_value, + response, + "chat", + top_p, + temperature, + max_tokens, + stop_sequences, + ) + + logs = log_exporter.get_finished_logs() + assert len(logs) == 0 + + +def assert_completion_attributes( + span: ReadableSpan, + request_model: str, + response: dict[str, Any], + operation_name: str = "chat", + request_top_p: int | None = None, + request_temperature: int | None = None, + request_max_tokens: int | None = None, + request_stop_sequences: list[str] | None = None, +): + return assert_all_attributes( + span, + request_model, + response["usage"]["inputTokens"], + response["usage"]["outputTokens"], + (response["stopReason"],), + operation_name, + request_top_p, + request_temperature, + request_max_tokens, + tuple(request_stop_sequences), + ) + + +def 
assert_equal_or_not_present(value, attribute_name, span): + if value: + assert value == span.attributes[attribute_name] + else: + assert attribute_name not in span.attributes + + +def assert_all_attributes( + span: ReadableSpan, + request_model: str, + input_tokens: int | None = None, + output_tokens: int | None = None, + finish_reason: tuple[str] | None = None, + operation_name: str = "chat", + request_top_p: int | None = None, + request_temperature: int | None = None, + request_max_tokens: int | None = None, + request_stop_sequences: tuple[str] | None = None, +): + assert span.name == f"{operation_name} {request_model}" + assert ( + operation_name + == span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] + ) + assert ( + GenAIAttributes.GenAiSystemValues.AWS_BEDROCK.value + == span.attributes[GenAIAttributes.GEN_AI_SYSTEM] + ) + assert ( + request_model == span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] + ) + + assert_equal_or_not_present( + input_tokens, GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS, span + ) + assert_equal_or_not_present( + output_tokens, GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS, span + ) + assert_equal_or_not_present( + finish_reason, GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS, span + ) + assert_equal_or_not_present( + request_top_p, GenAIAttributes.GEN_AI_REQUEST_TOP_P, span + ) + assert_equal_or_not_present( + request_temperature, GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE, span + ) + assert_equal_or_not_present( + request_max_tokens, GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS, span + ) + assert_equal_or_not_present( + request_stop_sequences, + GenAIAttributes.GEN_AI_REQUEST_STOP_SEQUENCES, + span, + ) diff --git a/tox.ini b/tox.ini index d198c0a836..70c6ae6a28 100644 --- a/tox.ini +++ b/tox.ini @@ -66,7 +66,7 @@ envlist = lint-instrumentation-aws-lambda ; opentelemetry-instrumentation-botocore - py3{8,9,10,11,12,13}-test-instrumentation-botocore + py3{8,9,10,11,12,13}-test-instrumentation-botocore-{0,1} ; FIXME: see https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1736 ; pypy3-test-instrumentation-botocore lint-instrumentation-botocore @@ -414,6 +414,11 @@ test_deps = opentelemetry-semantic-conventions@{env:CORE_REPO}\#egg=opentelemetry-semantic-conventions&subdirectory=opentelemetry-semantic-conventions opentelemetry-sdk@{env:CORE_REPO}\#egg=opentelemetry-sdk&subdirectory=opentelemetry-sdk opentelemetry-test-utils@{env:CORE_REPO}\#egg=opentelemetry-test-utils&subdirectory=tests/opentelemetry-test-utils +pass_env = + AWS_ACCESS_KEY_ID + AWS_SECRET_ACCESS_KEY + AWS_SESSION_TOKEN + AWS_DEFAULT_REGION deps = lint: -r dev-requirements.txt @@ -518,7 +523,9 @@ deps = lint-instrumentation-urllib3: -r {toxinidir}/instrumentation/opentelemetry-instrumentation-urllib3/test-requirements-1.txt botocore: {[testenv]test_deps} - botocore: -r {toxinidir}/instrumentation/opentelemetry-instrumentation-botocore/test-requirements.txt + botocore-0: -r {toxinidir}/instrumentation/opentelemetry-instrumentation-botocore/test-requirements-0.txt + botocore-1: -r {toxinidir}/instrumentation/opentelemetry-instrumentation-botocore/test-requirements-1.txt + lint-instrumentation-botocore: -r {toxinidir}/instrumentation/opentelemetry-instrumentation-botocore/test-requirements-1.txt cassandra: {[testenv]test_deps} cassandra: -r {toxinidir}/instrumentation/opentelemetry-instrumentation-cassandra/test-requirements.txt From b48323087e951c5f503585a6a0611bfd65bb18ca Mon Sep 17 00:00:00 2001 From: Riccardo Magliocchetti Date: Tue, 31 Dec 2024 11:37:12 +0100 Subject: [PATCH 
3/8] Make it explicit we are handling only the converse operation --- .../instrumentation/botocore/extensions/bedrock.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py index 6798e372b9..dcec3c5e88 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py @@ -53,9 +53,14 @@ class _BedrockRuntimeExtension(_AwsSdkExtension): Amazon Bedrock Runtime. """ + _HANDLED_OPERATIONS = {"Converse"} + def extract_attributes(self, attributes: _AttributeMapT): attributes[GEN_AI_SYSTEM] = GenAiSystemValues.AWS_BEDROCK.value + if self._call_context.operation not in self._HANDLED_OPERATIONS: + return + model_id = self._call_context.params.get(_MODEL_ID_KEY) if model_id: attributes[GEN_AI_REQUEST_MODEL] = model_id @@ -97,6 +102,9 @@ def _set_if_not_none(attributes, key, value): attributes[key] = value def before_service_call(self, span: Span): + if self._call_context.operation not in self._HANDLED_OPERATIONS: + return + if not span.is_recording(): return @@ -107,6 +115,9 @@ def before_service_call(self, span: Span): span.update_name(f"{operation_name} {request_model}") def on_success(self, span: Span, result: dict[str, Any]): + if self._call_context.operation not in self._HANDLED_OPERATIONS: + return + model_id = self._call_context.params.get(_MODEL_ID_KEY) if not model_id: From e1508703e6e04e486f5f58991c64d02f4f5a6271 Mon Sep 17 00:00:00 2001 From: Riccardo Magliocchetti Date: Tue, 31 Dec 2024 14:22:39 +0100 Subject: [PATCH 4/8] Simplify test since all models behaves the same --- .../botocore/extensions/bedrock.py | 6 +- ...].yaml => test_converse_with_content.yaml} | 24 ++--- ...nverse_with_content[anthropic.claude].yaml | 93 ------------------- .../tests/test_botocore_bedrock.py | 8 +- 4 files changed, 18 insertions(+), 113 deletions(-) rename instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/{test_converse_with_content[amazon.titan].yaml => test_converse_with_content.yaml} (78%) delete mode 100644 instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content[anthropic.claude].yaml diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py index dcec3c5e88..9d8242216d 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py @@ -66,7 +66,11 @@ def extract_attributes(self, attributes: _AttributeMapT): attributes[GEN_AI_REQUEST_MODEL] = model_id # FIXME: add other model patterns - text_model_patterns = ["amazon.titan-text", "anthropic.claude"] + text_model_patterns = [ + "amazon.titan-text", + "anthropic.claude", + "meta.llama", + ] if any(pattern in model_id for pattern in text_model_patterns): attributes[GEN_AI_OPERATION_NAME] = ( GenAiOperationNameValues.CHAT.value diff --git 
a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content[amazon.titan].yaml b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content.yaml similarity index 78% rename from instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content[amazon.titan].yaml rename to instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content.yaml index 38b1357819..8060f02076 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content[amazon.titan].yaml +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content.yaml @@ -34,16 +34,16 @@ interactions: aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2 X-Amz-Date: - !!binary | - MjAyNDEyMzFUMTAwMjI1Wg== + MjAyNDEyMzFUMTMyMDQxWg== X-Amz-Security-Token: - test_aws_security_token X-Amzn-Trace-Id: - !!binary | - Um9vdD0xLTg5Y2FhMmUwLTUzY2NiZTE0MzQ0MGE4MWRjMTgyYzUwNjtQYXJlbnQ9ODdiZTZmMGI4 - YjlhNzQxZDtTYW1wbGVkPTE= + Um9vdD0xLWY1MWY4NGM1LTNiZjk4YzY0YWMyNmJhNTk1OWJjODgxNjtQYXJlbnQ9YjNmOGZhM2Mz + MDc1NGEzZjtTYW1wbGVkPTE= amz-sdk-invocation-id: - !!binary | - YjE5MzViZmEtOGI3Ni00ODMwLTkzNDktMjZhODJjNTQ3Mjg0 + OTIyMjczMzItY2I5ZS00NGM1LTliZGUtYjU0NmJmODkxYmEy amz-sdk-request: - !!binary | YXR0ZW1wdD0x @@ -56,37 +56,37 @@ interactions: string: |- { "metrics": { - "latencyMs": 527 + "latencyMs": 811 }, "output": { "message": { "content": [ { - "text": "" + "text": "I am happy to assist you today" } ], "role": "assistant" } }, - "stopReason": "end_turn", + "stopReason": "max_tokens", "usage": { "inputTokens": 8, - "outputTokens": 5, - "totalTokens": 13 + "outputTokens": 10, + "totalTokens": 18 } } headers: Connection: - keep-alive Content-Length: - - '179' + - '212' Content-Type: - application/json Date: - - Tue, 31 Dec 2024 10:02:26 GMT + - Tue, 31 Dec 2024 13:20:42 GMT Set-Cookie: test_set_cookie x-amzn-RequestId: - - 8e183234-2be0-4d6f-8a0e-ff065d47ea60 + - 63dfbcb2-3536-4906-b10d-e5b126b3c0ae status: code: 200 message: OK diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content[anthropic.claude].yaml b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content[anthropic.claude].yaml deleted file mode 100644 index 756c09453c..0000000000 --- a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_content[anthropic.claude].yaml +++ /dev/null @@ -1,93 +0,0 @@ -interactions: -- request: - body: |- - { - "messages": [ - { - "role": "user", - "content": [ - { - "text": "Say this is a test" - } - ] - } - ], - "inferenceConfig": { - "maxTokens": 10, - "temperature": 0.8, - "topP": 1, - "stopSequences": [ - "|" - ] - } - } - headers: - Content-Length: - - '170' - Content-Type: - - !!binary | - YXBwbGljYXRpb24vanNvbg== - User-Agent: - - !!binary | - Qm90bzMvMS4zNS41NiBtZC9Cb3RvY29yZSMxLjM1LjU2IHVhLzIuMCBvcy9saW51eCM2LjEuMC0x - MDM0LW9lbSBtZC9hcmNoI3g4Nl82NCBsYW5nL3B5dGhvbiMzLjEwLjEyIG1kL3B5aW1wbCNDUHl0 - aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2 - X-Amz-Date: - - !!binary | - MjAyNDEyMzFUMTAwMjI2Wg== - X-Amz-Security-Token: - - test_aws_security_token - X-Amzn-Trace-Id: - - !!binary | - Um9vdD0xLTc5MDNlZTI5LWFjNTViNDljOWVkZmExNDhhNjVjMDgxNjtQYXJlbnQ9OGZjODcxYmIw - NjI1ZTEwNDtTYW1wbGVkPTE= - amz-sdk-invocation-id: - - !!binary | - OTU5ZDg1MWItNzg3Zi00NjI3LTk0MGQtNzk2MjJmYjE0ZjQ4 - 
amz-sdk-request: - - !!binary | - YXR0ZW1wdD0x - authorization: - - Bearer test_aws_authorization - method: POST - uri: https://bedrock-runtime.eu-central-1.amazonaws.com/model/anthropic.claude-3-haiku-20240307-v1%3A0/converse - response: - body: - string: |- - { - "metrics": { - "latencyMs": 890 - }, - "output": { - "message": { - "content": [ - { - "text": "This is a test." - } - ], - "role": "assistant" - } - }, - "stopReason": "end_turn", - "usage": { - "inputTokens": 12, - "outputTokens": 8, - "totalTokens": 20 - } - } - headers: - Connection: - - keep-alive - Content-Length: - - '195' - Content-Type: - - application/json - Date: - - Tue, 31 Dec 2024 10:02:27 GMT - Set-Cookie: test_set_cookie - x-amzn-RequestId: - - 3bc52e82-cf3a-42a6-a5d6-e4a4fe3dfbbf - status: - code: 200 - message: OK -version: 1 diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py b/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py index 3de3223af6..62b8bd657d 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py @@ -31,21 +31,15 @@ BOTO3_VERSION < (1, 35, 56), reason="Converse API not available" ) @pytest.mark.vcr() -@pytest.mark.parametrize("llm_model", ["amazon.titan", "anthropic.claude"]) def test_converse_with_content( - llm_model, span_exporter, log_exporter, bedrock_runtime_client, instrument_with_content, ): - llm_model_id = { - "amazon.titan": "amazon.titan-text-lite-v1", - "anthropic.claude": "anthropic.claude-3-haiku-20240307-v1:0", - } messages = [{"role": "user", "content": [{"text": "Say this is a test"}]}] - llm_model_value = llm_model_id[llm_model] + llm_model_value = "amazon.titan-text-lite-v1" max_tokens, temperature, top_p, stop_sequences = 10, 0.8, 1, ["|"] response = bedrock_runtime_client.converse( messages=messages, From 5bd5cff722c57e2a50fdfa328ca64c294967f258 Mon Sep 17 00:00:00 2001 From: Riccardo Magliocchetti Date: Wed, 15 Jan 2025 14:23:59 +0100 Subject: [PATCH 5/8] Add test for error case and rework things a bit --- .../botocore/extensions/bedrock.py | 35 +++--- .../tests/bedrock_utils.py | 116 ++++++++++++++++++ .../test_converse_with_invalid_model.yaml | 69 +++++++++++ .../tests/test_botocore_bedrock.py | 111 +++++------------ 4 files changed, 236 insertions(+), 95 deletions(-) create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/tests/bedrock_utils.py create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_invalid_model.yaml diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py index 9d8242216d..fe826da603 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py @@ -24,6 +24,10 @@ from opentelemetry.instrumentation.botocore.extensions.types import ( _AttributeMapT, _AwsSdkExtension, + _BotoClientErrorT, +) +from opentelemetry.semconv._incubating.attributes.error_attributes import ( + ERROR_TYPE, ) from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import ( GEN_AI_OPERATION_NAME, @@ 
-40,6 +44,7 @@ GenAiSystemValues, ) from opentelemetry.trace.span import Span +from opentelemetry.trace.status import Status, StatusCode _logger = logging.getLogger(__name__) @@ -56,25 +61,17 @@ class _BedrockRuntimeExtension(_AwsSdkExtension): _HANDLED_OPERATIONS = {"Converse"} def extract_attributes(self, attributes: _AttributeMapT): - attributes[GEN_AI_SYSTEM] = GenAiSystemValues.AWS_BEDROCK.value - if self._call_context.operation not in self._HANDLED_OPERATIONS: return + attributes[GEN_AI_SYSTEM] = GenAiSystemValues.AWS_BEDROCK.value + model_id = self._call_context.params.get(_MODEL_ID_KEY) if model_id: attributes[GEN_AI_REQUEST_MODEL] = model_id - - # FIXME: add other model patterns - text_model_patterns = [ - "amazon.titan-text", - "anthropic.claude", - "meta.llama", - ] - if any(pattern in model_id for pattern in text_model_patterns): - attributes[GEN_AI_OPERATION_NAME] = ( - GenAiOperationNameValues.CHAT.value - ) + attributes[GEN_AI_OPERATION_NAME] = ( + GenAiOperationNameValues.CHAT.value + ) if inference_config := self._call_context.params.get( "inferenceConfig" @@ -122,9 +119,7 @@ def on_success(self, span: Span, result: dict[str, Any]): if self._call_context.operation not in self._HANDLED_OPERATIONS: return - model_id = self._call_context.params.get(_MODEL_ID_KEY) - - if not model_id: + if not span.is_recording(): return if usage := result.get("usage"): @@ -144,3 +139,11 @@ def on_success(self, span: Span, result: dict[str, Any]): GEN_AI_RESPONSE_FINISH_REASONS, [stop_reason], ) + + def on_error(self, span: Span, exception: _BotoClientErrorT): + if self._call_context.operation not in self._HANDLED_OPERATIONS: + return + + span.set_status(Status(StatusCode.ERROR, str(exception))) + if span.is_recording(): + span.set_attribute(ERROR_TYPE, type(exception).__qualname__) diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/bedrock_utils.py b/instrumentation/opentelemetry-instrumentation-botocore/tests/bedrock_utils.py new file mode 100644 index 0000000000..6d2415432f --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/bedrock_utils.py @@ -0,0 +1,116 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +from typing import Any + +from opentelemetry.sdk.trace import ReadableSpan +from opentelemetry.semconv._incubating.attributes import ( + gen_ai_attributes as GenAIAttributes, +) + + +def assert_completion_attributes( + span: ReadableSpan, + request_model: str, + response: dict[str, Any] | None, + operation_name: str = "chat", + request_top_p: int | None = None, + request_temperature: int | None = None, + request_max_tokens: int | None = None, + request_stop_sequences: list[str] | None = None, +): + if usage := (response and response.get("usage")): + input_tokens = usage["inputTokens"] + output_tokens = usage["outputTokens"] + else: + input_tokens, output_tokens = None, None + + if response: + finish_reason = (response["stopReason"],) + else: + finish_reason = None + + return assert_all_attributes( + span, + request_model, + input_tokens, + output_tokens, + finish_reason, + operation_name, + request_top_p, + request_temperature, + request_max_tokens, + tuple(request_stop_sequences) + if request_stop_sequences is not None + else request_stop_sequences, + ) + + +def assert_equal_or_not_present(value, attribute_name, span): + if value: + assert value == span.attributes[attribute_name] + else: + assert attribute_name not in span.attributes + + +def assert_all_attributes( + span: ReadableSpan, + request_model: str, + input_tokens: int | None = None, + output_tokens: int | None = None, + finish_reason: tuple[str] | None = None, + operation_name: str = "chat", + request_top_p: int | None = None, + request_temperature: int | None = None, + request_max_tokens: int | None = None, + request_stop_sequences: tuple[str] | None = None, +): + assert span.name == f"{operation_name} {request_model}" + assert ( + operation_name + == span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] + ) + assert ( + GenAIAttributes.GenAiSystemValues.AWS_BEDROCK.value + == span.attributes[GenAIAttributes.GEN_AI_SYSTEM] + ) + assert ( + request_model == span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] + ) + + assert_equal_or_not_present( + input_tokens, GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS, span + ) + assert_equal_or_not_present( + output_tokens, GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS, span + ) + assert_equal_or_not_present( + finish_reason, GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS, span + ) + assert_equal_or_not_present( + request_top_p, GenAIAttributes.GEN_AI_REQUEST_TOP_P, span + ) + assert_equal_or_not_present( + request_temperature, GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE, span + ) + assert_equal_or_not_present( + request_max_tokens, GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS, span + ) + assert_equal_or_not_present( + request_stop_sequences, + GenAIAttributes.GEN_AI_REQUEST_STOP_SEQUENCES, + span, + ) diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_invalid_model.yaml b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_invalid_model.yaml new file mode 100644 index 0000000000..ecbfb6bbd0 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_converse_with_invalid_model.yaml @@ -0,0 +1,69 @@ +interactions: +- request: + body: |- + { + "messages": [ + { + "role": "user", + "content": [ + { + "text": "Say this is a test" + } + ] + } + ] + } + headers: + Content-Length: + - '77' + Content-Type: + - !!binary | + YXBwbGljYXRpb24vanNvbg== + User-Agent: + - !!binary | + 
Qm90bzMvMS4zNS41NiBtZC9Cb3RvY29yZSMxLjM1LjU2IHVhLzIuMCBvcy9saW51eCM2LjEuMC0x + MDM0LW9lbSBtZC9hcmNoI3g4Nl82NCBsYW5nL3B5dGhvbiMzLjEwLjEyIG1kL3B5aW1wbCNDUHl0 + aG9uIGNmZy9yZXRyeS1tb2RlI2xlZ2FjeSBCb3RvY29yZS8xLjM1LjU2 + X-Amz-Date: + - !!binary | + MjAyNTAxMTVUMTEwMTQ3Wg== + X-Amz-Security-Token: + - test_aws_security_token + X-Amzn-Trace-Id: + - !!binary | + Um9vdD0xLWIzM2JhNTkxLTdkYmQ0ZDZmYTBmZTdmYzc2MTExOThmNztQYXJlbnQ9NzRmNmQ1NTEz + MzkzMzUxNTtTYW1wbGVkPTE= + amz-sdk-invocation-id: + - !!binary | + NTQ5MmQ0NTktNzhkNi00ZWY4LTlmMDMtZTA5ODhkZGRiZDI5 + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + authorization: + - Bearer test_aws_authorization + method: POST + uri: https://bedrock-runtime.eu-central-1.amazonaws.com/model/does-not-exist/converse + response: + body: + string: |- + { + "message": "The provided model identifier is invalid." + } + headers: + Connection: + - keep-alive + Content-Length: + - '55' + Content-Type: + - application/json + Date: + - Wed, 15 Jan 2025 11:01:47 GMT + Set-Cookie: test_set_cookie + x-amzn-ErrorType: + - ValidationException:http://internal.amazon.com/coral/com.amazon.bedrock/ + x-amzn-RequestId: + - d425bf99-8a4e-4d83-8d77-a48410dd82b2 + status: + code: 400 + message: Bad Request +version: 1 diff --git a/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py b/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py index 62b8bd657d..8de7721bc9 100644 --- a/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py +++ b/instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py @@ -14,15 +14,15 @@ from __future__ import annotations -from typing import Any - import boto3 import pytest -from opentelemetry.sdk.trace import ReadableSpan -from opentelemetry.semconv._incubating.attributes import ( - gen_ai_attributes as GenAIAttributes, +from opentelemetry.semconv._incubating.attributes.error_attributes import ( + ERROR_TYPE, ) +from opentelemetry.trace.status import StatusCode + +from .bedrock_utils import assert_completion_attributes BOTO3_VERSION = tuple(int(x) for x in boto3.__version__.split(".")) @@ -68,82 +68,35 @@ def test_converse_with_content( assert len(logs) == 0 -def assert_completion_attributes( - span: ReadableSpan, - request_model: str, - response: dict[str, Any], - operation_name: str = "chat", - request_top_p: int | None = None, - request_temperature: int | None = None, - request_max_tokens: int | None = None, - request_stop_sequences: list[str] | None = None, +@pytest.mark.skipif( + BOTO3_VERSION < (1, 35, 56), reason="Converse API not available" +) +@pytest.mark.vcr() +def test_converse_with_invalid_model( + span_exporter, + log_exporter, + bedrock_runtime_client, + instrument_with_content, ): - return assert_all_attributes( - span, - request_model, - response["usage"]["inputTokens"], - response["usage"]["outputTokens"], - (response["stopReason"],), - operation_name, - request_top_p, - request_temperature, - request_max_tokens, - tuple(request_stop_sequences), - ) - + messages = [{"role": "user", "content": [{"text": "Say this is a test"}]}] -def assert_equal_or_not_present(value, attribute_name, span): - if value: - assert value == span.attributes[attribute_name] - else: - assert attribute_name not in span.attributes - - -def assert_all_attributes( - span: ReadableSpan, - request_model: str, - input_tokens: int | None = None, - output_tokens: int | None = None, - finish_reason: tuple[str] | None = None, - operation_name: str = 
"chat", - request_top_p: int | None = None, - request_temperature: int | None = None, - request_max_tokens: int | None = None, - request_stop_sequences: tuple[str] | None = None, -): - assert span.name == f"{operation_name} {request_model}" - assert ( - operation_name - == span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] - ) - assert ( - GenAIAttributes.GenAiSystemValues.AWS_BEDROCK.value - == span.attributes[GenAIAttributes.GEN_AI_SYSTEM] - ) - assert ( - request_model == span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] - ) + llm_model_value = "does-not-exist" + with pytest.raises(bedrock_runtime_client.exceptions.ValidationException): + bedrock_runtime_client.converse( + messages=messages, + modelId=llm_model_value, + ) - assert_equal_or_not_present( - input_tokens, GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS, span - ) - assert_equal_or_not_present( - output_tokens, GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS, span - ) - assert_equal_or_not_present( - finish_reason, GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS, span - ) - assert_equal_or_not_present( - request_top_p, GenAIAttributes.GEN_AI_REQUEST_TOP_P, span - ) - assert_equal_or_not_present( - request_temperature, GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE, span - ) - assert_equal_or_not_present( - request_max_tokens, GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS, span - ) - assert_equal_or_not_present( - request_stop_sequences, - GenAIAttributes.GEN_AI_REQUEST_STOP_SEQUENCES, + (span,) = span_exporter.get_finished_spans() + assert_completion_attributes( span, + llm_model_value, + None, + "chat", ) + + assert span.status.status_code == StatusCode.ERROR + assert span.attributes[ERROR_TYPE] == "ValidationException" + + logs = log_exporter.get_finished_logs() + assert len(logs) == 0 From 16aac70f0ba4ef41ede9485c952800d3f7ae6184 Mon Sep 17 00:00:00 2001 From: Riccardo Magliocchetti Date: Wed, 15 Jan 2025 14:53:26 +0100 Subject: [PATCH 6/8] Add converse example --- .../examples/bedrock-runtime/zero-code/.env | 15 ++++++ .../bedrock-runtime/zero-code/README.rst | 50 +++++++++++++++++++ .../bedrock-runtime/zero-code/converse.py | 22 ++++++++ .../zero-code/requirements.txt | 6 +++ 4 files changed, 93 insertions(+) create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/.env create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/README.rst create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/converse.py create mode 100644 instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/requirements.txt diff --git a/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/.env b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/.env new file mode 100644 index 0000000000..0ab6418c72 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/.env @@ -0,0 +1,15 @@ +# Update this with your real values +AWS_ACCESS_KEY_ID=key +AWS_SECRET_ACCESS_KEY=secret +AWS_DEFAULT_REGION=eu-central-1 +# Uncomment and set if your credentials are temporary +# AWS_SESSION_TOKEN= + +# Uncomment and change to your OTLP endpoint +# OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317 +# OTEL_EXPORTER_OTLP_PROTOCOL=grpc + +OTEL_SERVICE_NAME=opentelemetry-python-bedrock + +# Uncomment if your OTLP endpoint doesn't support logs +# OTEL_LOGS_EXPORTER=console diff 
--git a/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/README.rst b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/README.rst
new file mode 100644
index 0000000000..37e1db9b30
--- /dev/null
+++ b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/README.rst
@@ -0,0 +1,50 @@
+Bedrock Zero-Code Instrumentation Example
+=========================================
+
+This is an example of how to instrument Bedrock calls with zero code changes,
+using `opentelemetry-instrument`.
+
+When the example is run, it exports traces and logs to an OTLP
+compatible endpoint. Traces include details such as the model used and the
+duration of the chat request. Logs capture the chat request and the generated
+response, providing a comprehensive view of the performance and behavior of
+your Bedrock requests.
+
+Note: the `.env <.env>`_ file configures additional environment variables:
+
+- `OTEL_LOGS_EXPORTER=otlp` to specify the exporter type.
+
+Available examples
+------------------
+
+- `converse.py` uses the `bedrock-runtime` `Converse API <https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_Converse.html>`_.
+
+Setup
+-----
+
+Minimally, update the `.env <.env>`_ file with your "AWS_ACCESS_KEY_ID",
+"AWS_SECRET_ACCESS_KEY", "AWS_DEFAULT_REGION" and, if you are using temporary
+credentials, "AWS_SESSION_TOKEN". An
+OTLP compatible endpoint should be listening for traces and logs on
+http://localhost:4317. If not, update "OTEL_EXPORTER_OTLP_ENDPOINT" as well.
+
+Next, set up a virtual environment like this:
+
+::
+
+    python3 -m venv .venv
+    source .venv/bin/activate
+    pip install "python-dotenv[cli]"
+    pip install -r requirements.txt
+
+Run
+---
+
+Run the example like this:
+
+::
+
+    dotenv run -- opentelemetry-instrument python converse.py
+
+You should see a poem generated by Bedrock while traces are exported to your
+configured observability tool.
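+
+The model can be changed without editing the code: `converse.py` reads the
+optional "CHAT_MODEL" environment variable (defaulting to
+"amazon.titan-text-lite-v1"), so, assuming the chosen model is enabled for
+your AWS account and region, you could for example run:
+
+::
+
+    CHAT_MODEL=anthropic.claude-3-haiku-20240307-v1:0 dotenv run -- opentelemetry-instrument python converse.py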
diff --git a/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/converse.py b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/converse.py new file mode 100644 index 0000000000..b6ce55d50d --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/converse.py @@ -0,0 +1,22 @@ +import os + +import boto3 + + +def main(): + client = boto3.client("bedrock-runtime") + response = client.converse( + modelId=os.getenv("CHAT_MODEL", "amazon.titan-text-lite-v1"), + messages=[ + { + "role": "user", + "content": [{"text": "Write a short poem on OpenTelemetry."}], + }, + ], + ) + + print(response["output"]["message"]["content"][0]["text"]) + + +if __name__ == "__main__": + main() diff --git a/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/requirements.txt b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/requirements.txt new file mode 100644 index 0000000000..dea6c40109 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-botocore/examples/bedrock-runtime/zero-code/requirements.txt @@ -0,0 +1,6 @@ +boto3~=1.35.99 + +opentelemetry-sdk~=1.29.0 +opentelemetry-exporter-otlp-proto-grpc~=1.29.0 +opentelemetry-distro~=0.50b0 +opentelemetry-instrumentation-botocore~=0.50b0 From 15dbd404ece2db0d2600e63a8444e33e6d5d91a8 Mon Sep 17 00:00:00 2001 From: Riccardo Magliocchetti Date: Mon, 20 Jan 2025 09:13:15 +0100 Subject: [PATCH 7/8] Generate workflows --- .github/workflows/core_contrib_test_0.yml | 28 ++- .github/workflows/test_0.yml | 252 +++++++++++----------- .github/workflows/test_1.yml | 216 +++++++++---------- .github/workflows/test_2.yml | 108 ++++++++++ 4 files changed, 367 insertions(+), 237 deletions(-) diff --git a/.github/workflows/core_contrib_test_0.yml b/.github/workflows/core_contrib_test_0.yml index 7ab737c657..bbc43ce736 100644 --- a/.github/workflows/core_contrib_test_0.yml +++ b/.github/workflows/core_contrib_test_0.yml @@ -349,8 +349,8 @@ jobs: - name: Run tests run: tox -e py38-test-instrumentation-aws-lambda -- -ra - py38-test-instrumentation-botocore: - name: instrumentation-botocore + py38-test-instrumentation-botocore-0: + name: instrumentation-botocore-0 runs-on: ubuntu-latest steps: - name: Checkout contrib repo @ SHA - ${{ env.CONTRIB_REPO_SHA }} @@ -369,7 +369,29 @@ jobs: run: pip install tox-uv - name: Run tests - run: tox -e py38-test-instrumentation-botocore -- -ra + run: tox -e py38-test-instrumentation-botocore-0 -- -ra + + py38-test-instrumentation-botocore-1: + name: instrumentation-botocore-1 + runs-on: ubuntu-latest + steps: + - name: Checkout contrib repo @ SHA - ${{ env.CONTRIB_REPO_SHA }} + uses: actions/checkout@v4 + with: + repository: open-telemetry/opentelemetry-python-contrib + ref: ${{ env.CONTRIB_REPO_SHA }} + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + architecture: "x64" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py38-test-instrumentation-botocore-1 -- -ra py38-test-instrumentation-boto3sqs: name: instrumentation-boto3sqs diff --git a/.github/workflows/test_0.yml b/.github/workflows/test_0.yml index bbfb5a6865..b43a46b94f 100644 --- a/.github/workflows/test_0.yml +++ b/.github/workflows/test_0.yml @@ -1852,8 +1852,8 @@ jobs: - name: Run tests run: tox -e pypy3-test-instrumentation-aws-lambda -- -ra - 
py38-test-instrumentation-botocore_ubuntu-latest: - name: instrumentation-botocore 3.8 Ubuntu + py38-test-instrumentation-botocore-0_ubuntu-latest: + name: instrumentation-botocore-0 3.8 Ubuntu runs-on: ubuntu-latest steps: - name: Checkout repo @ SHA - ${{ github.sha }} @@ -1868,10 +1868,28 @@ jobs: run: pip install tox-uv - name: Run tests - run: tox -e py38-test-instrumentation-botocore -- -ra + run: tox -e py38-test-instrumentation-botocore-0 -- -ra - py39-test-instrumentation-botocore_ubuntu-latest: - name: instrumentation-botocore 3.9 Ubuntu + py38-test-instrumentation-botocore-1_ubuntu-latest: + name: instrumentation-botocore-1 3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py38-test-instrumentation-botocore-1 -- -ra + + py39-test-instrumentation-botocore-0_ubuntu-latest: + name: instrumentation-botocore-0 3.9 Ubuntu runs-on: ubuntu-latest steps: - name: Checkout repo @ SHA - ${{ github.sha }} @@ -1886,10 +1904,46 @@ jobs: run: pip install tox-uv - name: Run tests - run: tox -e py39-test-instrumentation-botocore -- -ra + run: tox -e py39-test-instrumentation-botocore-0 -- -ra + + py39-test-instrumentation-botocore-1_ubuntu-latest: + name: instrumentation-botocore-1 3.9 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.9 + uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py39-test-instrumentation-botocore-1 -- -ra + + py310-test-instrumentation-botocore-0_ubuntu-latest: + name: instrumentation-botocore-0 3.10 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py310-test-instrumentation-botocore-0 -- -ra - py310-test-instrumentation-botocore_ubuntu-latest: - name: instrumentation-botocore 3.10 Ubuntu + py310-test-instrumentation-botocore-1_ubuntu-latest: + name: instrumentation-botocore-1 3.10 Ubuntu runs-on: ubuntu-latest steps: - name: Checkout repo @ SHA - ${{ github.sha }} @@ -1904,10 +1958,10 @@ jobs: run: pip install tox-uv - name: Run tests - run: tox -e py310-test-instrumentation-botocore -- -ra + run: tox -e py310-test-instrumentation-botocore-1 -- -ra - py311-test-instrumentation-botocore_ubuntu-latest: - name: instrumentation-botocore 3.11 Ubuntu + py311-test-instrumentation-botocore-0_ubuntu-latest: + name: instrumentation-botocore-0 3.11 Ubuntu runs-on: ubuntu-latest steps: - name: Checkout repo @ SHA - ${{ github.sha }} @@ -1922,10 +1976,28 @@ jobs: run: pip install tox-uv - name: Run tests - run: tox -e py311-test-instrumentation-botocore -- -ra + run: tox -e py311-test-instrumentation-botocore-0 -- -ra - py312-test-instrumentation-botocore_ubuntu-latest: - name: instrumentation-botocore 3.12 Ubuntu + py311-test-instrumentation-botocore-1_ubuntu-latest: + name: instrumentation-botocore-1 3.11 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + 
python-version: "3.11" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py311-test-instrumentation-botocore-1 -- -ra + + py312-test-instrumentation-botocore-0_ubuntu-latest: + name: instrumentation-botocore-0 3.12 Ubuntu runs-on: ubuntu-latest steps: - name: Checkout repo @ SHA - ${{ github.sha }} @@ -1940,10 +2012,46 @@ jobs: run: pip install tox-uv - name: Run tests - run: tox -e py312-test-instrumentation-botocore -- -ra + run: tox -e py312-test-instrumentation-botocore-0 -- -ra + + py312-test-instrumentation-botocore-1_ubuntu-latest: + name: instrumentation-botocore-1 3.12 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py312-test-instrumentation-botocore-1 -- -ra + + py313-test-instrumentation-botocore-0_ubuntu-latest: + name: instrumentation-botocore-0 3.13 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.13 + uses: actions/setup-python@v5 + with: + python-version: "3.13" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py313-test-instrumentation-botocore-0 -- -ra - py313-test-instrumentation-botocore_ubuntu-latest: - name: instrumentation-botocore 3.13 Ubuntu + py313-test-instrumentation-botocore-1_ubuntu-latest: + name: instrumentation-botocore-1 3.13 Ubuntu runs-on: ubuntu-latest steps: - name: Checkout repo @ SHA - ${{ github.sha }} @@ -1958,7 +2066,7 @@ jobs: run: pip install tox-uv - name: Run tests - run: tox -e py313-test-instrumentation-botocore -- -ra + run: tox -e py313-test-instrumentation-botocore-1 -- -ra py38-test-instrumentation-boto3sqs_ubuntu-latest: name: instrumentation-boto3sqs 3.8 Ubuntu @@ -4407,111 +4515,3 @@ jobs: - name: Run tests run: tox -e py38-test-instrumentation-requests -- -ra - - py39-test-instrumentation-requests_ubuntu-latest: - name: instrumentation-requests 3.9 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.9 - uses: actions/setup-python@v5 - with: - python-version: "3.9" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py39-test-instrumentation-requests -- -ra - - py310-test-instrumentation-requests_ubuntu-latest: - name: instrumentation-requests 3.10 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.10 - uses: actions/setup-python@v5 - with: - python-version: "3.10" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py310-test-instrumentation-requests -- -ra - - py311-test-instrumentation-requests_ubuntu-latest: - name: instrumentation-requests 3.11 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.11 - uses: actions/setup-python@v5 - with: - python-version: "3.11" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py311-test-instrumentation-requests -- -ra - - py312-test-instrumentation-requests_ubuntu-latest: - name: instrumentation-requests 3.12 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha 
}} - uses: actions/checkout@v4 - - - name: Set up Python 3.12 - uses: actions/setup-python@v5 - with: - python-version: "3.12" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py312-test-instrumentation-requests -- -ra - - py313-test-instrumentation-requests_ubuntu-latest: - name: instrumentation-requests 3.13 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.13 - uses: actions/setup-python@v5 - with: - python-version: "3.13" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py313-test-instrumentation-requests -- -ra - - py38-test-instrumentation-starlette_ubuntu-latest: - name: instrumentation-starlette 3.8 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.8 - uses: actions/setup-python@v5 - with: - python-version: "3.8" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py38-test-instrumentation-starlette -- -ra diff --git a/.github/workflows/test_1.yml b/.github/workflows/test_1.yml index 7cd1b5ed61..c1712a5367 100644 --- a/.github/workflows/test_1.yml +++ b/.github/workflows/test_1.yml @@ -16,6 +16,114 @@ env: jobs: + py39-test-instrumentation-requests_ubuntu-latest: + name: instrumentation-requests 3.9 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.9 + uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py39-test-instrumentation-requests -- -ra + + py310-test-instrumentation-requests_ubuntu-latest: + name: instrumentation-requests 3.10 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py310-test-instrumentation-requests -- -ra + + py311-test-instrumentation-requests_ubuntu-latest: + name: instrumentation-requests 3.11 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py311-test-instrumentation-requests -- -ra + + py312-test-instrumentation-requests_ubuntu-latest: + name: instrumentation-requests 3.12 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py312-test-instrumentation-requests -- -ra + + py313-test-instrumentation-requests_ubuntu-latest: + name: instrumentation-requests 3.13 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.13 + uses: actions/setup-python@v5 + with: + python-version: "3.13" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py313-test-instrumentation-requests -- -ra + + 
py38-test-instrumentation-starlette_ubuntu-latest: + name: instrumentation-starlette 3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py38-test-instrumentation-starlette -- -ra + py39-test-instrumentation-starlette_ubuntu-latest: name: instrumentation-starlette 3.9 Ubuntu runs-on: ubuntu-latest @@ -4407,111 +4515,3 @@ jobs: - name: Run tests run: tox -e pypy3-test-util-http -- -ra - - py38-test-propagator-aws-xray-0_ubuntu-latest: - name: propagator-aws-xray-0 3.8 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.8 - uses: actions/setup-python@v5 - with: - python-version: "3.8" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py38-test-propagator-aws-xray-0 -- -ra - - py38-test-propagator-aws-xray-1_ubuntu-latest: - name: propagator-aws-xray-1 3.8 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.8 - uses: actions/setup-python@v5 - with: - python-version: "3.8" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py38-test-propagator-aws-xray-1 -- -ra - - py39-test-propagator-aws-xray-0_ubuntu-latest: - name: propagator-aws-xray-0 3.9 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.9 - uses: actions/setup-python@v5 - with: - python-version: "3.9" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py39-test-propagator-aws-xray-0 -- -ra - - py39-test-propagator-aws-xray-1_ubuntu-latest: - name: propagator-aws-xray-1 3.9 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.9 - uses: actions/setup-python@v5 - with: - python-version: "3.9" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py39-test-propagator-aws-xray-1 -- -ra - - py310-test-propagator-aws-xray-0_ubuntu-latest: - name: propagator-aws-xray-0 3.10 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.10 - uses: actions/setup-python@v5 - with: - python-version: "3.10" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py310-test-propagator-aws-xray-0 -- -ra - - py310-test-propagator-aws-xray-1_ubuntu-latest: - name: propagator-aws-xray-1 3.10 Ubuntu - runs-on: ubuntu-latest - steps: - - name: Checkout repo @ SHA - ${{ github.sha }} - uses: actions/checkout@v4 - - - name: Set up Python 3.10 - uses: actions/setup-python@v5 - with: - python-version: "3.10" - - - name: Install tox - run: pip install tox-uv - - - name: Run tests - run: tox -e py310-test-propagator-aws-xray-1 -- -ra diff --git a/.github/workflows/test_2.yml b/.github/workflows/test_2.yml index fd1dcb00e0..bc52c4eba4 100644 --- a/.github/workflows/test_2.yml +++ b/.github/workflows/test_2.yml @@ -16,6 +16,114 @@ env: jobs: + py38-test-propagator-aws-xray-0_ubuntu-latest: + name: propagator-aws-xray-0 3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - 
${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py38-test-propagator-aws-xray-0 -- -ra + + py38-test-propagator-aws-xray-1_ubuntu-latest: + name: propagator-aws-xray-1 3.8 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py38-test-propagator-aws-xray-1 -- -ra + + py39-test-propagator-aws-xray-0_ubuntu-latest: + name: propagator-aws-xray-0 3.9 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.9 + uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py39-test-propagator-aws-xray-0 -- -ra + + py39-test-propagator-aws-xray-1_ubuntu-latest: + name: propagator-aws-xray-1 3.9 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.9 + uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py39-test-propagator-aws-xray-1 -- -ra + + py310-test-propagator-aws-xray-0_ubuntu-latest: + name: propagator-aws-xray-0 3.10 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py310-test-propagator-aws-xray-0 -- -ra + + py310-test-propagator-aws-xray-1_ubuntu-latest: + name: propagator-aws-xray-1 3.10 Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install tox + run: pip install tox-uv + + - name: Run tests + run: tox -e py310-test-propagator-aws-xray-1 -- -ra + py311-test-propagator-aws-xray-0_ubuntu-latest: name: propagator-aws-xray-0 3.11 Ubuntu runs-on: ubuntu-latest From 3b70fd7fae0026ed10492f5cbfad5fd0bf566498 Mon Sep 17 00:00:00 2001 From: Riccardo Magliocchetti Date: Mon, 20 Jan 2025 09:15:02 +0100 Subject: [PATCH 8/8] Add changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6e40e73270..39e3fcdba8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#3129](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3129)) - `opentelemetry-util-http` Add `py.typed` file to enable PEP 561 ([#3127](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3127)) +- `opentelemetry-opentelemetry-botocore` Add basic support for GenAI attributes for AWS Bedrock Converse API + ([#3161](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3161)) ### Fixed