diff --git a/semantic_conventions/.rubocop.yml b/semantic_conventions/.rubocop.yml
index e89d75b460..2182a2eb7b 100644
--- a/semantic_conventions/.rubocop.yml
+++ b/semantic_conventions/.rubocop.yml
@@ -26,6 +26,9 @@ Metrics/MethodLength:
 Metrics/ModuleLength:
   Enabled: false
 
+Naming/ClassAndModuleCamelCase:
+  Exclude:
+    - lib/opentelemetry/semantic_conventions/**/*.rb
 Naming/FileName:
   Exclude:
     - lib/opentelemetry-semantic_conventions.rb
diff --git a/semantic_conventions/Rakefile b/semantic_conventions/Rakefile
index a6cbb8dcf7..6150a99f2c 100644
--- a/semantic_conventions/Rakefile
+++ b/semantic_conventions/Rakefile
@@ -10,8 +10,6 @@ require 'yard'
 require 'rubocop/rake_task'
 require 'tmpdir'
 
-SPEC_VERSION = '1.10.0'
-
 RuboCop::RakeTask.new
 
 Rake::TestTask.new :test do |t|
@@ -31,34 +29,71 @@ else
   task default: %i[generate test rubocop yard]
 end
 
-task :generate do
-  cwd = Dir.pwd
+SPEC_VERSION = '1.20.0'
+PREV_SPEC_VERSION = Dir.children('./lib/opentelemetry/semantic_conventions/')
+                    .select { |dir| dir != SPEC_VERSION && File.directory?("./lib/opentelemetry/semantic_conventions/#{dir}") }
+                    .map { |dir| Gem::Version.new(dir) }
+                    .max
+
+GENERATOR_VERSION = '0.22.0'
+semconvrepodir = './tmpsemconvrepo'
+gen_output_dir = "./lib/opentelemetry/semantic_conventions/#{SPEC_VERSION}"
 
-  Dir.mktmpdir('opentelemetry-specification', Dir.pwd) do |tmpdir|
-    `git clone https://github.com/open-telemetry/opentelemetry-specification.git #{tmpdir}`
-    Dir.chdir(tmpdir) do
-      `git fetch`
-      `git checkout "v#{SPEC_VERSION}"`
-    end
-
-    %w[trace resource].each do |kind|
-      cmd = %W[
-        docker run --rm
-        -v "#{tmpdir}/semantic_conventions/#{kind}":/source
-        -v "#{cwd}/templates":/templates
-        -v "#{cwd}/lib":/output
-        otel/semconvgen:0.11.1
-        -f /source code
-        --template /templates/semantic_conventions.j2
-        --output /output/opentelemetry/semantic_conventions/#{kind}.rb
-        -Dmodule=#{kind[0].upcase}#{kind[1..]}
-      ]
-
-      puts "Running: #{cmd.join(' ')}"
-      `#{cmd.join(' ')}`
-    end
+task generate: [:update_gem_version, :update_includes, "#{gen_output_dir}/trace.rb", "#{gen_output_dir}/resource.rb"]
+
+directory semconvrepodir do
+  `git clone --depth=1 --branch v#{SPEC_VERSION} https://github.com/open-telemetry/opentelemetry-specification.git #{semconvrepodir}`
+end
+
+task check_out_semconv_version: [semconvrepodir] do
+  Dir.chdir(semconvrepodir) do
+    `git fetch`
+    `git checkout "v#{SPEC_VERSION}"`
   end
+end
+
+directory gen_output_dir
+
+file "#{gen_output_dir}/trace.rb" => [:check_out_semconv_version, gen_output_dir] do
+  semconvgen(semconvrepodir, kind: 'trace', only: 'span,event,attribute_group,scope')
+end
+
+file "#{gen_output_dir}/resource.rb" => [:check_out_semconv_version, gen_output_dir] do
+  semconvgen(semconvrepodir, kind: 'resource', only: 'resource')
+end
+
+def semconvgen(semconvrepo, kind:, only:)
+  cwd = Dir.pwd
+  cmd = %W[
+    docker run --rm
+    -v "#{semconvrepo}/semantic_conventions/":/source
+    -v "#{cwd}/templates":/templates
+    -v "#{cwd}/lib":/output
+    otel/semconvgen:#{GENERATOR_VERSION}
+    --only #{only}
+    --yaml-root /source code
+    --template /templates/semantic_conventions.j2
+    --output /output/opentelemetry/semantic_conventions/#{SPEC_VERSION}/#{kind}.rb
+    -Dkind=#{kind}
+    -Dmodule=#{kind[0].upcase}#{kind[1..]}
+    -Dspec_version=#{SPEC_VERSION}
+    -Dprev_spec_version=#{PREV_SPEC_VERSION}
+  ]
+
+  puts "Running: #{cmd.join(' ')}"
+  `#{cmd.join(' ')}`
+end
+
+task :update_includes do
+  `sed -i.bak "s/::SemanticConventions_.*::/::SemanticConventions_#{SPEC_VERSION.tr('.', '_')}::/g" lib/opentelemetry/semantic_conventions/trace.rb`
+  `sed -i.bak "s/::SemanticConventions_.*::/::SemanticConventions_#{SPEC_VERSION.tr('.', '_')}::/g" lib/opentelemetry/semantic_conventions/resource.rb`
+  `sed -i.bak "s/#{PREV_SPEC_VERSION.to_s.gsub('.', '\\.')}/#{SPEC_VERSION}/" lib/opentelemetry/semantic_conventions.rb`
+  `rm lib/opentelemetry/semantic_conventions/trace.rb.bak`
+  `rm lib/opentelemetry/semantic_conventions/resource.rb.bak`
+  `rm lib/opentelemetry/semantic_conventions.rb.bak`
+end
 
+task :update_gem_version do
   `sed -i.bak "s/VERSION = '.*'/VERSION = '#{SPEC_VERSION}'/g" lib/opentelemetry/semantic_conventions/version.rb`
   `rm lib/opentelemetry/semantic_conventions/version.rb.bak`
 end
diff --git a/semantic_conventions/lib/opentelemetry/semantic_conventions.rb b/semantic_conventions/lib/opentelemetry/semantic_conventions.rb
index a5c118072f..4d27c7979d 100644
--- a/semantic_conventions/lib/opentelemetry/semantic_conventions.rb
+++ b/semantic_conventions/lib/opentelemetry/semantic_conventions.rb
@@ -10,5 +10,7 @@ module SemanticConventions
   end
 end
 
+require_relative 'semantic_conventions/1.20.0/trace'
 require_relative 'semantic_conventions/trace'
+require_relative 'semantic_conventions/1.20.0/resource'
 require_relative 'semantic_conventions/resource'
diff --git a/semantic_conventions/lib/opentelemetry/semantic_conventions/1.10.0/resource.rb b/semantic_conventions/lib/opentelemetry/semantic_conventions/1.10.0/resource.rb
new file mode 100644
index 0000000000..cc4d4a941a
--- /dev/null
+++ b/semantic_conventions/lib/opentelemetry/semantic_conventions/1.10.0/resource.rb
@@ -0,0 +1,301 @@
+# frozen_string_literal: true
+
+# Copyright The OpenTelemetry Authors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+module OpenTelemetry
+  module SemanticConventions_1_10_0
+    # https://github.com/open-telemetry/opentelemetry-specification/tree/v1.10.0/specification
+    module Resource
+      # Name of the cloud provider
+      CLOUD_PROVIDER = 'cloud.provider'
+
+      # The cloud account ID the resource is assigned to
+      CLOUD_ACCOUNT_ID = 'cloud.account.id'
+
+      # The geographical region the resource is running
+      # @note Refer to your provider's docs to see the available regions, for example [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), [Google Cloud regions](https://cloud.google.com/about/locations), or [Tencent Cloud regions](https://intl.cloud.tencent.com/document/product/213/6091)
+      CLOUD_REGION = 'cloud.region'
+
+      # Cloud regions often have multiple, isolated locations known as zones to increase availability. 
Availability zone represents the zone where the resource is running + # @note Availability zones are called "zones" on Alibaba Cloud and Google Cloud + CLOUD_AVAILABILITY_ZONE = 'cloud.availability_zone' + + # The cloud platform in use + # @note The prefix of the service SHOULD match the one specified in `cloud.provider` + CLOUD_PLATFORM = 'cloud.platform' + + # The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html) + AWS_ECS_CONTAINER_ARN = 'aws.ecs.container.arn' + + # The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html) + AWS_ECS_CLUSTER_ARN = 'aws.ecs.cluster.arn' + + # The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) for an ECS task + AWS_ECS_LAUNCHTYPE = 'aws.ecs.launchtype' + + # The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) + AWS_ECS_TASK_ARN = 'aws.ecs.task.arn' + + # The task definition family this task definition is a member of + AWS_ECS_TASK_FAMILY = 'aws.ecs.task.family' + + # The revision for this task definition + AWS_ECS_TASK_REVISION = 'aws.ecs.task.revision' + + # The ARN of an EKS cluster + AWS_EKS_CLUSTER_ARN = 'aws.eks.cluster.arn' + + # The name(s) of the AWS log group(s) an application is writing to + # @note Multiple log groups must be supported for cases like multi-container applications, where a single application has sidecar containers, and each write to their own log group + AWS_LOG_GROUP_NAMES = 'aws.log.group.names' + + # The Amazon Resource Name(s) (ARN) of the AWS log group(s) + # @note See the [log group ARN format documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format) + AWS_LOG_GROUP_ARNS = 'aws.log.group.arns' + + # The name(s) of the AWS log stream(s) an application is writing to + AWS_LOG_STREAM_NAMES = 'aws.log.stream.names' + + # The ARN(s) of the AWS log stream(s) + # @note See the [log stream ARN format documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain several log streams, so these ARNs necessarily identify both a log group and a log stream + AWS_LOG_STREAM_ARNS = 'aws.log.stream.arns' + + # Container name used by container runtime + CONTAINER_NAME = 'container.name' + + # Container ID. Usually a UUID, as for example used to [identify Docker containers](https://docs.docker.com/engine/reference/run/#container-identification). The UUID might be abbreviated + CONTAINER_ID = 'container.id' + + # The container runtime managing this container + CONTAINER_RUNTIME = 'container.runtime' + + # Name of the image the container was built on + CONTAINER_IMAGE_NAME = 'container.image.name' + + # Container image tag + CONTAINER_IMAGE_TAG = 'container.image.tag' + + # Name of the [deployment environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka deployment tier) + DEPLOYMENT_ENVIRONMENT = 'deployment.environment' + + # A unique identifier representing the device + # @note The device identifier MUST only be defined using the values outlined below. This value is not an advertising identifier and MUST NOT be used as such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). 
On Android (Java or Kotlin), this value MUST be equal to the Firebase Installation ID or a globally unique UUID which is persisted across sessions in your application. More information can be found [here](https://developer.android.com/training/articles/user-data-ids) on best practices and exact implementation details. Caution should be taken when storing personal data or anything which can identify a user. GDPR and data protection laws may apply, ensure you do your own due diligence + DEVICE_ID = 'device.id' + + # The model identifier for the device + # @note It's recommended this value represents a machine readable version of the model identifier rather than the market or consumer-friendly name of the device + DEVICE_MODEL_IDENTIFIER = 'device.model.identifier' + + # The marketing name for the device model + # @note It's recommended this value represents a human readable version of the device model rather than a machine readable alternative + DEVICE_MODEL_NAME = 'device.model.name' + + # The name of the device manufacturer + # @note The Android OS provides this field via [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). iOS apps SHOULD hardcode the value `Apple` + DEVICE_MANUFACTURER = 'device.manufacturer' + + # The name of the single function that this runtime instance executes + # @note This is the name of the function as configured/deployed on the FaaS platform and is usually different from the name of the callback function (which may be stored in the [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) span attributes) + FAAS_NAME = 'faas.name' + + # The unique ID of the single function that this runtime instance executes + # @note Depending on the cloud provider, use: + # + # * **AWS Lambda:** The function [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + # Take care not to use the "invoked ARN" directly but replace any + # [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) with the resolved function version, as the same runtime instance may be invokable with multiple + # different aliases. + # * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-resource-names) + # * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id). + # + # On some providers, it may not be possible to determine the full ID at startup, + # which is why this field cannot be made required. For example, on AWS the account ID + # part of the ARN is not available without calling another AWS API + # which may be deemed too slow for a short-running lambda function. + # As an alternative, consider setting `faas.id` as a span attribute instead + FAAS_ID = 'faas.id' + + # The immutable version of the function being executed + # @note Depending on the cloud provider and platform, use: + # + # * **AWS Lambda:** The [function version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + # (an integer represented as a decimal string). + # * **Google Cloud Run:** The [revision](https://cloud.google.com/run/docs/managing/revisions) + # (i.e., the function name plus the revision suffix). + # * **Google Cloud Functions:** The value of the + # [`K_REVISION` environment variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + # * **Azure Functions:** Not applicable. 
Do not set this attribute + FAAS_VERSION = 'faas.version' + + # The execution environment ID as a string, that will be potentially reused for other invocations to the same function/function version + # @note * **AWS Lambda:** Use the (full) log stream name + FAAS_INSTANCE = 'faas.instance' + + # The amount of memory available to the serverless function in MiB + # @note It's recommended to set this attribute since e.g. too little memory can easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information + FAAS_MAX_MEMORY = 'faas.max_memory' + + # Unique host ID. For Cloud, this must be the instance_id assigned by the cloud provider + HOST_ID = 'host.id' + + # Name of the host. On Unix systems, it may contain what the hostname command returns, or the fully qualified hostname, or another name specified by the user + HOST_NAME = 'host.name' + + # Type of host. For Cloud, this must be the machine type + HOST_TYPE = 'host.type' + + # The CPU architecture the host system is running on + HOST_ARCH = 'host.arch' + + # Name of the VM image or OS install the host was instantiated from + HOST_IMAGE_NAME = 'host.image.name' + + # VM image ID. For Cloud, this value is from the provider + HOST_IMAGE_ID = 'host.image.id' + + # The version string of the VM image as defined in [Version Attributes](README.md#version-attributes) + HOST_IMAGE_VERSION = 'host.image.version' + + # The name of the cluster + K8S_CLUSTER_NAME = 'k8s.cluster.name' + + # The name of the Node + K8S_NODE_NAME = 'k8s.node.name' + + # The UID of the Node + K8S_NODE_UID = 'k8s.node.uid' + + # The name of the namespace that the pod is running in + K8S_NAMESPACE_NAME = 'k8s.namespace.name' + + # The UID of the Pod + K8S_POD_UID = 'k8s.pod.uid' + + # The name of the Pod + K8S_POD_NAME = 'k8s.pod.name' + + # The name of the Container from Pod specification, must be unique within a Pod. Container runtime usually uses different globally unique name (`container.name`) + K8S_CONTAINER_NAME = 'k8s.container.name' + + # Number of times the container was restarted. This attribute can be used to identify a particular container (running or stopped) within a container spec + K8S_CONTAINER_RESTART_COUNT = 'k8s.container.restart_count' + + # The UID of the ReplicaSet + K8S_REPLICASET_UID = 'k8s.replicaset.uid' + + # The name of the ReplicaSet + K8S_REPLICASET_NAME = 'k8s.replicaset.name' + + # The UID of the Deployment + K8S_DEPLOYMENT_UID = 'k8s.deployment.uid' + + # The name of the Deployment + K8S_DEPLOYMENT_NAME = 'k8s.deployment.name' + + # The UID of the StatefulSet + K8S_STATEFULSET_UID = 'k8s.statefulset.uid' + + # The name of the StatefulSet + K8S_STATEFULSET_NAME = 'k8s.statefulset.name' + + # The UID of the DaemonSet + K8S_DAEMONSET_UID = 'k8s.daemonset.uid' + + # The name of the DaemonSet + K8S_DAEMONSET_NAME = 'k8s.daemonset.name' + + # The UID of the Job + K8S_JOB_UID = 'k8s.job.uid' + + # The name of the Job + K8S_JOB_NAME = 'k8s.job.name' + + # The UID of the CronJob + K8S_CRONJOB_UID = 'k8s.cronjob.uid' + + # The name of the CronJob + K8S_CRONJOB_NAME = 'k8s.cronjob.name' + + # The operating system type + OS_TYPE = 'os.type' + + # Human readable (not intended to be parsed) OS version information, like e.g. 
reported by `ver` or `lsb_release -a` commands + OS_DESCRIPTION = 'os.description' + + # Human readable operating system name + OS_NAME = 'os.name' + + # The version string of the operating system as defined in [Version Attributes](../../resource/semantic_conventions/README.md#version-attributes) + OS_VERSION = 'os.version' + + # Process identifier (PID) + PROCESS_PID = 'process.pid' + + # The name of the process executable. On Linux based systems, can be set to the `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of `GetProcessImageFileNameW` + PROCESS_EXECUTABLE_NAME = 'process.executable.name' + + # The full path to the process executable. On Linux based systems, can be set to the target of `proc/[pid]/exe`. On Windows, can be set to the result of `GetProcessImageFileNameW` + PROCESS_EXECUTABLE_PATH = 'process.executable.path' + + # The command used to launch the process (i.e. the command name). On Linux based systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter extracted from `GetCommandLineW` + PROCESS_COMMAND = 'process.command' + + # The full command used to launch the process as a single string representing the full command. On Windows, can be set to the result of `GetCommandLineW`. Do not set this if you have to assemble it just for monitoring; use `process.command_args` instead + PROCESS_COMMAND_LINE = 'process.command_line' + + # All the command arguments (including the command/executable itself) as received by the process. On Linux-based systems (and some other Unixoid systems supporting procfs), can be set according to the list of null-delimited strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be the full argv vector passed to `main` + PROCESS_COMMAND_ARGS = 'process.command_args' + + # The username of the user that owns the process + PROCESS_OWNER = 'process.owner' + + # The name of the runtime of this process. For compiled native binaries, this SHOULD be the name of the compiler + PROCESS_RUNTIME_NAME = 'process.runtime.name' + + # The version of the runtime of this process, as returned by the runtime without modification + PROCESS_RUNTIME_VERSION = 'process.runtime.version' + + # An additional description about the runtime of the process, for example a specific vendor customization of the runtime environment + PROCESS_RUNTIME_DESCRIPTION = 'process.runtime.description' + + # Logical name of the service + # @note MUST be the same for all instances of horizontally scaled services. If the value was not specified, SDKs MUST fallback to `unknown_service:` concatenated with [`process.executable.name`](process.md#process), e.g. `unknown_service:bash`. If `process.executable.name` is not available, the value MUST be set to `unknown_service` + SERVICE_NAME = 'service.name' + + # A namespace for `service.name` + # @note A string value having a meaning that helps to distinguish a group of services, for example the team name that owns a group of services. `service.name` is expected to be unique within the same namespace. If `service.namespace` is not specified in the Resource then `service.name` is expected to be unique for all services that have no explicit namespace defined (so the empty/unspecified namespace is simply one more valid namespace). 
Zero-length namespace string is assumed equal to unspecified namespace + SERVICE_NAMESPACE = 'service.namespace' + + # The string ID of the service instance + # @note MUST be unique for each instance of the same `service.namespace,service.name` pair (in other words `service.namespace,service.name,service.instance.id` triplet MUST be globally unique). The ID helps to distinguish instances of the same service that exist at the same time (e.g. instances of a horizontally scaled service). It is preferable for the ID to be persistent and stay the same for the lifetime of the service instance, however it is acceptable that the ID is ephemeral and changes during important lifetime events for the service (e.g. service restarts). If the service has no inherent unique ID that can be used as the value of this attribute it is recommended to generate a random Version 1 or Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use Version 5, see RFC 4122 for more recommendations) + SERVICE_INSTANCE_ID = 'service.instance.id' + + # The version string of the service API or implementation + SERVICE_VERSION = 'service.version' + + # The name of the telemetry SDK as defined above + TELEMETRY_SDK_NAME = 'telemetry.sdk.name' + + # The language of the telemetry SDK + TELEMETRY_SDK_LANGUAGE = 'telemetry.sdk.language' + + # The version string of the telemetry SDK + TELEMETRY_SDK_VERSION = 'telemetry.sdk.version' + + # The version string of the auto instrumentation agent, if used + TELEMETRY_AUTO_VERSION = 'telemetry.auto.version' + + # The name of the web engine + WEBENGINE_NAME = 'webengine.name' + + # The version of the web engine + WEBENGINE_VERSION = 'webengine.version' + + # Additional description of the web engine (e.g. detailed version and edition information) + WEBENGINE_DESCRIPTION = 'webengine.description' + + end + end +end diff --git a/semantic_conventions/lib/opentelemetry/semantic_conventions/1.10.0/trace.rb b/semantic_conventions/lib/opentelemetry/semantic_conventions/1.10.0/trace.rb new file mode 100644 index 0000000000..ce007b6736 --- /dev/null +++ b/semantic_conventions/lib/opentelemetry/semantic_conventions/1.10.0/trace.rb @@ -0,0 +1,488 @@ +# frozen_string_literal: true + +# Copyright The OpenTelemetry Authors +# +# SPDX-License-Identifier: Apache-2.0 + +module OpenTelemetry + module SemanticConventions_1_10_0 + # https://github.com/open-telemetry/opentelemetry-specification/tree/v1.10.0/specification + module Trace + # The full invoked ARN as provided on the `Context` passed to the function (`Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next` applicable) + # @note This may be different from `faas.id` if an alias is involved + AWS_LAMBDA_INVOKED_ARN = 'aws.lambda.invoked_arn' + + # The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) uniquely identifies the event + CLOUDEVENTS_EVENT_ID = 'cloudevents.event_id' + + # The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) identifies the context in which an event happened + CLOUDEVENTS_EVENT_SOURCE = 'cloudevents.event_source' + + # The [version of the CloudEvents specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses + CLOUDEVENTS_EVENT_SPEC_VERSION = 'cloudevents.event_spec_version' + + # The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) contains a value describing the type of event related to the originating occurrence + 
CLOUDEVENTS_EVENT_TYPE = 'cloudevents.event_type' + + # The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) of the event in the context of the event producer (identified by source) + CLOUDEVENTS_EVENT_SUBJECT = 'cloudevents.event_subject' + + # Parent-child Reference type + # @note The causal relationship between a child Span and a parent Span + OPENTRACING_REF_TYPE = 'opentracing.ref_type' + + # An identifier for the database management system (DBMS) product being used. See below for a list of well-known identifiers + DB_SYSTEM = 'db.system' + + # The connection string used to connect to the database. It is recommended to remove embedded credentials + DB_CONNECTION_STRING = 'db.connection_string' + + # Username for accessing the database + DB_USER = 'db.user' + + # The fully-qualified class name of the [Java Database Connectivity (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver used to connect + DB_JDBC_DRIVER_CLASSNAME = 'db.jdbc.driver_classname' + + # This attribute is used to report the name of the database being accessed. For commands that switch the database, this should be set to the target database (even if the command fails) + # @note In some SQL databases, the database name to be used is called "schema name". In case there are multiple layers that could be considered for database name (e.g. Oracle instance name and schema name), the database name to be used is the more specific layer (e.g. Oracle schema name) + DB_NAME = 'db.name' + + # The database statement being executed + # @note The value may be sanitized to exclude sensitive information + DB_STATEMENT = 'db.statement' + + # The name of the operation being executed, e.g. the [MongoDB command name](https://docs.mongodb.com/manual/reference/command/#database-operations) such as `findAndModify`, or the SQL keyword + # @note When setting this to an SQL keyword, it is not recommended to attempt any client-side parsing of `db.statement` just to get this property, but it should be set if the operation name is provided by the library being instrumented. If the SQL statement has an ambiguous operation, or performs more than one operation, this value may be omitted + DB_OPERATION = 'db.operation' + + # Remote hostname or similar, see note below + NET_PEER_NAME = 'net.peer.name' + + # Remote address of the peer (dotted decimal for IPv4 or [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6) + NET_PEER_IP = 'net.peer.ip' + + # Remote port number + NET_PEER_PORT = 'net.peer.port' + + # Transport protocol used. See note below + NET_TRANSPORT = 'net.transport' + + # The Microsoft SQL Server [instance name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) connecting to. This name is used to determine the port of a named instance + # @note If setting a `db.mssql.instance_name`, `net.peer.port` is no longer required (but still recommended if non-standard) + DB_MSSQL_INSTANCE_NAME = 'db.mssql.instance_name' + + # The fetch size used for paging, i.e. how many rows will be returned at once + DB_CASSANDRA_PAGE_SIZE = 'db.cassandra.page_size' + + # The consistency level of the query. 
Based on consistency values from [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html) + DB_CASSANDRA_CONSISTENCY_LEVEL = 'db.cassandra.consistency_level' + + # The name of the primary table that the operation is acting upon, including the keyspace name (if applicable) + # @note This mirrors the db.sql.table attribute but references cassandra rather than sql. It is not recommended to attempt any client-side parsing of `db.statement` just to get this property, but it should be set if it is provided by the library being instrumented. If the operation is acting upon an anonymous table, or more than one table, this value MUST NOT be set + DB_CASSANDRA_TABLE = 'db.cassandra.table' + + # Whether or not the query is idempotent + DB_CASSANDRA_IDEMPOTENCE = 'db.cassandra.idempotence' + + # The number of times a query was speculatively executed. Not set or `0` if the query was not executed speculatively + DB_CASSANDRA_SPECULATIVE_EXECUTION_COUNT = 'db.cassandra.speculative_execution_count' + + # The ID of the coordinating node for a query + DB_CASSANDRA_COORDINATOR_ID = 'db.cassandra.coordinator.id' + + # The data center of the coordinating node for a query + DB_CASSANDRA_COORDINATOR_DC = 'db.cassandra.coordinator.dc' + + # The index of the database being accessed as used in the [`SELECT` command](https://redis.io/commands/select), provided as an integer. To be used instead of the generic `db.name` attribute + DB_REDIS_DATABASE_INDEX = 'db.redis.database_index' + + # The collection being accessed within the database stated in `db.name` + DB_MONGODB_COLLECTION = 'db.mongodb.collection' + + # The name of the primary table that the operation is acting upon, including the database name (if applicable) + # @note It is not recommended to attempt any client-side parsing of `db.statement` just to get this property, but it should be set if it is provided by the library being instrumented. If the operation is acting upon an anonymous table, or more than one table, this value MUST NOT be set + DB_SQL_TABLE = 'db.sql.table' + + # The type of the exception (its fully-qualified class name, if applicable). The dynamic type of the exception should be preferred over the static type in languages that support it + EXCEPTION_TYPE = 'exception.type' + + # The exception message + EXCEPTION_MESSAGE = 'exception.message' + + # A stacktrace as a string in the natural representation for the language runtime. The representation is to be determined and documented by each language SIG + EXCEPTION_STACKTRACE = 'exception.stacktrace' + + # SHOULD be set to true if the exception event is recorded at a point where it is known that the exception is escaping the scope of the span + # @note An exception is considered to have escaped (or left) the scope of a span, + # if that span is ended while the exception is still logically "in flight". + # This may be actually "in flight" in some languages (e.g. if the exception + # is passed to a Context manager's `__exit__` method in Python) but will + # usually be caught at the point of recording the exception in most languages. + # + # It is usually not possible to determine at the point where an exception is thrown + # whether it will escape the scope of a span. + # However, it is trivial to know that an exception + # will escape, if one checks for an active exception just before ending the span, + # as done in the [example above](#recording-an-exception). 
+ # + # It follows that an exception may still escape the scope of the span + # even if the `exception.escaped` attribute was not set or set to false, + # since the event might have been recorded at a time where it was not + # clear whether the exception will escape + EXCEPTION_ESCAPED = 'exception.escaped' + + # Type of the trigger which caused this function execution + # @note For the server/consumer span on the incoming side, + # `faas.trigger` MUST be set. + # + # Clients invoking FaaS instances usually cannot set `faas.trigger`, + # since they would typically need to look in the payload to determine + # the event type. If clients set it, it should be the same as the + # trigger that corresponding incoming would have (i.e., this has + # nothing to do with the underlying transport used to make the API + # call to invoke the lambda, which is often HTTP) + FAAS_TRIGGER = 'faas.trigger' + + # The execution ID of the current function execution + FAAS_EXECUTION = 'faas.execution' + + # The name of the source on which the triggering operation was performed. For example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the database name + FAAS_DOCUMENT_COLLECTION = 'faas.document.collection' + + # Describes the type of the operation that was performed on the data + FAAS_DOCUMENT_OPERATION = 'faas.document.operation' + + # A string containing the time when the data was accessed in the [ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed in [UTC](https://www.w3.org/TR/NOTE-datetime) + FAAS_DOCUMENT_TIME = 'faas.document.time' + + # The document name/table subjected to the operation. For example, in Cloud Storage or S3 is the name of the file, and in Cosmos DB the table name + FAAS_DOCUMENT_NAME = 'faas.document.name' + + # HTTP request method + HTTP_METHOD = 'http.method' + + # Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not transmitted over HTTP, but if it is known, it should be included nevertheless + # @note `http.url` MUST NOT contain credentials passed via URL in form of `https://username:password@www.example.com/`. In such case the attribute's value should be `https://www.example.com/` + HTTP_URL = 'http.url' + + # The full request target as passed in a HTTP request line or equivalent + HTTP_TARGET = 'http.target' + + # The value of the [HTTP host header](https://tools.ietf.org/html/rfc7230#section-5.4). An empty Host header should also be reported, see note + # @note When the header is present but empty the attribute SHOULD be set to the empty string. Note that this is a valid situation that is expected in certain cases, according the aforementioned [section of RFC 7230](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is not set the attribute MUST NOT be set + HTTP_HOST = 'http.host' + + # The URI scheme identifying the used protocol + HTTP_SCHEME = 'http.scheme' + + # [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6) + HTTP_STATUS_CODE = 'http.status_code' + + # Kind of HTTP protocol used + # @note If `net.transport` is not specified, it can be assumed to be `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed + HTTP_FLAVOR = 'http.flavor' + + # Value of the [HTTP User-Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the client + HTTP_USER_AGENT = 'http.user_agent' + + # The size of the request payload body in bytes. 
This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For requests using transport encoding, this should be the compressed size + HTTP_REQUEST_CONTENT_LENGTH = 'http.request_content_length' + + # The size of the uncompressed request payload body after transport decoding. Not set if transport encoding not used + HTTP_REQUEST_CONTENT_LENGTH_UNCOMPRESSED = 'http.request_content_length_uncompressed' + + # The size of the response payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For requests using transport encoding, this should be the compressed size + HTTP_RESPONSE_CONTENT_LENGTH = 'http.response_content_length' + + # The size of the uncompressed response payload body after transport decoding. Not set if transport encoding not used + HTTP_RESPONSE_CONTENT_LENGTH_UNCOMPRESSED = 'http.response_content_length_uncompressed' + + # The ordinal number of request re-sending attempt + HTTP_RETRY_COUNT = 'http.retry_count' + + # The primary server name of the matched virtual host. This should be obtained via configuration. If no such configuration can be obtained, this attribute MUST NOT be set ( `net.host.name` should be used instead) + # @note `http.url` is usually not readily available on the server side but would have to be assembled in a cumbersome and sometimes lossy process from other information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus preferred to supply the raw data that is available + HTTP_SERVER_NAME = 'http.server_name' + + # The matched route (path template) + HTTP_ROUTE = 'http.route' + + # The IP address of the original client behind all proxies, if known (e.g. from [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)) + # @note This is not necessarily the same as `net.peer.ip`, which would + # identify the network-level peer, which may be a proxy. + # + # This attribute should be set when a source of information different + # from the one used for `net.peer.ip`, is available even if that other + # source just confirms the same value as `net.peer.ip`. + # Rationale: For `net.peer.ip`, one typically does not know if it + # comes from a proxy, reverse proxy, or the actual client. Setting + # `http.client_ip` when it's the same as `net.peer.ip` means that + # one is at least somewhat confident that the address is not that of + # the closest proxy + HTTP_CLIENT_IP = 'http.client_ip' + + # Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host + NET_HOST_IP = 'net.host.ip' + + # Like `net.peer.port` but for the host port + NET_HOST_PORT = 'net.host.port' + + # Local hostname or similar, see note below + NET_HOST_NAME = 'net.host.name' + + # The internet connection type currently being used by the host + NET_HOST_CONNECTION_TYPE = 'net.host.connection.type' + + # This describes more details regarding the connection.type. 
It may be the type of cell technology connection, but it could be used for describing details about a wifi connection + NET_HOST_CONNECTION_SUBTYPE = 'net.host.connection.subtype' + + # The name of the mobile carrier + NET_HOST_CARRIER_NAME = 'net.host.carrier.name' + + # The mobile carrier country code + NET_HOST_CARRIER_MCC = 'net.host.carrier.mcc' + + # The mobile carrier network code + NET_HOST_CARRIER_MNC = 'net.host.carrier.mnc' + + # The ISO 3166-1 alpha-2 2-character country code associated with the mobile carrier network + NET_HOST_CARRIER_ICC = 'net.host.carrier.icc' + + # A string identifying the messaging system + MESSAGING_SYSTEM = 'messaging.system' + + # The message destination name. This might be equal to the span name but is required nevertheless + MESSAGING_DESTINATION = 'messaging.destination' + + # The kind of message destination + MESSAGING_DESTINATION_KIND = 'messaging.destination_kind' + + # A boolean that is true if the message destination is temporary + MESSAGING_TEMP_DESTINATION = 'messaging.temp_destination' + + # The name of the transport protocol + MESSAGING_PROTOCOL = 'messaging.protocol' + + # The version of the transport protocol + MESSAGING_PROTOCOL_VERSION = 'messaging.protocol_version' + + # Connection string + MESSAGING_URL = 'messaging.url' + + # A value used by the messaging system as an identifier for the message, represented as a string + MESSAGING_MESSAGE_ID = 'messaging.message_id' + + # The [conversation ID](#conversations) identifying the conversation to which the message belongs, represented as a string. Sometimes called "Correlation ID" + MESSAGING_CONVERSATION_ID = 'messaging.conversation_id' + + # The (uncompressed) size of the message payload in bytes. Also use this attribute if it is unknown whether the compressed or uncompressed payload size is reported + MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES = 'messaging.message_payload_size_bytes' + + # The compressed size of the message payload in bytes + MESSAGING_MESSAGE_PAYLOAD_COMPRESSED_SIZE_BYTES = 'messaging.message_payload_compressed_size_bytes' + + # A string containing the function invocation time in the [ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed in [UTC](https://www.w3.org/TR/NOTE-datetime) + FAAS_TIME = 'faas.time' + + # A string containing the schedule period as [Cron Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm) + FAAS_CRON = 'faas.cron' + + # A boolean that is true if the serverless function is executed for the first time (aka cold-start) + FAAS_COLDSTART = 'faas.coldstart' + + # The name of the invoked function + # @note SHOULD be equal to the `faas.name` resource attribute of the invoked function + FAAS_INVOKED_NAME = 'faas.invoked_name' + + # The cloud provider of the invoked function + # @note SHOULD be equal to the `cloud.provider` resource attribute of the invoked function + FAAS_INVOKED_PROVIDER = 'faas.invoked_provider' + + # The cloud region of the invoked function + # @note SHOULD be equal to the `cloud.region` resource attribute of the invoked function + FAAS_INVOKED_REGION = 'faas.invoked_region' + + # The [`service.name`](../../resource/semantic_conventions/README.md#service) of the remote service. 
SHOULD be equal to the actual `service.name` resource attribute of the remote service if any + PEER_SERVICE = 'peer.service' + + # Username or client_id extracted from the access token or [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the inbound request from outside the system + ENDUSER_ID = 'enduser.id' + + # Actual/assumed role the client is making the request under extracted from token or application security context + ENDUSER_ROLE = 'enduser.role' + + # Scopes or granted authorities the client currently possesses extracted from token or application security context. The value would come from the scope associated with an [OAuth 2.0 Access Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value in a [SAML 2.0 Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html) + ENDUSER_SCOPE = 'enduser.scope' + + # Current "managed" thread ID (as opposed to OS thread ID) + THREAD_ID = 'thread.id' + + # Current thread name + THREAD_NAME = 'thread.name' + + # The method or function name, or equivalent (usually rightmost part of the code unit's name) + CODE_FUNCTION = 'code.function' + + # The "namespace" within which `code.function` is defined. Usually the qualified class or module name, such that `code.namespace` + some separator + `code.function` form a unique identifier for the code unit + CODE_NAMESPACE = 'code.namespace' + + # The source code file name that identifies the code unit as uniquely as possible (preferably an absolute file path) + CODE_FILEPATH = 'code.filepath' + + # The line number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function` + CODE_LINENO = 'code.lineno' + + # The value `aws-api` + RPC_SYSTEM = 'rpc.system' + + # The name of the service to which a request is made, as returned by the AWS SDK + # @note This is the logical name of the service from the RPC interface perspective, which can be different from the name of any implementing class. The `code.namespace` attribute may be used to store the latter (despite the attribute name, it may include a class name; e.g., class with method actually executing the call on the server side, RPC client stub class on the client side) + RPC_SERVICE = 'rpc.service' + + # The name of the operation corresponding to the request, as returned by the AWS SDK + # @note This is the logical name of the method from the RPC interface perspective, which can be different from the name of any implementing method/function. 
The `code.function` attribute may be used to store the latter (e.g., method actually executing the call on the server side, RPC client stub method on the client side) + RPC_METHOD = 'rpc.method' + + # The keys in the `RequestItems` object field + AWS_DYNAMODB_TABLE_NAMES = 'aws.dynamodb.table_names' + + # The JSON-serialized value of each item in the `ConsumedCapacity` response field + AWS_DYNAMODB_CONSUMED_CAPACITY = 'aws.dynamodb.consumed_capacity' + + # The JSON-serialized value of the `ItemCollectionMetrics` response field + AWS_DYNAMODB_ITEM_COLLECTION_METRICS = 'aws.dynamodb.item_collection_metrics' + + # The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter + AWS_DYNAMODB_PROVISIONED_READ_CAPACITY = 'aws.dynamodb.provisioned_read_capacity' + + # The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter + AWS_DYNAMODB_PROVISIONED_WRITE_CAPACITY = 'aws.dynamodb.provisioned_write_capacity' + + # The value of the `ConsistentRead` request parameter + AWS_DYNAMODB_CONSISTENT_READ = 'aws.dynamodb.consistent_read' + + # The value of the `ProjectionExpression` request parameter + AWS_DYNAMODB_PROJECTION = 'aws.dynamodb.projection' + + # The value of the `Limit` request parameter + AWS_DYNAMODB_LIMIT = 'aws.dynamodb.limit' + + # The value of the `AttributesToGet` request parameter + AWS_DYNAMODB_ATTRIBUTES_TO_GET = 'aws.dynamodb.attributes_to_get' + + # The value of the `IndexName` request parameter + AWS_DYNAMODB_INDEX_NAME = 'aws.dynamodb.index_name' + + # The value of the `Select` request parameter + AWS_DYNAMODB_SELECT = 'aws.dynamodb.select' + + # The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request field + AWS_DYNAMODB_GLOBAL_SECONDARY_INDEXES = 'aws.dynamodb.global_secondary_indexes' + + # The JSON-serialized value of each item of the `LocalSecondaryIndexes` request field + AWS_DYNAMODB_LOCAL_SECONDARY_INDEXES = 'aws.dynamodb.local_secondary_indexes' + + # The value of the `ExclusiveStartTableName` request parameter + AWS_DYNAMODB_EXCLUSIVE_START_TABLE = 'aws.dynamodb.exclusive_start_table' + + # The the number of items in the `TableNames` response parameter + AWS_DYNAMODB_TABLE_COUNT = 'aws.dynamodb.table_count' + + # The value of the `ScanIndexForward` request parameter + AWS_DYNAMODB_SCAN_FORWARD = 'aws.dynamodb.scan_forward' + + # The value of the `Segment` request parameter + AWS_DYNAMODB_SEGMENT = 'aws.dynamodb.segment' + + # The value of the `TotalSegments` request parameter + AWS_DYNAMODB_TOTAL_SEGMENTS = 'aws.dynamodb.total_segments' + + # The value of the `Count` response parameter + AWS_DYNAMODB_COUNT = 'aws.dynamodb.count' + + # The value of the `ScannedCount` response parameter + AWS_DYNAMODB_SCANNED_COUNT = 'aws.dynamodb.scanned_count' + + # The JSON-serialized value of each item in the `AttributeDefinitions` request field + AWS_DYNAMODB_ATTRIBUTE_DEFINITIONS = 'aws.dynamodb.attribute_definitions' + + # The JSON-serialized value of each item in the the `GlobalSecondaryIndexUpdates` request field + AWS_DYNAMODB_GLOBAL_SECONDARY_INDEX_UPDATES = 'aws.dynamodb.global_secondary_index_updates' + + # A string identifying the kind of message consumption as defined in the [Operation names](#operation-names) section above. If the operation is "send", this attribute MUST NOT be set, since the operation can be inferred from the span kind in that case + MESSAGING_OPERATION = 'messaging.operation' + + # The identifier for the consumer receiving a message. 
For Kafka, set it to `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are present, or only `messaging.kafka.consumer_group`. For brokers, such as RabbitMQ and Artemis, set it to the `client_id` of the client consuming the message + MESSAGING_CONSUMER_ID = 'messaging.consumer_id' + + # RabbitMQ message routing key + MESSAGING_RABBITMQ_ROUTING_KEY = 'messaging.rabbitmq.routing_key' + + # Message keys in Kafka are used for grouping alike messages to ensure they're processed on the same partition. They differ from `messaging.message_id` in that they're not unique. If the key is `null`, the attribute MUST NOT be set + # @note If the key type is not string, it's string representation has to be supplied for the attribute. If the key has no unambiguous, canonical string form, don't include its value + MESSAGING_KAFKA_MESSAGE_KEY = 'messaging.kafka.message_key' + + # Name of the Kafka Consumer Group that is handling the message. Only applies to consumers, not producers + MESSAGING_KAFKA_CONSUMER_GROUP = 'messaging.kafka.consumer_group' + + # Client Id for the Consumer or Producer that is handling the message + MESSAGING_KAFKA_CLIENT_ID = 'messaging.kafka.client_id' + + # Partition the message is sent to + MESSAGING_KAFKA_PARTITION = 'messaging.kafka.partition' + + # A boolean that is true if the message is a tombstone + MESSAGING_KAFKA_TOMBSTONE = 'messaging.kafka.tombstone' + + # Namespace of RocketMQ resources, resources in different namespaces are individual + MESSAGING_ROCKETMQ_NAMESPACE = 'messaging.rocketmq.namespace' + + # Name of the RocketMQ producer/consumer group that is handling the message. The client type is identified by the SpanKind + MESSAGING_ROCKETMQ_CLIENT_GROUP = 'messaging.rocketmq.client_group' + + # The unique identifier for each client + MESSAGING_ROCKETMQ_CLIENT_ID = 'messaging.rocketmq.client_id' + + # Type of message + MESSAGING_ROCKETMQ_MESSAGE_TYPE = 'messaging.rocketmq.message_type' + + # The secondary classifier of message besides topic + MESSAGING_ROCKETMQ_MESSAGE_TAG = 'messaging.rocketmq.message_tag' + + # Key(s) of message, another way to mark message besides message id + MESSAGING_ROCKETMQ_MESSAGE_KEYS = 'messaging.rocketmq.message_keys' + + # Model of message consumption. This only applies to consumer spans + MESSAGING_ROCKETMQ_CONSUMPTION_MODEL = 'messaging.rocketmq.consumption_model' + + # The [numeric status code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC request + RPC_GRPC_STATUS_CODE = 'rpc.grpc.status_code' + + # Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 does not specify this, the value can be omitted + RPC_JSONRPC_VERSION = 'rpc.jsonrpc.version' + + # `id` property of request or response. Since protocol allows id to be int, string, `null` or missing (for notifications), value is expected to be cast to string for simplicity. Use empty string in case of `null` value. 
Omit entirely if this is a notification + RPC_JSONRPC_REQUEST_ID = 'rpc.jsonrpc.request_id' + + # `error.code` property of response if it is an error response + RPC_JSONRPC_ERROR_CODE = 'rpc.jsonrpc.error_code' + + # `error.message` property of response if it is an error response + RPC_JSONRPC_ERROR_MESSAGE = 'rpc.jsonrpc.error_message' + + # Whether this is a received or sent message + MESSAGE_TYPE = 'message.type' + + # MUST be calculated as two different counters starting from `1` one for sent messages and one for received message + # @note This way we guarantee that the values will be consistent between different implementations + MESSAGE_ID = 'message.id' + + # Compressed size of the message in bytes + MESSAGE_COMPRESSED_SIZE = 'message.compressed_size' + + # Uncompressed size of the message in bytes + MESSAGE_UNCOMPRESSED_SIZE = 'message.uncompressed_size' + + end + end +end diff --git a/semantic_conventions/lib/opentelemetry/semantic_conventions/1.20.0/resource.rb b/semantic_conventions/lib/opentelemetry/semantic_conventions/1.20.0/resource.rb new file mode 100644 index 0000000000..e558154162 --- /dev/null +++ b/semantic_conventions/lib/opentelemetry/semantic_conventions/1.20.0/resource.rb @@ -0,0 +1,402 @@ +# frozen_string_literal: true + +# Copyright The OpenTelemetry Authors +# +# SPDX-License-Identifier: Apache-2.0 + +require_relative '../1.10.0/resource' + +module OpenTelemetry + module SemanticConventions_1_20_0 + # https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/ + module Resource + def self.const_missing(const_name) + attribute_name = OpenTelemetry::SemanticConventions_1_10_0::Trace.const_get(const_name) + super(const_name) unless attribute_name + + warn "#{const_name} is deprecated." + const_set(const_name, attribute_name) + attribute_name + end + + # The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html) + AWS_ECS_CLUSTER_ARN = 'aws.ecs.cluster.arn' + + # The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html) + AWS_ECS_CONTAINER_ARN = 'aws.ecs.container.arn' + + # The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) for an ECS task + AWS_ECS_LAUNCHTYPE = 'aws.ecs.launchtype' + + # The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) + AWS_ECS_TASK_ARN = 'aws.ecs.task.arn' + + # The task definition family this task definition is a member of + AWS_ECS_TASK_FAMILY = 'aws.ecs.task.family' + + # The revision for this task definition + AWS_ECS_TASK_REVISION = 'aws.ecs.task.revision' + + # The ARN of an EKS cluster + AWS_EKS_CLUSTER_ARN = 'aws.eks.cluster.arn' + + # The Amazon Resource Name(s) (ARN) of the AWS log group(s) + # + # @note See the [log group ARN format documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format) + AWS_LOG_GROUP_ARNS = 'aws.log.group.arns' + + # The name(s) of the AWS log group(s) an application is writing to + # + # @note Multiple log groups must be supported for cases like multi-container applications, where a single application has sidecar containers, and each write to their own log group + AWS_LOG_GROUP_NAMES = 'aws.log.group.names' + + # The ARN(s) of the AWS log stream(s) + # + # @note See the [log stream ARN format 
documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain several log streams, so these ARNs necessarily identify both a log group and a log stream + AWS_LOG_STREAM_ARNS = 'aws.log.stream.arns' + + # The name(s) of the AWS log stream(s) an application is writing to + AWS_LOG_STREAM_NAMES = 'aws.log.stream.names' + + # Array of brand name and version separated by a space + # + # @note This value is intended to be taken from the [UA client hints API](https://wicg.github.io/ua-client-hints/#interface) (`navigator.userAgentData.brands`) + BROWSER_BRANDS = 'browser.brands' + + # Preferred language of the user using the browser + # + # @note This value is intended to be taken from the Navigator API `navigator.language` + BROWSER_LANGUAGE = 'browser.language' + + # A boolean that is true if the browser is running on a mobile device + # + # @note This value is intended to be taken from the [UA client hints API](https://wicg.github.io/ua-client-hints/#interface) (`navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be left unset + BROWSER_MOBILE = 'browser.mobile' + + # The platform on which the browser is running + # + # @note This value is intended to be taken from the [UA client hints API](https://wicg.github.io/ua-client-hints/#interface) (`navigator.userAgentData.platform`). If unavailable, the legacy `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD be left unset in order for the values to be consistent. + # The list of possible values is defined in the [W3C User-Agent Client Hints specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). Note that some (but not all) of these values can overlap with values in the [`os.type` and `os.name` attributes](./os.md). However, for consistency, the values in the `browser.platform` attribute should capture the exact value that the user agent provides + BROWSER_PLATFORM = 'browser.platform' + + # The cloud account ID the resource is assigned to + CLOUD_ACCOUNT_ID = 'cloud.account.id' + + # Cloud regions often have multiple, isolated locations known as zones to increase availability. Availability zone represents the zone where the resource is running + # + # @note Availability zones are called "zones" on Alibaba Cloud and Google Cloud + CLOUD_AVAILABILITY_ZONE = 'cloud.availability_zone' + + # The cloud platform in use + # + # @note The prefix of the service SHOULD match the one specified in `cloud.provider` + CLOUD_PLATFORM = 'cloud.platform' + + # Name of the cloud provider + CLOUD_PROVIDER = 'cloud.provider' + + # The geographical region the resource is running + # + # @note Refer to your provider's docs to see the available regions, for example [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), [Google Cloud regions](https://cloud.google.com/about/locations), or [Tencent Cloud regions](https://www.tencentcloud.com/document/product/213/6091) + CLOUD_REGION = 'cloud.region' + + # Cloud provider-specific native identifier of the monitored cloud resource (e.g. 
an [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) on AWS, a [fully qualified resource ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) on Azure, a [full resource name](https://cloud.google.com/apis/design/resource_names#full_resource_name) on GCP) + # + # @note On some cloud providers, it may not be possible to determine the full ID at startup, + # so it may be necessary to set `cloud.resource_id` as a span attribute instead. + # + # The exact value to use for `cloud.resource_id` depends on the cloud provider. + # The following well-known definitions MUST be used if you set this attribute and they apply: + # + # * **AWS Lambda:** The function [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + # Take care not to use the "invoked ARN" directly but replace any + # [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + # with the resolved function version, as the same runtime instance may be invokable with + # multiple different aliases. + # * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-resource-names) + # * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) of the invoked function, + # *not* the function app, having the form + # `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. + # This means that a span attribute MUST be used, as an Azure function app can host multiple functions that would usually share + # a TracerProvider + CLOUD_RESOURCE_ID = 'cloud.resource_id' + + # Container ID. Usually a UUID, as for example used to [identify Docker containers](https://docs.docker.com/engine/reference/run/#container-identification). The UUID might be abbreviated + CONTAINER_ID = 'container.id' + + # Name of the image the container was built on + CONTAINER_IMAGE_NAME = 'container.image.name' + + # Container image tag + CONTAINER_IMAGE_TAG = 'container.image.tag' + + # Container name used by container runtime + CONTAINER_NAME = 'container.name' + + # The container runtime managing this container + CONTAINER_RUNTIME = 'container.runtime' + + # Name of the [deployment environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka deployment tier) + DEPLOYMENT_ENVIRONMENT = 'deployment.environment' + + # A unique identifier representing the device + # + # @note The device identifier MUST only be defined using the values outlined below. This value is not an advertising identifier and MUST NOT be used as such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the Firebase Installation ID or a globally unique UUID which is persisted across sessions in your application. More information can be found [here](https://developer.android.com/training/articles/user-data-ids) on best practices and exact implementation details. Caution should be taken when storing personal data or anything which can identify a user. GDPR and data protection laws may apply, ensure you do your own due diligence + DEVICE_ID = 'device.id' + + # The name of the device manufacturer + # + # @note The Android OS provides this field via [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). 
iOS apps SHOULD hardcode the value `Apple` + DEVICE_MANUFACTURER = 'device.manufacturer' + + # The model identifier for the device + # + # @note It's recommended this value represents a machine readable version of the model identifier rather than the market or consumer-friendly name of the device + DEVICE_MODEL_IDENTIFIER = 'device.model.identifier' + + # The marketing name for the device model + # + # @note It's recommended this value represents a human readable version of the device model rather than a machine readable alternative + DEVICE_MODEL_NAME = 'device.model.name' + + # The execution environment ID as a string, that will be potentially reused for other invocations to the same function/function version + # + # @note * **AWS Lambda:** Use the (full) log stream name + FAAS_INSTANCE = 'faas.instance' + + # The amount of memory available to the serverless function converted to Bytes + # + # @note It's recommended to set this attribute since e.g. too little memory can easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must be multiplied by 1,048,576) + FAAS_MAX_MEMORY = 'faas.max_memory' + + # The name of the single function that this runtime instance executes + # + # @note This is the name of the function as configured/deployed on the FaaS + # platform and is usually different from the name of the callback + # function (which may be stored in the + # [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) + # span attributes). + # + # For some cloud providers, the above definition is ambiguous. The following + # definition of function name MUST be used for this attribute + # (and consequently the span name) for the listed cloud providers/products: + # + # * **Azure:** The full name `/`, i.e., function app name + # followed by a forward slash followed by the function name (this form + # can also be seen in the resource JSON for the function). + # This means that a span attribute MUST be used, as an Azure function + # app can host multiple functions that would usually share + # a TracerProvider (see also the `cloud.resource_id` attribute) + FAAS_NAME = 'faas.name' + + # The immutable version of the function being executed + # + # @note Depending on the cloud provider and platform, use: + # + # * **AWS Lambda:** The [function version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + # (an integer represented as a decimal string). + # * **Google Cloud Run:** The [revision](https://cloud.google.com/run/docs/managing/revisions) + # (i.e., the function name plus the revision suffix). + # * **Google Cloud Functions:** The value of the + # [`K_REVISION` environment variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + # * **Azure Functions:** Not applicable. Do not set this attribute + FAAS_VERSION = 'faas.version' + + # Unique identifier for the application + HEROKU_APP_ID = 'heroku.app.id' + + # Commit hash for the current release + HEROKU_RELEASE_COMMIT = 'heroku.release.commit' + + # Time and date the release was created + HEROKU_RELEASE_CREATION_TIMESTAMP = 'heroku.release.creation_timestamp' + + # The CPU architecture the host system is running on + HOST_ARCH = 'host.arch' + + # Unique host ID. For Cloud, this must be the instance_id assigned by the cloud provider. For non-containerized systems, this should be the `machine-id`. 
See the table below for the sources to use to determine the `machine-id` based on operating system + HOST_ID = 'host.id' + + # VM image ID. For Cloud, this value is from the provider + HOST_IMAGE_ID = 'host.image.id' + + # Name of the VM image or OS install the host was instantiated from + HOST_IMAGE_NAME = 'host.image.name' + + # The version string of the VM image as defined in [Version Attributes](README.md#version-attributes) + HOST_IMAGE_VERSION = 'host.image.version' + + # Name of the host. On Unix systems, it may contain what the hostname command returns, or the fully qualified hostname, or another name specified by the user + HOST_NAME = 'host.name' + + # Type of host. For Cloud, this must be the machine type + HOST_TYPE = 'host.type' + + # The name of the cluster + K8S_CLUSTER_NAME = 'k8s.cluster.name' + + # The name of the Container from Pod specification, must be unique within a Pod. Container runtime usually uses different globally unique name (`container.name`) + K8S_CONTAINER_NAME = 'k8s.container.name' + + # Number of times the container was restarted. This attribute can be used to identify a particular container (running or stopped) within a container spec + K8S_CONTAINER_RESTART_COUNT = 'k8s.container.restart_count' + + # The name of the CronJob + K8S_CRONJOB_NAME = 'k8s.cronjob.name' + + # The UID of the CronJob + K8S_CRONJOB_UID = 'k8s.cronjob.uid' + + # The name of the DaemonSet + K8S_DAEMONSET_NAME = 'k8s.daemonset.name' + + # The UID of the DaemonSet + K8S_DAEMONSET_UID = 'k8s.daemonset.uid' + + # The name of the Deployment + K8S_DEPLOYMENT_NAME = 'k8s.deployment.name' + + # The UID of the Deployment + K8S_DEPLOYMENT_UID = 'k8s.deployment.uid' + + # The name of the Job + K8S_JOB_NAME = 'k8s.job.name' + + # The UID of the Job + K8S_JOB_UID = 'k8s.job.uid' + + # The name of the namespace that the pod is running in + K8S_NAMESPACE_NAME = 'k8s.namespace.name' + + # The name of the Node + K8S_NODE_NAME = 'k8s.node.name' + + # The UID of the Node + K8S_NODE_UID = 'k8s.node.uid' + + # The name of the Pod + K8S_POD_NAME = 'k8s.pod.name' + + # The UID of the Pod + K8S_POD_UID = 'k8s.pod.uid' + + # The name of the ReplicaSet + K8S_REPLICASET_NAME = 'k8s.replicaset.name' + + # The UID of the ReplicaSet + K8S_REPLICASET_UID = 'k8s.replicaset.uid' + + # The name of the StatefulSet + K8S_STATEFULSET_NAME = 'k8s.statefulset.name' + + # The UID of the StatefulSet + K8S_STATEFULSET_UID = 'k8s.statefulset.uid' + + # Human readable (not intended to be parsed) OS version information, like e.g. reported by `ver` or `lsb_release -a` commands + OS_DESCRIPTION = 'os.description' + + # Human readable operating system name + OS_NAME = 'os.name' + + # The operating system type + OS_TYPE = 'os.type' + + # The version string of the operating system as defined in [Version Attributes](../../resource/semantic_conventions/README.md#version-attributes) + OS_VERSION = 'os.version' + + # Deprecated, use the `otel.scope.name` attribute + # + # @deprecated + OTEL_LIBRARY_NAME = 'otel.library.name' + + # Deprecated, use the `otel.scope.version` attribute + # + # @deprecated + OTEL_LIBRARY_VERSION = 'otel.library.version' + + # The name of the instrumentation scope - (`InstrumentationScope.Name` in OTLP) + OTEL_SCOPE_NAME = 'otel.scope.name' + + # The version of the instrumentation scope - (`InstrumentationScope.Version` in OTLP) + OTEL_SCOPE_VERSION = 'otel.scope.version' + + # The command used to launch the process (i.e. the command name). 
On Linux based systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter extracted from `GetCommandLineW` + PROCESS_COMMAND = 'process.command' + + # All the command arguments (including the command/executable itself) as received by the process. On Linux-based systems (and some other Unixoid systems supporting procfs), can be set according to the list of null-delimited strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be the full argv vector passed to `main` + PROCESS_COMMAND_ARGS = 'process.command_args' + + # The full command used to launch the process as a single string representing the full command. On Windows, can be set to the result of `GetCommandLineW`. Do not set this if you have to assemble it just for monitoring; use `process.command_args` instead + PROCESS_COMMAND_LINE = 'process.command_line' + + # The name of the process executable. On Linux based systems, can be set to the `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of `GetProcessImageFileNameW` + PROCESS_EXECUTABLE_NAME = 'process.executable.name' + + # The full path to the process executable. On Linux based systems, can be set to the target of `proc/[pid]/exe`. On Windows, can be set to the result of `GetProcessImageFileNameW` + PROCESS_EXECUTABLE_PATH = 'process.executable.path' + + # The username of the user that owns the process + PROCESS_OWNER = 'process.owner' + + # Parent Process identifier (PID) + PROCESS_PARENT_PID = 'process.parent_pid' + + # Process identifier (PID) + PROCESS_PID = 'process.pid' + + # An additional description about the runtime of the process, for example a specific vendor customization of the runtime environment + PROCESS_RUNTIME_DESCRIPTION = 'process.runtime.description' + + # The name of the runtime of this process. For compiled native binaries, this SHOULD be the name of the compiler + PROCESS_RUNTIME_NAME = 'process.runtime.name' + + # The version of the runtime of this process, as returned by the runtime without modification + PROCESS_RUNTIME_VERSION = 'process.runtime.version' + + # The string ID of the service instance + # + # @note MUST be unique for each instance of the same `service.namespace,service.name` pair (in other words `service.namespace,service.name,service.instance.id` triplet MUST be globally unique). The ID helps to distinguish instances of the same service that exist at the same time (e.g. instances of a horizontally scaled service). It is preferable for the ID to be persistent and stay the same for the lifetime of the service instance, however it is acceptable that the ID is ephemeral and changes during important lifetime events for the service (e.g. service restarts). If the service has no inherent unique ID that can be used as the value of this attribute it is recommended to generate a random Version 1 or Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use Version 5, see RFC 4122 for more recommendations) + SERVICE_INSTANCE_ID = 'service.instance.id' + + # Logical name of the service + # + # @note MUST be the same for all instances of horizontally scaled services. If the value was not specified, SDKs MUST fallback to `unknown_service:` concatenated with [`process.executable.name`](process.md#process), e.g. `unknown_service:bash`. 
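(Aside — illustration only, not part of the generated file or this diff: a minimal, hedged sketch of passing the versioned resource constants above to the Ruby SDK when building a Resource. The `opentelemetry-sdk` gem and its setup are assumed.)

require 'socket'
require 'securerandom'
require 'opentelemetry/sdk'

res = OpenTelemetry::SemanticConventions_1_10_0::Resource
# Resource.create takes a flat hash of attribute key => value
resource = OpenTelemetry::SDK::Resources::Resource.create(
  res::HOST_NAME => Socket.gethostname,
  res::PROCESS_PID => Process.pid,
  res::SERVICE_INSTANCE_ID => SecureRandom.uuid # hypothetical: any id that is stable per instance works
)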
If `process.executable.name` is not available, the value MUST be set to `unknown_service` + SERVICE_NAME = 'service.name' + + # A namespace for `service.name` + # + # @note A string value having a meaning that helps to distinguish a group of services, for example the team name that owns a group of services. `service.name` is expected to be unique within the same namespace. If `service.namespace` is not specified in the Resource then `service.name` is expected to be unique for all services that have no explicit namespace defined (so the empty/unspecified namespace is simply one more valid namespace). Zero-length namespace string is assumed equal to unspecified namespace + SERVICE_NAMESPACE = 'service.namespace' + + # The version string of the service API or implementation + SERVICE_VERSION = 'service.version' + + # The version string of the auto instrumentation agent, if used + TELEMETRY_AUTO_VERSION = 'telemetry.auto.version' + + # The language of the telemetry SDK + TELEMETRY_SDK_LANGUAGE = 'telemetry.sdk.language' + + # The name of the telemetry SDK as defined above + TELEMETRY_SDK_NAME = 'telemetry.sdk.name' + + # The version string of the telemetry SDK + TELEMETRY_SDK_VERSION = 'telemetry.sdk.version' + + # Full user-agent string provided by the browser + # + # @note The user-agent value SHOULD be provided only from browsers that do not have a mechanism to retrieve brands and platform individually from the User-Agent Client Hints API. To retrieve the value, the legacy `navigator.userAgent` API can be used + USER_AGENT_ORIGINAL = 'user_agent.original' + + # Additional description of the web engine (e.g. detailed version and edition information) + WEBENGINE_DESCRIPTION = 'webengine.description' + + # The name of the web engine + WEBENGINE_NAME = 'webengine.name' + + # The version of the web engine + WEBENGINE_VERSION = 'webengine.version' + + end + end +end \ No newline at end of file diff --git a/semantic_conventions/lib/opentelemetry/semantic_conventions/1.20.0/trace.rb b/semantic_conventions/lib/opentelemetry/semantic_conventions/1.20.0/trace.rb new file mode 100644 index 0000000000..c9aed14d5e --- /dev/null +++ b/semantic_conventions/lib/opentelemetry/semantic_conventions/1.20.0/trace.rb @@ -0,0 +1,760 @@ +# frozen_string_literal: true + +# Copyright The OpenTelemetry Authors +# +# SPDX-License-Identifier: Apache-2.0 + +require_relative '../1.10.0/trace' + +module OpenTelemetry + module SemanticConventions_1_20_0 + # https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/ + module Trace + def self.const_missing(const_name) + attribute_name = OpenTelemetry::SemanticConventions_1_10_0::Trace.const_get(const_name) + super(const_name) unless attribute_name + + warn "#{const_name} is deprecated." 
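+ # Memoize the constant resolved from the 1.10.0 module on this module, so the lookup (and the deprecation warning) runs at most once per constant, then return the attribute name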
+ const_set(const_name, attribute_name) + attribute_name + end + + # The JSON-serialized value of each item in the `AttributeDefinitions` request field + AWS_DYNAMODB_ATTRIBUTE_DEFINITIONS = 'aws.dynamodb.attribute_definitions' + + # The value of the `AttributesToGet` request parameter + AWS_DYNAMODB_ATTRIBUTES_TO_GET = 'aws.dynamodb.attributes_to_get' + + # The value of the `ConsistentRead` request parameter + AWS_DYNAMODB_CONSISTENT_READ = 'aws.dynamodb.consistent_read' + + # The JSON-serialized value of each item in the `ConsumedCapacity` response field + AWS_DYNAMODB_CONSUMED_CAPACITY = 'aws.dynamodb.consumed_capacity' + + # The value of the `Count` response parameter + AWS_DYNAMODB_COUNT = 'aws.dynamodb.count' + + # The value of the `ExclusiveStartTableName` request parameter + AWS_DYNAMODB_EXCLUSIVE_START_TABLE = 'aws.dynamodb.exclusive_start_table' + + # The JSON-serialized value of each item in the the `GlobalSecondaryIndexUpdates` request field + AWS_DYNAMODB_GLOBAL_SECONDARY_INDEX_UPDATES = 'aws.dynamodb.global_secondary_index_updates' + + # The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request field + AWS_DYNAMODB_GLOBAL_SECONDARY_INDEXES = 'aws.dynamodb.global_secondary_indexes' + + # The value of the `IndexName` request parameter + AWS_DYNAMODB_INDEX_NAME = 'aws.dynamodb.index_name' + + # The JSON-serialized value of the `ItemCollectionMetrics` response field + AWS_DYNAMODB_ITEM_COLLECTION_METRICS = 'aws.dynamodb.item_collection_metrics' + + # The value of the `Limit` request parameter + AWS_DYNAMODB_LIMIT = 'aws.dynamodb.limit' + + # The JSON-serialized value of each item of the `LocalSecondaryIndexes` request field + AWS_DYNAMODB_LOCAL_SECONDARY_INDEXES = 'aws.dynamodb.local_secondary_indexes' + + # The value of the `ProjectionExpression` request parameter + AWS_DYNAMODB_PROJECTION = 'aws.dynamodb.projection' + + # The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter + AWS_DYNAMODB_PROVISIONED_READ_CAPACITY = 'aws.dynamodb.provisioned_read_capacity' + + # The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter + AWS_DYNAMODB_PROVISIONED_WRITE_CAPACITY = 'aws.dynamodb.provisioned_write_capacity' + + # The value of the `ScanIndexForward` request parameter + AWS_DYNAMODB_SCAN_FORWARD = 'aws.dynamodb.scan_forward' + + # The value of the `ScannedCount` response parameter + AWS_DYNAMODB_SCANNED_COUNT = 'aws.dynamodb.scanned_count' + + # The value of the `Segment` request parameter + AWS_DYNAMODB_SEGMENT = 'aws.dynamodb.segment' + + # The value of the `Select` request parameter + AWS_DYNAMODB_SELECT = 'aws.dynamodb.select' + + # The the number of items in the `TableNames` response parameter + AWS_DYNAMODB_TABLE_COUNT = 'aws.dynamodb.table_count' + + # The keys in the `RequestItems` object field + AWS_DYNAMODB_TABLE_NAMES = 'aws.dynamodb.table_names' + + # The value of the `TotalSegments` request parameter + AWS_DYNAMODB_TOTAL_SEGMENTS = 'aws.dynamodb.total_segments' + + # The full invoked ARN as provided on the `Context` passed to the function (`Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next` applicable) + # + # @note This may be different from `cloud.resource_id` if an alias is involved + AWS_LAMBDA_INVOKED_ARN = 'aws.lambda.invoked_arn' + + # The AWS request ID as returned in the response headers `x-amz-request-id` or `x-amz-requestid` + AWS_REQUEST_ID = 'aws.request_id' + + # The S3 bucket name the request refers to. 
Corresponds to the `--bucket` parameter of the [S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) operations + # + # @note The `bucket` attribute is applicable to all S3 operations that reference a bucket, i.e. that require the bucket name as a mandatory parameter. + # This applies to almost all S3 operations except `list-buckets` + AWS_S3_BUCKET = 'aws.s3.bucket' + + # The source object (in the form `bucket`/`key`) for the copy operation + # + # @note The `copy_source` attribute applies to S3 copy operations and corresponds to the `--copy-source` parameter + # of the [copy-object operation within the S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + # This applies in particular to the following operations: + # + # - [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + # - [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWS_S3_COPY_SOURCE = 'aws.s3.copy_source' + + # The delete request container that specifies the objects to be deleted + # + # @note The `delete` attribute is only applicable to the [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) operation. + # The `delete` attribute corresponds to the `--delete` parameter of the + # [delete-objects operation within the S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html) + AWS_S3_DELETE = 'aws.s3.delete' + + # The S3 object key the request refers to. Corresponds to the `--key` parameter of the [S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) operations + # + # @note The `key` attribute is applicable to all object-related S3 operations, i.e. that require the object key as a mandatory parameter. + # This applies in particular to the following operations: + # + # - [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + # - [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + # - [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + # - [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + # - [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + # - [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + # - [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + # - [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + # - [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + # - [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + # - [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + # - [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + # - [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWS_S3_KEY = 'aws.s3.key' + + # The part number of the part being uploaded in a multipart-upload operation. 
This is a positive integer between 1 and 10,000 + # + # @note The `part_number` attribute is only applicable to the [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + # and [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) operations. + # The `part_number` attribute corresponds to the `--part-number` parameter of the + # [upload-part operation within the S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + AWS_S3_PART_NUMBER = 'aws.s3.part_number' + + # Upload ID that identifies the multipart upload + # + # @note The `upload_id` attribute applies to S3 multipart-upload operations and corresponds to the `--upload-id` parameter + # of the [S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) multipart operations. + # This applies in particular to the following operations: + # + # - [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + # - [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + # - [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + # - [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + # - [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWS_S3_UPLOAD_ID = 'aws.s3.upload_id' + + # Cloud provider-specific native identifier of the monitored cloud resource (e.g. an [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) on AWS, a [fully qualified resource ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) on Azure, a [full resource name](https://cloud.google.com/apis/design/resource_names#full_resource_name) on GCP) + # + # @note On some cloud providers, it may not be possible to determine the full ID at startup, + # so it may be necessary to set `cloud.resource_id` as a span attribute instead. + # + # The exact value to use for `cloud.resource_id` depends on the cloud provider. + # The following well-known definitions MUST be used if you set this attribute and they apply: + # + # * **AWS Lambda:** The function [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + # Take care not to use the "invoked ARN" directly but replace any + # [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + # with the resolved function version, as the same runtime instance may be invokable with + # multiple different aliases. + # * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-resource-names) + # * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) of the invoked function, + # *not* the function app, having the form + # `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. 
+ # This means that a span attribute MUST be used, as an Azure function app can host multiple functions that would usually share + # a TracerProvider + CLOUD_RESOURCE_ID = 'cloud.resource_id' + + # The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) uniquely identifies the event + CLOUDEVENTS_EVENT_ID = 'cloudevents.event_id' + + # The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) identifies the context in which an event happened + CLOUDEVENTS_EVENT_SOURCE = 'cloudevents.event_source' + + # The [version of the CloudEvents specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses + CLOUDEVENTS_EVENT_SPEC_VERSION = 'cloudevents.event_spec_version' + + # The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) of the event in the context of the event producer (identified by source) + CLOUDEVENTS_EVENT_SUBJECT = 'cloudevents.event_subject' + + # The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) contains a value describing the type of event related to the originating occurrence + CLOUDEVENTS_EVENT_TYPE = 'cloudevents.event_type' + + # The column number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function` + CODE_COLUMN = 'code.column' + + # The source code file name that identifies the code unit as uniquely as possible (preferably an absolute file path) + CODE_FILEPATH = 'code.filepath' + + # The method or function name, or equivalent (usually rightmost part of the code unit's name) + CODE_FUNCTION = 'code.function' + + # The line number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function` + CODE_LINENO = 'code.lineno' + + # The "namespace" within which `code.function` is defined. Usually the qualified class or module name, such that `code.namespace` + some separator + `code.function` form a unique identifier for the code unit + CODE_NAMESPACE = 'code.namespace' + + # The consistency level of the query. Based on consistency values from [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html) + DB_CASSANDRA_CONSISTENCY_LEVEL = 'db.cassandra.consistency_level' + + # The data center of the coordinating node for a query + DB_CASSANDRA_COORDINATOR_DC = 'db.cassandra.coordinator.dc' + + # The ID of the coordinating node for a query + DB_CASSANDRA_COORDINATOR_ID = 'db.cassandra.coordinator.id' + + # Whether or not the query is idempotent + DB_CASSANDRA_IDEMPOTENCE = 'db.cassandra.idempotence' + + # The fetch size used for paging, i.e. how many rows will be returned at once + DB_CASSANDRA_PAGE_SIZE = 'db.cassandra.page_size' + + # The number of times a query was speculatively executed. Not set or `0` if the query was not executed speculatively + DB_CASSANDRA_SPECULATIVE_EXECUTION_COUNT = 'db.cassandra.speculative_execution_count' + + # The name of the primary table that the operation is acting upon, including the keyspace name (if applicable) + # + # @note This mirrors the db.sql.table attribute but references cassandra rather than sql. It is not recommended to attempt any client-side parsing of `db.statement` just to get this property, but it should be set if it is provided by the library being instrumented. 
If the operation is acting upon an anonymous table, or more than one table, this value MUST NOT be set + DB_CASSANDRA_TABLE = 'db.cassandra.table' + + # The connection string used to connect to the database. It is recommended to remove embedded credentials + DB_CONNECTION_STRING = 'db.connection_string' + + # Unique Cosmos client instance id + DB_COSMOSDB_CLIENT_ID = 'db.cosmosdb.client_id' + + # Cosmos client connection mode + DB_COSMOSDB_CONNECTION_MODE = 'db.cosmosdb.connection_mode' + + # Cosmos DB container name + DB_COSMOSDB_CONTAINER = 'db.cosmosdb.container' + + # CosmosDB Operation Type + DB_COSMOSDB_OPERATION_TYPE = 'db.cosmosdb.operation_type' + + # RU consumed for that operation + DB_COSMOSDB_REQUEST_CHARGE = 'db.cosmosdb.request_charge' + + # Request payload size in bytes + DB_COSMOSDB_REQUEST_CONTENT_LENGTH = 'db.cosmosdb.request_content_length' + + # Cosmos DB status code + DB_COSMOSDB_STATUS_CODE = 'db.cosmosdb.status_code' + + # Cosmos DB sub status code + DB_COSMOSDB_SUB_STATUS_CODE = 'db.cosmosdb.sub_status_code' + + # The fully-qualified class name of the [Java Database Connectivity (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver used to connect + DB_JDBC_DRIVER_CLASSNAME = 'db.jdbc.driver_classname' + + # The collection being accessed within the database stated in `db.name` + DB_MONGODB_COLLECTION = 'db.mongodb.collection' + + # The Microsoft SQL Server [instance name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) connecting to. This name is used to determine the port of a named instance + # + # @note If setting a `db.mssql.instance_name`, `net.peer.port` is no longer required (but still recommended if non-standard) + DB_MSSQL_INSTANCE_NAME = 'db.mssql.instance_name' + + # This attribute is used to report the name of the database being accessed. For commands that switch the database, this should be set to the target database (even if the command fails) + # + # @note In some SQL databases, the database name to be used is called "schema name". In case there are multiple layers that could be considered for database name (e.g. Oracle instance name and schema name), the database name to be used is the more specific layer (e.g. Oracle schema name) + DB_NAME = 'db.name' + + # The name of the operation being executed, e.g. the [MongoDB command name](https://docs.mongodb.com/manual/reference/command/#database-operations) such as `findAndModify`, or the SQL keyword + # + # @note When setting this to an SQL keyword, it is not recommended to attempt any client-side parsing of `db.statement` just to get this property, but it should be set if the operation name is provided by the library being instrumented. If the SQL statement has an ambiguous operation, or performs more than one operation, this value may be omitted + DB_OPERATION = 'db.operation' + + # The index of the database being accessed as used in the [`SELECT` command](https://redis.io/commands/select), provided as an integer. To be used instead of the generic `db.name` attribute + DB_REDIS_DATABASE_INDEX = 'db.redis.database_index' + + # The name of the primary table that the operation is acting upon, including the database name (if applicable) + # + # @note It is not recommended to attempt any client-side parsing of `db.statement` just to get this property, but it should be set if it is provided by the library being instrumented. 
If the operation is acting upon an anonymous table, or more than one table, this value MUST NOT be set + DB_SQL_TABLE = 'db.sql.table' + + # The database statement being executed + DB_STATEMENT = 'db.statement' + + # An identifier for the database management system (DBMS) product being used. See below for a list of well-known identifiers + DB_SYSTEM = 'db.system' + + # Username for accessing the database + DB_USER = 'db.user' + + # Username or client_id extracted from the access token or [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the inbound request from outside the system + ENDUSER_ID = 'enduser.id' + + # Actual/assumed role the client is making the request under extracted from token or application security context + ENDUSER_ROLE = 'enduser.role' + + # Scopes or granted authorities the client currently possesses extracted from token or application security context. The value would come from the scope associated with an [OAuth 2.0 Access Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value in a [SAML 2.0 Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html) + ENDUSER_SCOPE = 'enduser.scope' + + # The domain identifies the business context for the events + # + # @note Events across different domains may have same `event.name`, yet be + # unrelated events + EVENT_DOMAIN = 'event.domain' + + # The name identifies the event + EVENT_NAME = 'event.name' + + # SHOULD be set to true if the exception event is recorded at a point where it is known that the exception is escaping the scope of the span + # + # @note An exception is considered to have escaped (or left) the scope of a span, + # if that span is ended while the exception is still logically "in flight". + # This may be actually "in flight" in some languages (e.g. if the exception + # is passed to a Context manager's `__exit__` method in Python) but will + # usually be caught at the point of recording the exception in most languages. + # + # It is usually not possible to determine at the point where an exception is thrown + # whether it will escape the scope of a span. + # However, it is trivial to know that an exception + # will escape, if one checks for an active exception just before ending the span, + # as done in the [example above](#recording-an-exception). + # + # It follows that an exception may still escape the scope of the span + # even if the `exception.escaped` attribute was not set or set to false, + # since the event might have been recorded at a time where it was not + # clear whether the exception will escape + EXCEPTION_ESCAPED = 'exception.escaped' + + # The exception message + EXCEPTION_MESSAGE = 'exception.message' + + # A stacktrace as a string in the natural representation for the language runtime. The representation is to be determined and documented by each language SIG + EXCEPTION_STACKTRACE = 'exception.stacktrace' + + # The type of the exception (its fully-qualified class name, if applicable). The dynamic type of the exception should be preferred over the static type in languages that support it + EXCEPTION_TYPE = 'exception.type' + + # A boolean that is true if the serverless function is executed for the first time (aka cold-start) + FAAS_COLDSTART = 'faas.coldstart' + + # A string containing the schedule period as [Cron Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm) + FAAS_CRON = 'faas.cron' + + # The name of the source on which the triggering operation was performed. 
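(Aside — illustration only, not part of this diff: a hedged sketch of recording an exception event with the `exception.escaped` semantics described above. `span` is assumed to be the currently active span and `charge_card!` is a hypothetical operation.)

trace = OpenTelemetry::SemanticConventions_1_20_0::Trace
begin
  charge_card!
rescue StandardError => e
  # record_exception adds an "exception" event carrying exception.type/message/stacktrace;
  # exception.escaped is set here because the error is re-raised and will still be in
  # flight when the span ends
  span.record_exception(e, attributes: { trace::EXCEPTION_ESCAPED => true })
  span.status = OpenTelemetry::Trace::Status.error(e.message)
  raise
end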
For example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the database name + FAAS_DOCUMENT_COLLECTION = 'faas.document.collection' + + # The document name/table subjected to the operation. For example, in Cloud Storage or S3 is the name of the file, and in Cosmos DB the table name + FAAS_DOCUMENT_NAME = 'faas.document.name' + + # Describes the type of the operation that was performed on the data + FAAS_DOCUMENT_OPERATION = 'faas.document.operation' + + # A string containing the time when the data was accessed in the [ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed in [UTC](https://www.w3.org/TR/NOTE-datetime) + FAAS_DOCUMENT_TIME = 'faas.document.time' + + # The invocation ID of the current function invocation + FAAS_INVOCATION_ID = 'faas.invocation_id' + + # The name of the invoked function + # + # @note SHOULD be equal to the `faas.name` resource attribute of the invoked function + FAAS_INVOKED_NAME = 'faas.invoked_name' + + # The cloud provider of the invoked function + # + # @note SHOULD be equal to the `cloud.provider` resource attribute of the invoked function + FAAS_INVOKED_PROVIDER = 'faas.invoked_provider' + + # The cloud region of the invoked function + # + # @note SHOULD be equal to the `cloud.region` resource attribute of the invoked function + FAAS_INVOKED_REGION = 'faas.invoked_region' + + # A string containing the function invocation time in the [ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed in [UTC](https://www.w3.org/TR/NOTE-datetime) + FAAS_TIME = 'faas.time' + + # Type of the trigger which caused this function invocation + # + # @note For the server/consumer span on the incoming side, + # `faas.trigger` MUST be set. + # + # Clients invoking FaaS instances usually cannot set `faas.trigger`, + # since they would typically need to look in the payload to determine + # the event type. If clients set it, it should be the same as the + # trigger that corresponding incoming would have (i.e., this has + # nothing to do with the underlying transport used to make the API + # call to invoke the lambda, which is often HTTP) + FAAS_TRIGGER = 'faas.trigger' + + # The unique identifier of the feature flag + FEATURE_FLAG_KEY = 'feature_flag.key' + + # The name of the service provider that performs the flag evaluation + FEATURE_FLAG_PROVIDER_NAME = 'feature_flag.provider_name' + + # SHOULD be a semantic identifier for a value. If one is unavailable, a stringified version of the value can be used + # + # @note A semantic identifier, commonly referred to as a variant, provides a means + # for referring to a value without including the value itself. This can + # provide additional context for understanding the meaning behind a value. + # For example, the variant `red` maybe be used for the value `#c05543`. + # + # A stringified version of the value can be used in situations where a + # semantic identifier is unavailable. String representation of the value + # should be determined by the implementer + FEATURE_FLAG_VARIANT = 'feature_flag.variant' + + # The GraphQL document being executed + # + # @note The value may be sanitized to exclude sensitive information + GRAPHQL_DOCUMENT = 'graphql.document' + + # The name of the operation being executed + GRAPHQL_OPERATION_NAME = 'graphql.operation.name' + + # The type of the operation being executed + GRAPHQL_OPERATION_TYPE = 'graphql.operation.type' + + # The IP address of the original client behind all proxies, if known (e.g. 
from [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)) + # + # @note This is not necessarily the same as `net.sock.peer.addr`, which would + # identify the network-level peer, which may be a proxy. + # + # This attribute should be set when a source of information different + # from the one used for `net.sock.peer.addr`, is available even if that other + # source just confirms the same value as `net.sock.peer.addr`. + # Rationale: For `net.sock.peer.addr`, one typically does not know if it + # comes from a proxy, reverse proxy, or the actual client. Setting + # `http.client_ip` when it's the same as `net.sock.peer.addr` means that + # one is at least somewhat confident that the address is not that of + # the closest proxy + HTTP_CLIENT_IP = 'http.client_ip' + + # HTTP request method + HTTP_METHOD = 'http.method' + + # The size of the request payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size + HTTP_REQUEST_CONTENT_LENGTH = 'http.request_content_length' + + # The ordinal number of request resending attempt (for any reason, including redirects) + # + # @note The resend count SHOULD be updated each time an HTTP request gets resent by the client, regardless of what was the cause of the resending (e.g. redirection, authorization failure, 503 Server Unavailable, network issues, or any other) + HTTP_RESEND_COUNT = 'http.resend_count' + + # The size of the response payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size + HTTP_RESPONSE_CONTENT_LENGTH = 'http.response_content_length' + + # The matched route (path template in the format used by the respective server framework). See note below + # + # @note MUST NOT be populated when this is not supported by the HTTP server framework as the route attribute should have low-cardinality and the URI path can NOT substitute it. + # SHOULD include the [application root](/specification/trace/semantic_conventions/http.md#http-server-definitions) if there is one + HTTP_ROUTE = 'http.route' + + # The URI scheme identifying the used protocol + HTTP_SCHEME = 'http.scheme' + + # [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6) + HTTP_STATUS_CODE = 'http.status_code' + + # The full request target as passed in a HTTP request line or equivalent + HTTP_TARGET = 'http.target' + + # Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not transmitted over HTTP, but if it is known, it should be included nevertheless + # + # @note `http.url` MUST NOT contain credentials passed via URL in form of `https://username:password@www.example.com/`. In such case the attribute's value should be `https://www.example.com/` + HTTP_URL = 'http.url' + + # A unique identifier for the Log Record + # + # @note If an id is provided, other log records with the same id will be considered duplicates and can be removed safely. This means, that two distinguishable log records MUST have different values. 
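(Aside — illustration only, not part of the generated file: a minimal sketch of a server-side span using the HTTP constants above. `tracer` is assumed to come from an already configured TracerProvider.)

trace = OpenTelemetry::SemanticConventions_1_20_0::Trace
tracer.in_span('GET /users/:id', kind: :server) do |span|
  span.set_attribute(trace::HTTP_METHOD, 'GET')
  span.set_attribute(trace::HTTP_ROUTE, '/users/:id') # low-cardinality route template, not the raw path
  span.set_attribute(trace::HTTP_STATUS_CODE, 200)
end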
+ # The id MAY be an [Universally Unique Lexicographically Sortable Identifier (ULID)](https://github.com/ulid/spec), but other identifiers (e.g. UUID) may be used as needed + LOG_RECORD_UID = 'log.record.uid' + + # Compressed size of the message in bytes + MESSAGE_COMPRESSED_SIZE = 'message.compressed_size' + + # MUST be calculated as two different counters starting from `1` one for sent messages and one for received message + # + # @note This way we guarantee that the values will be consistent between different implementations + MESSAGE_ID = 'message.id' + + # Whether this is a received or sent message + MESSAGE_TYPE = 'message.type' + + # Uncompressed size of the message in bytes + MESSAGE_UNCOMPRESSED_SIZE = 'message.uncompressed_size' + + # The number of messages sent, received, or processed in the scope of the batching operation + # + # @note Instrumentations SHOULD NOT set `messaging.batch.message_count` on spans that operate with a single message. When a messaging client library supports both batch and single-message API for the same operation, instrumentations SHOULD use `messaging.batch.message_count` for batching APIs and SHOULD NOT use it for single-message APIs + MESSAGING_BATCH_MESSAGE_COUNT = 'messaging.batch.message_count' + + # The identifier for the consumer receiving a message. For Kafka, set it to `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both are present, or only `messaging.kafka.consumer.group`. For brokers, such as RabbitMQ and Artemis, set it to the `client_id` of the client consuming the message + MESSAGING_CONSUMER_ID = 'messaging.consumer.id' + + # A boolean that is true if the message destination is anonymous (could be unnamed or have auto-generated name) + MESSAGING_DESTINATION_ANONYMOUS = 'messaging.destination.anonymous' + + # The message destination name + # + # @note Destination name SHOULD uniquely identify a specific queue, topic or other entity within the broker. If + # the broker does not have such notion, the destination name SHOULD uniquely identify the broker + MESSAGING_DESTINATION_NAME = 'messaging.destination.name' + + # Low cardinality representation of the messaging destination name + # + # @note Destination names could be constructed from templates. An example would be a destination name involving a user name or product id. Although the destination name in this case is of high cardinality, the underlying template is of low cardinality and can be effectively used for grouping and aggregation + MESSAGING_DESTINATION_TEMPLATE = 'messaging.destination.template' + + # A boolean that is true if the message destination is temporary and might not exist anymore after messages are processed + MESSAGING_DESTINATION_TEMPORARY = 'messaging.destination.temporary' + + # Client Id for the Consumer or Producer that is handling the message + MESSAGING_KAFKA_CLIENT_ID = 'messaging.kafka.client_id' + + # Name of the Kafka Consumer Group that is handling the message. Only applies to consumers, not producers + MESSAGING_KAFKA_CONSUMER_GROUP = 'messaging.kafka.consumer.group' + + # Partition the message is sent to + MESSAGING_KAFKA_DESTINATION_PARTITION = 'messaging.kafka.destination.partition' + + # Message keys in Kafka are used for grouping alike messages to ensure they're processed on the same partition. They differ from `messaging.message.id` in that they're not unique. If the key is `null`, the attribute MUST NOT be set + # + # @note If the key type is not string, it's string representation has to be supplied for the attribute. 
If the key has no unambiguous, canonical string form, don't include its value + MESSAGING_KAFKA_MESSAGE_KEY = 'messaging.kafka.message.key' + + # The offset of a record in the corresponding Kafka partition + MESSAGING_KAFKA_MESSAGE_OFFSET = 'messaging.kafka.message.offset' + + # A boolean that is true if the message is a tombstone + MESSAGING_KAFKA_MESSAGE_TOMBSTONE = 'messaging.kafka.message.tombstone' + + # Partition the message is received from + MESSAGING_KAFKA_SOURCE_PARTITION = 'messaging.kafka.source.partition' + + # The [conversation ID](#conversations) identifying the conversation to which the message belongs, represented as a string. Sometimes called "Correlation ID" + MESSAGING_MESSAGE_CONVERSATION_ID = 'messaging.message.conversation_id' + + # A value used by the messaging system as an identifier for the message, represented as a string + MESSAGING_MESSAGE_ID = 'messaging.message.id' + + # The compressed size of the message payload in bytes + MESSAGING_MESSAGE_PAYLOAD_COMPRESSED_SIZE_BYTES = 'messaging.message.payload_compressed_size_bytes' + + # The (uncompressed) size of the message payload in bytes. Also use this attribute if it is unknown whether the compressed or uncompressed payload size is reported + MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES = 'messaging.message.payload_size_bytes' + + # A string identifying the kind of messaging operation as defined in the [Operation names](#operation-names) section above + # + # @note If a custom value is used, it MUST be of low cardinality + MESSAGING_OPERATION = 'messaging.operation' + + # RabbitMQ message routing key + MESSAGING_RABBITMQ_DESTINATION_ROUTING_KEY = 'messaging.rabbitmq.destination.routing_key' + + # Name of the RocketMQ producer/consumer group that is handling the message. The client type is identified by the SpanKind + MESSAGING_ROCKETMQ_CLIENT_GROUP = 'messaging.rocketmq.client_group' + + # The unique identifier for each client + MESSAGING_ROCKETMQ_CLIENT_ID = 'messaging.rocketmq.client_id' + + # Model of message consumption. This only applies to consumer spans + MESSAGING_ROCKETMQ_CONSUMPTION_MODEL = 'messaging.rocketmq.consumption_model' + + # The delay time level for delay message, which determines the message delay time + MESSAGING_ROCKETMQ_MESSAGE_DELAY_TIME_LEVEL = 'messaging.rocketmq.message.delay_time_level' + + # The timestamp in milliseconds that the delay message is expected to be delivered to consumer + MESSAGING_ROCKETMQ_MESSAGE_DELIVERY_TIMESTAMP = 'messaging.rocketmq.message.delivery_timestamp' + + # It is essential for FIFO message. 
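(Aside — illustration only, not part of this diff: a hedged sketch of a producer span using the Kafka messaging constants above. `tracer` is assumed to be configured and `order_id` is hypothetical.)

trace = OpenTelemetry::SemanticConventions_1_20_0::Trace
tracer.in_span('orders publish', kind: :producer) do |span|
  span.set_attribute(trace::MESSAGING_SYSTEM, 'kafka')
  span.set_attribute(trace::MESSAGING_DESTINATION_NAME, 'orders')
  span.set_attribute(trace::MESSAGING_OPERATION, 'publish')
  span.set_attribute(trace::MESSAGING_KAFKA_MESSAGE_KEY, order_id.to_s) # keys group related messages onto one partition
end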
Messages that belong to the same message group are always processed one by one within the same consumer group + MESSAGING_ROCKETMQ_MESSAGE_GROUP = 'messaging.rocketmq.message.group' + + # Key(s) of message, another way to mark message besides message id + MESSAGING_ROCKETMQ_MESSAGE_KEYS = 'messaging.rocketmq.message.keys' + + # The secondary classifier of message besides topic + MESSAGING_ROCKETMQ_MESSAGE_TAG = 'messaging.rocketmq.message.tag' + + # Type of message + MESSAGING_ROCKETMQ_MESSAGE_TYPE = 'messaging.rocketmq.message.type' + + # Namespace of RocketMQ resources, resources in different namespaces are individual + MESSAGING_ROCKETMQ_NAMESPACE = 'messaging.rocketmq.namespace' + + # A boolean that is true if the message source is anonymous (could be unnamed or have auto-generated name) + MESSAGING_SOURCE_ANONYMOUS = 'messaging.source.anonymous' + + # The message source name + # + # @note Source name SHOULD uniquely identify a specific queue, topic, or other entity within the broker. If + # the broker does not have such notion, the source name SHOULD uniquely identify the broker + MESSAGING_SOURCE_NAME = 'messaging.source.name' + + # Low cardinality representation of the messaging source name + # + # @note Source names could be constructed from templates. An example would be a source name involving a user name or product id. Although the source name in this case is of high cardinality, the underlying template is of low cardinality and can be effectively used for grouping and aggregation + MESSAGING_SOURCE_TEMPLATE = 'messaging.source.template' + + # A boolean that is true if the message source is temporary and might not exist anymore after messages are processed + MESSAGING_SOURCE_TEMPORARY = 'messaging.source.temporary' + + # A string identifying the messaging system + MESSAGING_SYSTEM = 'messaging.system' + + # The ISO 3166-1 alpha-2 2-character country code associated with the mobile carrier network + NET_HOST_CARRIER_ICC = 'net.host.carrier.icc' + + # The mobile carrier country code + NET_HOST_CARRIER_MCC = 'net.host.carrier.mcc' + + # The mobile carrier network code + NET_HOST_CARRIER_MNC = 'net.host.carrier.mnc' + + # The name of the mobile carrier + NET_HOST_CARRIER_NAME = 'net.host.carrier.name' + + # This describes more details regarding the connection.type. It may be the type of cell technology connection, but it could be used for describing details about a wifi connection + NET_HOST_CONNECTION_SUBTYPE = 'net.host.connection.subtype' + + # The internet connection type currently being used by the host + NET_HOST_CONNECTION_TYPE = 'net.host.connection.type' + + # Name of the local HTTP server that received the request + # + # @note Determined by using the first of the following that applies + # + # - The [primary server name](/specification/trace/semantic_conventions/http.md#http-server-definitions) of the matched virtual host. MUST only + # include host identifier. + # - Host identifier of the [request target](https://www.rfc-editor.org/rfc/rfc9110.html#target.resource) + # if it's sent in absolute-form. 
+ # - Host identifier of the `Host` header + # + # SHOULD NOT be set if only IP address is available and capturing name would require a reverse DNS lookup + NET_HOST_NAME = 'net.host.name' + + # Port of the local HTTP server that received the request + # + # @note Determined by using the first of the following that applies + # + # - Port identifier of the [primary server host](/specification/trace/semantic_conventions/http.md#http-server-definitions) of the matched virtual host. + # - Port identifier of the [request target](https://www.rfc-editor.org/rfc/rfc9110.html#target.resource) + # if it's sent in absolute-form. + # - Port identifier of the `Host` header + NET_HOST_PORT = 'net.host.port' + + # Host identifier of the ["URI origin"](https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin) HTTP request is sent to + # + # @note Determined by using the first of the following that applies + # + # - Host identifier of the [request target](https://www.rfc-editor.org/rfc/rfc9110.html#target.resource) + # if it's sent in absolute-form + # - Host identifier of the `Host` header + # + # SHOULD NOT be set if capturing it would require an extra DNS lookup + NET_PEER_NAME = 'net.peer.name' + + # Port identifier of the ["URI origin"](https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin) HTTP request is sent to + # + # @note When [request target](https://www.rfc-editor.org/rfc/rfc9110.html#target.resource) is absolute URI, `net.peer.name` MUST match URI port identifier, otherwise it MUST match `Host` header port identifier + NET_PEER_PORT = 'net.peer.port' + + # Application layer protocol used. The value SHOULD be normalized to lowercase + NET_PROTOCOL_NAME = 'net.protocol.name' + + # Version of the application layer protocol used. See note below + # + # @note `net.protocol.version` refers to the version of the protocol used and might be different from the protocol client's version. If the HTTP client used has a version of `0.27.2`, but sends HTTP version `1.1`, this attribute should be set to `1.1` + NET_PROTOCOL_VERSION = 'net.protocol.version' + + # Protocol [address family](https://man7.org/linux/man-pages/man7/address_families.7.html) which is used for communication + NET_SOCK_FAMILY = 'net.sock.family' + + # Local socket address. Useful in case of a multi-IP host + NET_SOCK_HOST_ADDR = 'net.sock.host.addr' + + # Local socket port number + NET_SOCK_HOST_PORT = 'net.sock.host.port' + + # Remote socket peer address: IPv4 or IPv6 for internet protocols, path for local communication, [etc](https://man7.org/linux/man-pages/man7/address_families.7.html) + NET_SOCK_PEER_ADDR = 'net.sock.peer.addr' + + # Remote socket peer name + NET_SOCK_PEER_NAME = 'net.sock.peer.name' + + # Remote socket peer port + NET_SOCK_PEER_PORT = 'net.sock.peer.port' + + # Transport protocol used. See note below + NET_TRANSPORT = 'net.transport' + + # Parent-child Reference type + # + # @note The causal relationship between a child Span and a parent Span + OPENTRACING_REF_TYPE = 'opentracing.ref_type' + + # Name of the code, either "OK" or "ERROR". MUST NOT be set if the status code is UNSET + OTEL_STATUS_CODE = 'otel.status_code' + + # Description of the Status if it has a value, otherwise not set + OTEL_STATUS_DESCRIPTION = 'otel.status_description' + + # The [`service.name`](../../resource/semantic_conventions/README.md#service) of the remote service. 
SHOULD be equal to the actual `service.name` resource attribute of the remote service if any + PEER_SERVICE = 'peer.service' + + # The [error codes](https://connect.build/docs/protocol/#error-codes) of the Connect request. Error codes are always string values + RPC_CONNECT_RPC_ERROR_CODE = 'rpc.connect_rpc.error_code' + + # The [numeric status code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC request + RPC_GRPC_STATUS_CODE = 'rpc.grpc.status_code' + + # `error.code` property of response if it is an error response + RPC_JSONRPC_ERROR_CODE = 'rpc.jsonrpc.error_code' + + # `error.message` property of response if it is an error response + RPC_JSONRPC_ERROR_MESSAGE = 'rpc.jsonrpc.error_message' + + # `id` property of request or response. Since protocol allows id to be int, string, `null` or missing (for notifications), value is expected to be cast to string for simplicity. Use empty string in case of `null` value. Omit entirely if this is a notification + RPC_JSONRPC_REQUEST_ID = 'rpc.jsonrpc.request_id' + + # Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 does not specify this, the value can be omitted + RPC_JSONRPC_VERSION = 'rpc.jsonrpc.version' + + # The name of the operation corresponding to the request, as returned by the AWS SDK + # + # @note This is the logical name of the method from the RPC interface perspective, which can be different from the name of any implementing method/function. The `code.function` attribute may be used to store the latter (e.g., method actually executing the call on the server side, RPC client stub method on the client side) + RPC_METHOD = 'rpc.method' + + # The name of the service to which a request is made, as returned by the AWS SDK + # + # @note This is the logical name of the service from the RPC interface perspective, which can be different from the name of any implementing class. The `code.namespace` attribute may be used to store the latter (despite the attribute name, it may include a class name; e.g., class with method actually executing the call on the server side, RPC client stub class on the client side) + RPC_SERVICE = 'rpc.service' + + # The value `aws-api` + RPC_SYSTEM = 'rpc.system' + + # Current "managed" thread ID (as opposed to OS thread ID) + THREAD_ID = 'thread.id' + + # Current thread name + THREAD_NAME = 'thread.name' + + # Full user-agent string is generated by Cosmos DB SDK + # + # @note The user-agent value is generated by SDK which is a combination of
+ # `sdk_version` : Current version of SDK. e.g. 'cosmos-netstandard-sdk/3.23.0'
+ # `direct_pkg_version` : Direct package version used by Cosmos DB SDK. e.g. '3.23.1'
+ # `number_of_client_instances` : Number of cosmos client instances created by the application. e.g. '1'
+ # `type_of_machine_architecture` : Machine architecture. e.g. 'X64'
+ # `operating_system` : Operating System. e.g. 'Linux 5.4.0-1098-azure 104 18'
+ # `runtime_framework` : Runtime Framework. e.g. '.NET Core 3.1.32'
`failover_information` : Generated key to determine if region failover enabled. + # Format Reg-{D (Disabled discovery)}-S(application region)|L(List of preferred regions)|N(None, user did not configure it). + # Default value is "NS" + USER_AGENT_ORIGINAL = 'user_agent.original' + + end + end +end \ No newline at end of file diff --git a/semantic_conventions/lib/opentelemetry/semantic_conventions/resource.rb b/semantic_conventions/lib/opentelemetry/semantic_conventions/resource.rb index 912946f71f..23a5275080 100644 --- a/semantic_conventions/lib/opentelemetry/semantic_conventions/resource.rb +++ b/semantic_conventions/lib/opentelemetry/semantic_conventions/resource.rb @@ -6,295 +6,15 @@ module OpenTelemetry module SemanticConventions + # Semantic conventions for resource attributes module Resource - # Name of the cloud provider - CLOUD_PROVIDER = 'cloud.provider' - - # The cloud account ID the resource is assigned to - CLOUD_ACCOUNT_ID = 'cloud.account.id' - - # The geographical region the resource is running - # @note Refer to your provider's docs to see the available regions, for example [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), [Google Cloud regions](https://cloud.google.com/about/locations), or [Tencent Cloud regions](https://intl.cloud.tencent.com/document/product/213/6091) - CLOUD_REGION = 'cloud.region' - - # Cloud regions often have multiple, isolated locations known as zones to increase availability. Availability zone represents the zone where the resource is running - # @note Availability zones are called "zones" on Alibaba Cloud and Google Cloud - CLOUD_AVAILABILITY_ZONE = 'cloud.availability_zone' - - # The cloud platform in use - # @note The prefix of the service SHOULD match the one specified in `cloud.provider` - CLOUD_PLATFORM = 'cloud.platform' - - # The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html) - AWS_ECS_CONTAINER_ARN = 'aws.ecs.container.arn' - - # The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html) - AWS_ECS_CLUSTER_ARN = 'aws.ecs.cluster.arn' - - # The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) for an ECS task - AWS_ECS_LAUNCHTYPE = 'aws.ecs.launchtype' - - # The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) - AWS_ECS_TASK_ARN = 'aws.ecs.task.arn' - - # The task definition family this task definition is a member of - AWS_ECS_TASK_FAMILY = 'aws.ecs.task.family' - - # The revision for this task definition - AWS_ECS_TASK_REVISION = 'aws.ecs.task.revision' - - # The ARN of an EKS cluster - AWS_EKS_CLUSTER_ARN = 'aws.eks.cluster.arn' - - # The name(s) of the AWS log group(s) an application is writing to - # @note Multiple log groups must be supported for cases like multi-container applications, where a single application has sidecar containers, and each write to their own log group - AWS_LOG_GROUP_NAMES = 'aws.log.group.names' - - # The Amazon Resource Name(s) (ARN) of the AWS log group(s) - # @note See the [log group ARN format documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format) - AWS_LOG_GROUP_ARNS = 'aws.log.group.arns' - - # The 
name(s) of the AWS log stream(s) an application is writing to - AWS_LOG_STREAM_NAMES = 'aws.log.stream.names' - - # The ARN(s) of the AWS log stream(s) - # @note See the [log stream ARN format documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain several log streams, so these ARNs necessarily identify both a log group and a log stream - AWS_LOG_STREAM_ARNS = 'aws.log.stream.arns' - - # Container name used by container runtime - CONTAINER_NAME = 'container.name' - - # Container ID. Usually a UUID, as for example used to [identify Docker containers](https://docs.docker.com/engine/reference/run/#container-identification). The UUID might be abbreviated - CONTAINER_ID = 'container.id' - - # The container runtime managing this container - CONTAINER_RUNTIME = 'container.runtime' - - # Name of the image the container was built on - CONTAINER_IMAGE_NAME = 'container.image.name' - - # Container image tag - CONTAINER_IMAGE_TAG = 'container.image.tag' - - # Name of the [deployment environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka deployment tier) - DEPLOYMENT_ENVIRONMENT = 'deployment.environment' - - # A unique identifier representing the device - # @note The device identifier MUST only be defined using the values outlined below. This value is not an advertising identifier and MUST NOT be used as such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the Firebase Installation ID or a globally unique UUID which is persisted across sessions in your application. More information can be found [here](https://developer.android.com/training/articles/user-data-ids) on best practices and exact implementation details. Caution should be taken when storing personal data or anything which can identify a user. GDPR and data protection laws may apply, ensure you do your own due diligence - DEVICE_ID = 'device.id' - - # The model identifier for the device - # @note It's recommended this value represents a machine readable version of the model identifier rather than the market or consumer-friendly name of the device - DEVICE_MODEL_IDENTIFIER = 'device.model.identifier' - - # The marketing name for the device model - # @note It's recommended this value represents a human readable version of the device model rather than a machine readable alternative - DEVICE_MODEL_NAME = 'device.model.name' - - # The name of the device manufacturer - # @note The Android OS provides this field via [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). iOS apps SHOULD hardcode the value `Apple` - DEVICE_MANUFACTURER = 'device.manufacturer' - - # The name of the single function that this runtime instance executes - # @note This is the name of the function as configured/deployed on the FaaS platform and is usually different from the name of the callback function (which may be stored in the [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) span attributes) - FAAS_NAME = 'faas.name' - - # The unique ID of the single function that this runtime instance executes - # @note Depending on the cloud provider, use: - # - # * **AWS Lambda:** The function [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). 
- # Take care not to use the "invoked ARN" directly but replace any - # [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) with the resolved function version, as the same runtime instance may be invokable with multiple - # different aliases. - # * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-resource-names) - # * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id). - # - # On some providers, it may not be possible to determine the full ID at startup, - # which is why this field cannot be made required. For example, on AWS the account ID - # part of the ARN is not available without calling another AWS API - # which may be deemed too slow for a short-running lambda function. - # As an alternative, consider setting `faas.id` as a span attribute instead - FAAS_ID = 'faas.id' - - # The immutable version of the function being executed - # @note Depending on the cloud provider and platform, use: - # - # * **AWS Lambda:** The [function version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) - # (an integer represented as a decimal string). - # * **Google Cloud Run:** The [revision](https://cloud.google.com/run/docs/managing/revisions) - # (i.e., the function name plus the revision suffix). - # * **Google Cloud Functions:** The value of the - # [`K_REVISION` environment variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). - # * **Azure Functions:** Not applicable. Do not set this attribute - FAAS_VERSION = 'faas.version' - - # The execution environment ID as a string, that will be potentially reused for other invocations to the same function/function version - # @note * **AWS Lambda:** Use the (full) log stream name - FAAS_INSTANCE = 'faas.instance' - - # The amount of memory available to the serverless function in MiB - # @note It's recommended to set this attribute since e.g. too little memory can easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information - FAAS_MAX_MEMORY = 'faas.max_memory' - - # Unique host ID. For Cloud, this must be the instance_id assigned by the cloud provider - HOST_ID = 'host.id' - - # Name of the host. On Unix systems, it may contain what the hostname command returns, or the fully qualified hostname, or another name specified by the user - HOST_NAME = 'host.name' - - # Type of host. For Cloud, this must be the machine type - HOST_TYPE = 'host.type' - - # The CPU architecture the host system is running on - HOST_ARCH = 'host.arch' - - # Name of the VM image or OS install the host was instantiated from - HOST_IMAGE_NAME = 'host.image.name' - - # VM image ID. 
For Cloud, this value is from the provider - HOST_IMAGE_ID = 'host.image.id' - - # The version string of the VM image as defined in [Version Attributes](README.md#version-attributes) - HOST_IMAGE_VERSION = 'host.image.version' - - # The name of the cluster - K8S_CLUSTER_NAME = 'k8s.cluster.name' - - # The name of the Node - K8S_NODE_NAME = 'k8s.node.name' - - # The UID of the Node - K8S_NODE_UID = 'k8s.node.uid' - - # The name of the namespace that the pod is running in - K8S_NAMESPACE_NAME = 'k8s.namespace.name' - - # The UID of the Pod - K8S_POD_UID = 'k8s.pod.uid' - - # The name of the Pod - K8S_POD_NAME = 'k8s.pod.name' - - # The name of the Container from Pod specification, must be unique within a Pod. Container runtime usually uses different globally unique name (`container.name`) - K8S_CONTAINER_NAME = 'k8s.container.name' - - # Number of times the container was restarted. This attribute can be used to identify a particular container (running or stopped) within a container spec - K8S_CONTAINER_RESTART_COUNT = 'k8s.container.restart_count' - - # The UID of the ReplicaSet - K8S_REPLICASET_UID = 'k8s.replicaset.uid' - - # The name of the ReplicaSet - K8S_REPLICASET_NAME = 'k8s.replicaset.name' - - # The UID of the Deployment - K8S_DEPLOYMENT_UID = 'k8s.deployment.uid' - - # The name of the Deployment - K8S_DEPLOYMENT_NAME = 'k8s.deployment.name' - - # The UID of the StatefulSet - K8S_STATEFULSET_UID = 'k8s.statefulset.uid' - - # The name of the StatefulSet - K8S_STATEFULSET_NAME = 'k8s.statefulset.name' - - # The UID of the DaemonSet - K8S_DAEMONSET_UID = 'k8s.daemonset.uid' - - # The name of the DaemonSet - K8S_DAEMONSET_NAME = 'k8s.daemonset.name' - - # The UID of the Job - K8S_JOB_UID = 'k8s.job.uid' - - # The name of the Job - K8S_JOB_NAME = 'k8s.job.name' - - # The UID of the CronJob - K8S_CRONJOB_UID = 'k8s.cronjob.uid' - - # The name of the CronJob - K8S_CRONJOB_NAME = 'k8s.cronjob.name' - - # The operating system type - OS_TYPE = 'os.type' - - # Human readable (not intended to be parsed) OS version information, like e.g. reported by `ver` or `lsb_release -a` commands - OS_DESCRIPTION = 'os.description' - - # Human readable operating system name - OS_NAME = 'os.name' - - # The version string of the operating system as defined in [Version Attributes](../../resource/semantic_conventions/README.md#version-attributes) - OS_VERSION = 'os.version' - - # Process identifier (PID) - PROCESS_PID = 'process.pid' - - # The name of the process executable. On Linux based systems, can be set to the `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of `GetProcessImageFileNameW` - PROCESS_EXECUTABLE_NAME = 'process.executable.name' - - # The full path to the process executable. On Linux based systems, can be set to the target of `proc/[pid]/exe`. On Windows, can be set to the result of `GetProcessImageFileNameW` - PROCESS_EXECUTABLE_PATH = 'process.executable.path' - - # The command used to launch the process (i.e. the command name). On Linux based systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter extracted from `GetCommandLineW` - PROCESS_COMMAND = 'process.command' - - # The full command used to launch the process as a single string representing the full command. On Windows, can be set to the result of `GetCommandLineW`. 
Do not set this if you have to assemble it just for monitoring; use `process.command_args` instead - PROCESS_COMMAND_LINE = 'process.command_line' - - # All the command arguments (including the command/executable itself) as received by the process. On Linux-based systems (and some other Unixoid systems supporting procfs), can be set according to the list of null-delimited strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be the full argv vector passed to `main` - PROCESS_COMMAND_ARGS = 'process.command_args' - - # The username of the user that owns the process - PROCESS_OWNER = 'process.owner' - - # The name of the runtime of this process. For compiled native binaries, this SHOULD be the name of the compiler - PROCESS_RUNTIME_NAME = 'process.runtime.name' - - # The version of the runtime of this process, as returned by the runtime without modification - PROCESS_RUNTIME_VERSION = 'process.runtime.version' - - # An additional description about the runtime of the process, for example a specific vendor customization of the runtime environment - PROCESS_RUNTIME_DESCRIPTION = 'process.runtime.description' - - # Logical name of the service - # @note MUST be the same for all instances of horizontally scaled services. If the value was not specified, SDKs MUST fallback to `unknown_service:` concatenated with [`process.executable.name`](process.md#process), e.g. `unknown_service:bash`. If `process.executable.name` is not available, the value MUST be set to `unknown_service` - SERVICE_NAME = 'service.name' - - # A namespace for `service.name` - # @note A string value having a meaning that helps to distinguish a group of services, for example the team name that owns a group of services. `service.name` is expected to be unique within the same namespace. If `service.namespace` is not specified in the Resource then `service.name` is expected to be unique for all services that have no explicit namespace defined (so the empty/unspecified namespace is simply one more valid namespace). Zero-length namespace string is assumed equal to unspecified namespace - SERVICE_NAMESPACE = 'service.namespace' - - # The string ID of the service instance - # @note MUST be unique for each instance of the same `service.namespace,service.name` pair (in other words `service.namespace,service.name,service.instance.id` triplet MUST be globally unique). The ID helps to distinguish instances of the same service that exist at the same time (e.g. instances of a horizontally scaled service). It is preferable for the ID to be persistent and stay the same for the lifetime of the service instance, however it is acceptable that the ID is ephemeral and changes during important lifetime events for the service (e.g. service restarts). 
If the service has no inherent unique ID that can be used as the value of this attribute it is recommended to generate a random Version 1 or Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use Version 5, see RFC 4122 for more recommendations)
-      SERVICE_INSTANCE_ID = 'service.instance.id'
-
-      # The version string of the service API or implementation
-      SERVICE_VERSION = 'service.version'
-
-      # The name of the telemetry SDK as defined above
-      TELEMETRY_SDK_NAME = 'telemetry.sdk.name'
-
-      # The language of the telemetry SDK
-      TELEMETRY_SDK_LANGUAGE = 'telemetry.sdk.language'
-
-      # The version string of the telemetry SDK
-      TELEMETRY_SDK_VERSION = 'telemetry.sdk.version'
-
-      # The version string of the auto instrumentation agent, if used
-      TELEMETRY_AUTO_VERSION = 'telemetry.auto.version'
-
-      # The name of the web engine
-      WEBENGINE_NAME = 'webengine.name'
-
-      # The version of the web engine
-      WEBENGINE_VERSION = 'webengine.version'
-
-      # Additional description of the web engine (e.g. detailed version and edition information)
-      WEBENGINE_DESCRIPTION = 'webengine.description'
+      def self.const_missing(const_name)
+        attribute_name = OpenTelemetry::SemanticConventions_1_20_0::Resource.const_get(const_name)
+        super(const_name) unless attribute_name
+        const_set(const_name, attribute_name)
+        attribute_name
+      end
     end
   end
-end
\ No newline at end of file
+end
diff --git a/semantic_conventions/lib/opentelemetry/semantic_conventions/trace.rb b/semantic_conventions/lib/opentelemetry/semantic_conventions/trace.rb
index 452315a2ac..32f91bf33b 100644
--- a/semantic_conventions/lib/opentelemetry/semantic_conventions/trace.rb
+++ b/semantic_conventions/lib/opentelemetry/semantic_conventions/trace.rb
@@ -6,482 +6,15 @@
 
 module OpenTelemetry
   module SemanticConventions
+    # Semantic conventions for trace attributes
     module Trace
-      # The full invoked ARN as provided on the `Context` passed to the function (`Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next` applicable)
-      # @note This may be different from `faas.id` if an alias is involved
-      AWS_LAMBDA_INVOKED_ARN = 'aws.lambda.invoked_arn'
-
-      # The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) uniquely identifies the event
-      CLOUDEVENTS_EVENT_ID = 'cloudevents.event_id'
-
-      # The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) identifies the context in which an event happened
-      CLOUDEVENTS_EVENT_SOURCE = 'cloudevents.event_source'
-
-      # The [version of the CloudEvents specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses
-      CLOUDEVENTS_EVENT_SPEC_VERSION = 'cloudevents.event_spec_version'
-
-      # The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) contains a value describing the type of event related to the originating occurrence
-      CLOUDEVENTS_EVENT_TYPE = 'cloudevents.event_type'
-
-      # The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) of the event in the context of the event producer (identified by source)
-      CLOUDEVENTS_EVENT_SUBJECT = 'cloudevents.event_subject'
-
-      # Parent-child Reference type
-      # @note The causal relationship between a child Span and a parent Span
-      OPENTRACING_REF_TYPE = 'opentracing.ref_type'
-
-      # An identifier for the database management system (DBMS) product being used.
See below for a list of well-known identifiers - DB_SYSTEM = 'db.system' - - # The connection string used to connect to the database. It is recommended to remove embedded credentials - DB_CONNECTION_STRING = 'db.connection_string' - - # Username for accessing the database - DB_USER = 'db.user' - - # The fully-qualified class name of the [Java Database Connectivity (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver used to connect - DB_JDBC_DRIVER_CLASSNAME = 'db.jdbc.driver_classname' - - # This attribute is used to report the name of the database being accessed. For commands that switch the database, this should be set to the target database (even if the command fails) - # @note In some SQL databases, the database name to be used is called "schema name". In case there are multiple layers that could be considered for database name (e.g. Oracle instance name and schema name), the database name to be used is the more specific layer (e.g. Oracle schema name) - DB_NAME = 'db.name' - - # The database statement being executed - # @note The value may be sanitized to exclude sensitive information - DB_STATEMENT = 'db.statement' - - # The name of the operation being executed, e.g. the [MongoDB command name](https://docs.mongodb.com/manual/reference/command/#database-operations) such as `findAndModify`, or the SQL keyword - # @note When setting this to an SQL keyword, it is not recommended to attempt any client-side parsing of `db.statement` just to get this property, but it should be set if the operation name is provided by the library being instrumented. If the SQL statement has an ambiguous operation, or performs more than one operation, this value may be omitted - DB_OPERATION = 'db.operation' - - # Remote hostname or similar, see note below - NET_PEER_NAME = 'net.peer.name' - - # Remote address of the peer (dotted decimal for IPv4 or [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6) - NET_PEER_IP = 'net.peer.ip' - - # Remote port number - NET_PEER_PORT = 'net.peer.port' - - # Transport protocol used. See note below - NET_TRANSPORT = 'net.transport' - - # The Microsoft SQL Server [instance name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) connecting to. This name is used to determine the port of a named instance - # @note If setting a `db.mssql.instance_name`, `net.peer.port` is no longer required (but still recommended if non-standard) - DB_MSSQL_INSTANCE_NAME = 'db.mssql.instance_name' - - # The fetch size used for paging, i.e. how many rows will be returned at once - DB_CASSANDRA_PAGE_SIZE = 'db.cassandra.page_size' - - # The consistency level of the query. Based on consistency values from [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html) - DB_CASSANDRA_CONSISTENCY_LEVEL = 'db.cassandra.consistency_level' - - # The name of the primary table that the operation is acting upon, including the keyspace name (if applicable) - # @note This mirrors the db.sql.table attribute but references cassandra rather than sql. It is not recommended to attempt any client-side parsing of `db.statement` just to get this property, but it should be set if it is provided by the library being instrumented. 
If the operation is acting upon an anonymous table, or more than one table, this value MUST NOT be set - DB_CASSANDRA_TABLE = 'db.cassandra.table' - - # Whether or not the query is idempotent - DB_CASSANDRA_IDEMPOTENCE = 'db.cassandra.idempotence' - - # The number of times a query was speculatively executed. Not set or `0` if the query was not executed speculatively - DB_CASSANDRA_SPECULATIVE_EXECUTION_COUNT = 'db.cassandra.speculative_execution_count' - - # The ID of the coordinating node for a query - DB_CASSANDRA_COORDINATOR_ID = 'db.cassandra.coordinator.id' - - # The data center of the coordinating node for a query - DB_CASSANDRA_COORDINATOR_DC = 'db.cassandra.coordinator.dc' - - # The index of the database being accessed as used in the [`SELECT` command](https://redis.io/commands/select), provided as an integer. To be used instead of the generic `db.name` attribute - DB_REDIS_DATABASE_INDEX = 'db.redis.database_index' - - # The collection being accessed within the database stated in `db.name` - DB_MONGODB_COLLECTION = 'db.mongodb.collection' - - # The name of the primary table that the operation is acting upon, including the database name (if applicable) - # @note It is not recommended to attempt any client-side parsing of `db.statement` just to get this property, but it should be set if it is provided by the library being instrumented. If the operation is acting upon an anonymous table, or more than one table, this value MUST NOT be set - DB_SQL_TABLE = 'db.sql.table' - - # The type of the exception (its fully-qualified class name, if applicable). The dynamic type of the exception should be preferred over the static type in languages that support it - EXCEPTION_TYPE = 'exception.type' - - # The exception message - EXCEPTION_MESSAGE = 'exception.message' - - # A stacktrace as a string in the natural representation for the language runtime. The representation is to be determined and documented by each language SIG - EXCEPTION_STACKTRACE = 'exception.stacktrace' - - # SHOULD be set to true if the exception event is recorded at a point where it is known that the exception is escaping the scope of the span - # @note An exception is considered to have escaped (or left) the scope of a span, - # if that span is ended while the exception is still logically "in flight". - # This may be actually "in flight" in some languages (e.g. if the exception - # is passed to a Context manager's `__exit__` method in Python) but will - # usually be caught at the point of recording the exception in most languages. - # - # It is usually not possible to determine at the point where an exception is thrown - # whether it will escape the scope of a span. - # However, it is trivial to know that an exception - # will escape, if one checks for an active exception just before ending the span, - # as done in the [example above](#recording-an-exception). - # - # It follows that an exception may still escape the scope of the span - # even if the `exception.escaped` attribute was not set or set to false, - # since the event might have been recorded at a time where it was not - # clear whether the exception will escape - EXCEPTION_ESCAPED = 'exception.escaped' - - # Type of the trigger which caused this function execution - # @note For the server/consumer span on the incoming side, - # `faas.trigger` MUST be set. - # - # Clients invoking FaaS instances usually cannot set `faas.trigger`, - # since they would typically need to look in the payload to determine - # the event type. 
If clients set it, it should be the same as the - # trigger that corresponding incoming would have (i.e., this has - # nothing to do with the underlying transport used to make the API - # call to invoke the lambda, which is often HTTP) - FAAS_TRIGGER = 'faas.trigger' - - # The execution ID of the current function execution - FAAS_EXECUTION = 'faas.execution' - - # The name of the source on which the triggering operation was performed. For example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the database name - FAAS_DOCUMENT_COLLECTION = 'faas.document.collection' - - # Describes the type of the operation that was performed on the data - FAAS_DOCUMENT_OPERATION = 'faas.document.operation' - - # A string containing the time when the data was accessed in the [ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed in [UTC](https://www.w3.org/TR/NOTE-datetime) - FAAS_DOCUMENT_TIME = 'faas.document.time' - - # The document name/table subjected to the operation. For example, in Cloud Storage or S3 is the name of the file, and in Cosmos DB the table name - FAAS_DOCUMENT_NAME = 'faas.document.name' - - # HTTP request method - HTTP_METHOD = 'http.method' - - # Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not transmitted over HTTP, but if it is known, it should be included nevertheless - # @note `http.url` MUST NOT contain credentials passed via URL in form of `https://username:password@www.example.com/`. In such case the attribute's value should be `https://www.example.com/` - HTTP_URL = 'http.url' - - # The full request target as passed in a HTTP request line or equivalent - HTTP_TARGET = 'http.target' - - # The value of the [HTTP host header](https://tools.ietf.org/html/rfc7230#section-5.4). An empty Host header should also be reported, see note - # @note When the header is present but empty the attribute SHOULD be set to the empty string. Note that this is a valid situation that is expected in certain cases, according the aforementioned [section of RFC 7230](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is not set the attribute MUST NOT be set - HTTP_HOST = 'http.host' - - # The URI scheme identifying the used protocol - HTTP_SCHEME = 'http.scheme' - - # [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6) - HTTP_STATUS_CODE = 'http.status_code' - - # Kind of HTTP protocol used - # @note If `net.transport` is not specified, it can be assumed to be `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed - HTTP_FLAVOR = 'http.flavor' - - # Value of the [HTTP User-Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the client - HTTP_USER_AGENT = 'http.user_agent' - - # The size of the request payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For requests using transport encoding, this should be the compressed size - HTTP_REQUEST_CONTENT_LENGTH = 'http.request_content_length' - - # The size of the uncompressed request payload body after transport decoding. Not set if transport encoding not used - HTTP_REQUEST_CONTENT_LENGTH_UNCOMPRESSED = 'http.request_content_length_uncompressed' - - # The size of the response payload body in bytes. 
This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For requests using transport encoding, this should be the compressed size - HTTP_RESPONSE_CONTENT_LENGTH = 'http.response_content_length' - - # The size of the uncompressed response payload body after transport decoding. Not set if transport encoding not used - HTTP_RESPONSE_CONTENT_LENGTH_UNCOMPRESSED = 'http.response_content_length_uncompressed' - - # The ordinal number of request re-sending attempt - HTTP_RETRY_COUNT = 'http.retry_count' - - # The primary server name of the matched virtual host. This should be obtained via configuration. If no such configuration can be obtained, this attribute MUST NOT be set ( `net.host.name` should be used instead) - # @note `http.url` is usually not readily available on the server side but would have to be assembled in a cumbersome and sometimes lossy process from other information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus preferred to supply the raw data that is available - HTTP_SERVER_NAME = 'http.server_name' - - # The matched route (path template) - HTTP_ROUTE = 'http.route' - - # The IP address of the original client behind all proxies, if known (e.g. from [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)) - # @note This is not necessarily the same as `net.peer.ip`, which would - # identify the network-level peer, which may be a proxy. - # - # This attribute should be set when a source of information different - # from the one used for `net.peer.ip`, is available even if that other - # source just confirms the same value as `net.peer.ip`. - # Rationale: For `net.peer.ip`, one typically does not know if it - # comes from a proxy, reverse proxy, or the actual client. Setting - # `http.client_ip` when it's the same as `net.peer.ip` means that - # one is at least somewhat confident that the address is not that of - # the closest proxy - HTTP_CLIENT_IP = 'http.client_ip' - - # Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host - NET_HOST_IP = 'net.host.ip' - - # Like `net.peer.port` but for the host port - NET_HOST_PORT = 'net.host.port' - - # Local hostname or similar, see note below - NET_HOST_NAME = 'net.host.name' - - # The internet connection type currently being used by the host - NET_HOST_CONNECTION_TYPE = 'net.host.connection.type' - - # This describes more details regarding the connection.type. It may be the type of cell technology connection, but it could be used for describing details about a wifi connection - NET_HOST_CONNECTION_SUBTYPE = 'net.host.connection.subtype' - - # The name of the mobile carrier - NET_HOST_CARRIER_NAME = 'net.host.carrier.name' - - # The mobile carrier country code - NET_HOST_CARRIER_MCC = 'net.host.carrier.mcc' - - # The mobile carrier network code - NET_HOST_CARRIER_MNC = 'net.host.carrier.mnc' - - # The ISO 3166-1 alpha-2 2-character country code associated with the mobile carrier network - NET_HOST_CARRIER_ICC = 'net.host.carrier.icc' - - # A string identifying the messaging system - MESSAGING_SYSTEM = 'messaging.system' - - # The message destination name. 
This might be equal to the span name but is required nevertheless - MESSAGING_DESTINATION = 'messaging.destination' - - # The kind of message destination - MESSAGING_DESTINATION_KIND = 'messaging.destination_kind' - - # A boolean that is true if the message destination is temporary - MESSAGING_TEMP_DESTINATION = 'messaging.temp_destination' - - # The name of the transport protocol - MESSAGING_PROTOCOL = 'messaging.protocol' - - # The version of the transport protocol - MESSAGING_PROTOCOL_VERSION = 'messaging.protocol_version' - - # Connection string - MESSAGING_URL = 'messaging.url' - - # A value used by the messaging system as an identifier for the message, represented as a string - MESSAGING_MESSAGE_ID = 'messaging.message_id' - - # The [conversation ID](#conversations) identifying the conversation to which the message belongs, represented as a string. Sometimes called "Correlation ID" - MESSAGING_CONVERSATION_ID = 'messaging.conversation_id' - - # The (uncompressed) size of the message payload in bytes. Also use this attribute if it is unknown whether the compressed or uncompressed payload size is reported - MESSAGING_MESSAGE_PAYLOAD_SIZE_BYTES = 'messaging.message_payload_size_bytes' - - # The compressed size of the message payload in bytes - MESSAGING_MESSAGE_PAYLOAD_COMPRESSED_SIZE_BYTES = 'messaging.message_payload_compressed_size_bytes' - - # A string containing the function invocation time in the [ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed in [UTC](https://www.w3.org/TR/NOTE-datetime) - FAAS_TIME = 'faas.time' - - # A string containing the schedule period as [Cron Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm) - FAAS_CRON = 'faas.cron' - - # A boolean that is true if the serverless function is executed for the first time (aka cold-start) - FAAS_COLDSTART = 'faas.coldstart' - - # The name of the invoked function - # @note SHOULD be equal to the `faas.name` resource attribute of the invoked function - FAAS_INVOKED_NAME = 'faas.invoked_name' - - # The cloud provider of the invoked function - # @note SHOULD be equal to the `cloud.provider` resource attribute of the invoked function - FAAS_INVOKED_PROVIDER = 'faas.invoked_provider' - - # The cloud region of the invoked function - # @note SHOULD be equal to the `cloud.region` resource attribute of the invoked function - FAAS_INVOKED_REGION = 'faas.invoked_region' - - # The [`service.name`](../../resource/semantic_conventions/README.md#service) of the remote service. SHOULD be equal to the actual `service.name` resource attribute of the remote service if any - PEER_SERVICE = 'peer.service' - - # Username or client_id extracted from the access token or [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the inbound request from outside the system - ENDUSER_ID = 'enduser.id' - - # Actual/assumed role the client is making the request under extracted from token or application security context - ENDUSER_ROLE = 'enduser.role' - - # Scopes or granted authorities the client currently possesses extracted from token or application security context. 
The value would come from the scope associated with an [OAuth 2.0 Access Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value in a [SAML 2.0 Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html) - ENDUSER_SCOPE = 'enduser.scope' - - # Current "managed" thread ID (as opposed to OS thread ID) - THREAD_ID = 'thread.id' - - # Current thread name - THREAD_NAME = 'thread.name' - - # The method or function name, or equivalent (usually rightmost part of the code unit's name) - CODE_FUNCTION = 'code.function' - - # The "namespace" within which `code.function` is defined. Usually the qualified class or module name, such that `code.namespace` + some separator + `code.function` form a unique identifier for the code unit - CODE_NAMESPACE = 'code.namespace' - - # The source code file name that identifies the code unit as uniquely as possible (preferably an absolute file path) - CODE_FILEPATH = 'code.filepath' - - # The line number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function` - CODE_LINENO = 'code.lineno' - - # The value `aws-api` - RPC_SYSTEM = 'rpc.system' - - # The name of the service to which a request is made, as returned by the AWS SDK - # @note This is the logical name of the service from the RPC interface perspective, which can be different from the name of any implementing class. The `code.namespace` attribute may be used to store the latter (despite the attribute name, it may include a class name; e.g., class with method actually executing the call on the server side, RPC client stub class on the client side) - RPC_SERVICE = 'rpc.service' - - # The name of the operation corresponding to the request, as returned by the AWS SDK - # @note This is the logical name of the method from the RPC interface perspective, which can be different from the name of any implementing method/function. 
The `code.function` attribute may be used to store the latter (e.g., method actually executing the call on the server side, RPC client stub method on the client side) - RPC_METHOD = 'rpc.method' - - # The keys in the `RequestItems` object field - AWS_DYNAMODB_TABLE_NAMES = 'aws.dynamodb.table_names' - - # The JSON-serialized value of each item in the `ConsumedCapacity` response field - AWS_DYNAMODB_CONSUMED_CAPACITY = 'aws.dynamodb.consumed_capacity' - - # The JSON-serialized value of the `ItemCollectionMetrics` response field - AWS_DYNAMODB_ITEM_COLLECTION_METRICS = 'aws.dynamodb.item_collection_metrics' - - # The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter - AWS_DYNAMODB_PROVISIONED_READ_CAPACITY = 'aws.dynamodb.provisioned_read_capacity' - - # The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter - AWS_DYNAMODB_PROVISIONED_WRITE_CAPACITY = 'aws.dynamodb.provisioned_write_capacity' - - # The value of the `ConsistentRead` request parameter - AWS_DYNAMODB_CONSISTENT_READ = 'aws.dynamodb.consistent_read' - - # The value of the `ProjectionExpression` request parameter - AWS_DYNAMODB_PROJECTION = 'aws.dynamodb.projection' - - # The value of the `Limit` request parameter - AWS_DYNAMODB_LIMIT = 'aws.dynamodb.limit' - - # The value of the `AttributesToGet` request parameter - AWS_DYNAMODB_ATTRIBUTES_TO_GET = 'aws.dynamodb.attributes_to_get' - - # The value of the `IndexName` request parameter - AWS_DYNAMODB_INDEX_NAME = 'aws.dynamodb.index_name' - - # The value of the `Select` request parameter - AWS_DYNAMODB_SELECT = 'aws.dynamodb.select' - - # The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request field - AWS_DYNAMODB_GLOBAL_SECONDARY_INDEXES = 'aws.dynamodb.global_secondary_indexes' - - # The JSON-serialized value of each item of the `LocalSecondaryIndexes` request field - AWS_DYNAMODB_LOCAL_SECONDARY_INDEXES = 'aws.dynamodb.local_secondary_indexes' - - # The value of the `ExclusiveStartTableName` request parameter - AWS_DYNAMODB_EXCLUSIVE_START_TABLE = 'aws.dynamodb.exclusive_start_table' - - # The the number of items in the `TableNames` response parameter - AWS_DYNAMODB_TABLE_COUNT = 'aws.dynamodb.table_count' - - # The value of the `ScanIndexForward` request parameter - AWS_DYNAMODB_SCAN_FORWARD = 'aws.dynamodb.scan_forward' - - # The value of the `Segment` request parameter - AWS_DYNAMODB_SEGMENT = 'aws.dynamodb.segment' - - # The value of the `TotalSegments` request parameter - AWS_DYNAMODB_TOTAL_SEGMENTS = 'aws.dynamodb.total_segments' - - # The value of the `Count` response parameter - AWS_DYNAMODB_COUNT = 'aws.dynamodb.count' - - # The value of the `ScannedCount` response parameter - AWS_DYNAMODB_SCANNED_COUNT = 'aws.dynamodb.scanned_count' - - # The JSON-serialized value of each item in the `AttributeDefinitions` request field - AWS_DYNAMODB_ATTRIBUTE_DEFINITIONS = 'aws.dynamodb.attribute_definitions' - - # The JSON-serialized value of each item in the the `GlobalSecondaryIndexUpdates` request field - AWS_DYNAMODB_GLOBAL_SECONDARY_INDEX_UPDATES = 'aws.dynamodb.global_secondary_index_updates' - - # A string identifying the kind of message consumption as defined in the [Operation names](#operation-names) section above. If the operation is "send", this attribute MUST NOT be set, since the operation can be inferred from the span kind in that case - MESSAGING_OPERATION = 'messaging.operation' - - # The identifier for the consumer receiving a message. 
For Kafka, set it to `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are present, or only `messaging.kafka.consumer_group`. For brokers, such as RabbitMQ and Artemis, set it to the `client_id` of the client consuming the message - MESSAGING_CONSUMER_ID = 'messaging.consumer_id' - - # RabbitMQ message routing key - MESSAGING_RABBITMQ_ROUTING_KEY = 'messaging.rabbitmq.routing_key' - - # Message keys in Kafka are used for grouping alike messages to ensure they're processed on the same partition. They differ from `messaging.message_id` in that they're not unique. If the key is `null`, the attribute MUST NOT be set - # @note If the key type is not string, it's string representation has to be supplied for the attribute. If the key has no unambiguous, canonical string form, don't include its value - MESSAGING_KAFKA_MESSAGE_KEY = 'messaging.kafka.message_key' - - # Name of the Kafka Consumer Group that is handling the message. Only applies to consumers, not producers - MESSAGING_KAFKA_CONSUMER_GROUP = 'messaging.kafka.consumer_group' - - # Client Id for the Consumer or Producer that is handling the message - MESSAGING_KAFKA_CLIENT_ID = 'messaging.kafka.client_id' - - # Partition the message is sent to - MESSAGING_KAFKA_PARTITION = 'messaging.kafka.partition' - - # A boolean that is true if the message is a tombstone - MESSAGING_KAFKA_TOMBSTONE = 'messaging.kafka.tombstone' - - # Namespace of RocketMQ resources, resources in different namespaces are individual - MESSAGING_ROCKETMQ_NAMESPACE = 'messaging.rocketmq.namespace' - - # Name of the RocketMQ producer/consumer group that is handling the message. The client type is identified by the SpanKind - MESSAGING_ROCKETMQ_CLIENT_GROUP = 'messaging.rocketmq.client_group' - - # The unique identifier for each client - MESSAGING_ROCKETMQ_CLIENT_ID = 'messaging.rocketmq.client_id' - - # Type of message - MESSAGING_ROCKETMQ_MESSAGE_TYPE = 'messaging.rocketmq.message_type' - - # The secondary classifier of message besides topic - MESSAGING_ROCKETMQ_MESSAGE_TAG = 'messaging.rocketmq.message_tag' - - # Key(s) of message, another way to mark message besides message id - MESSAGING_ROCKETMQ_MESSAGE_KEYS = 'messaging.rocketmq.message_keys' - - # Model of message consumption. This only applies to consumer spans - MESSAGING_ROCKETMQ_CONSUMPTION_MODEL = 'messaging.rocketmq.consumption_model' - - # The [numeric status code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC request - RPC_GRPC_STATUS_CODE = 'rpc.grpc.status_code' - - # Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 does not specify this, the value can be omitted - RPC_JSONRPC_VERSION = 'rpc.jsonrpc.version' - - # `id` property of request or response. Since protocol allows id to be int, string, `null` or missing (for notifications), value is expected to be cast to string for simplicity. Use empty string in case of `null` value. 
Omit entirely if this is a notification
-      RPC_JSONRPC_REQUEST_ID = 'rpc.jsonrpc.request_id'
-
-      # `error.code` property of response if it is an error response
-      RPC_JSONRPC_ERROR_CODE = 'rpc.jsonrpc.error_code'
-
-      # `error.message` property of response if it is an error response
-      RPC_JSONRPC_ERROR_MESSAGE = 'rpc.jsonrpc.error_message'
-
-      # Whether this is a received or sent message
-      MESSAGE_TYPE = 'message.type'
-
-      # MUST be calculated as two different counters starting from `1` one for sent messages and one for received message
-      # @note This way we guarantee that the values will be consistent between different implementations
-      MESSAGE_ID = 'message.id'
-
-      # Compressed size of the message in bytes
-      MESSAGE_COMPRESSED_SIZE = 'message.compressed_size'
-
-      # Uncompressed size of the message in bytes
-      MESSAGE_UNCOMPRESSED_SIZE = 'message.uncompressed_size'
+      def self.const_missing(const_name)
+        attribute_name = OpenTelemetry::SemanticConventions_1_20_0::Trace.const_get(const_name)
+        super(const_name) unless attribute_name
+        const_set(const_name, attribute_name)
+        attribute_name
+      end
     end
   end
-end
\ No newline at end of file
+end
diff --git a/semantic_conventions/lib/opentelemetry/semantic_conventions/version.rb b/semantic_conventions/lib/opentelemetry/semantic_conventions/version.rb
index b323abd7e3..f5ddd49535 100644
--- a/semantic_conventions/lib/opentelemetry/semantic_conventions/version.rb
+++ b/semantic_conventions/lib/opentelemetry/semantic_conventions/version.rb
@@ -6,6 +6,6 @@
 
 module OpenTelemetry
   module SemanticConventions
-    VERSION = '1.10.0'
+    VERSION = '1.20.0'
   end
 end
diff --git a/semantic_conventions/templates/semantic_conventions.j2 b/semantic_conventions/templates/semantic_conventions.j2
index fce6c974e5..608882aa71 100644
--- a/semantic_conventions/templates/semantic_conventions.j2
+++ b/semantic_conventions/templates/semantic_conventions.j2
@@ -4,15 +4,31 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
+require_relative '../{{prev_spec_version}}/{{kind}}'
+
 module OpenTelemetry
-  module SemanticConventions
+  module SemanticConventions_{{spec_version | replace(".", "_")}}
+    # https://github.com/open-telemetry/opentelemetry-specification/blob/v{{spec_version}}/specification/
     module {{module}}
+      {%- if (prev_spec_version | string()) != "" %}
+      def self.const_missing(const_name)
+        attribute_name = OpenTelemetry::SemanticConventions_{{prev_spec_version | replace(".", "_")}}::{{module}}.const_get(const_name)
+        super(const_name) unless attribute_name
+
+        warn "#{const_name} is deprecated."
+        const_set(const_name, attribute_name)
+        attribute_name
+      end
+{# blank line #}
+      {%- endif %}
+      {%- for attribute in attributes | unique(attribute="fqn") | sort(attribute="fqn") %}
       # {{ attribute.brief | to_doc_brief | regex_replace(pattern="\n", replace="\n      # ") }}
       {%- if attribute.note %}
+      #
       # @note {{ attribute.note | to_doc_brief | regex_replace(pattern="\n", replace="\n      # ") }}
       {%- endif %}
-      {%- if attribute.deprecated %}
+      {%- if (attribute.stability | string()) == "StabilityLevel.DEPRECATED" %}
+      #
       # @deprecated {{ attribute.deprecated | to_doc_brief | regex_replace(pattern="\n", replace="\n      # ") }}
       {%- endif %}
       {{ attribute.fqn | to_const_name }} = '{{ attribute.fqn }}'
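
Editor's note on the backwards-compatibility mechanics above: the unversioned `Trace` and `Resource` modules no longer define attribute constants directly; they resolve each constant on first use from the pinned `SemanticConventions_1_20_0` module via `const_missing` and memoize it with `const_set`. The following is a minimal, self-contained sketch of that pattern in plain Ruby (illustrative module and constant names, not the gem's exact code):

    # frozen_string_literal: true

    # Stand-in for the generated, version-pinned module.
    module SemanticConventions_1_20_0
      module Trace
        HTTP_METHOD = 'http.method'
      end
    end

    # Stand-in for the unversioned module that user code references.
    module SemanticConventions
      module Trace
        def self.const_missing(const_name)
          # Look the constant up in the versioned module; const_get raises
          # NameError if it is missing there as well.
          attribute_name = SemanticConventions_1_20_0::Trace.const_get(const_name)
          # Cache it on this module so const_missing only fires once per constant.
          const_set(const_name, attribute_name)
          attribute_name
        end
      end
    end

    # Existing call sites keep working unchanged:
    puts SemanticConventions::Trace::HTTP_METHOD                         # => http.method
    puts SemanticConventions::Trace.const_defined?(:HTTP_METHOD, false)  # => true (memoized)

After the first lookup the constant exists on the unversioned module itself, so the hook adds no cost to later references. The generated per-version modules chain to the previous spec version the same way (additionally emitting a `warn "... is deprecated."`), which is what the template change above wires up.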