diff --git a/.evergreen/aws_lambda b/.evergreen/aws_lambda new file mode 120000 index 0000000000..3366dcbced --- /dev/null +++ b/.evergreen/aws_lambda @@ -0,0 +1 @@ +../.mod/drivers-evergreen-tools/.evergreen/aws_lambda \ No newline at end of file diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 01261a3325..9e82322443 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -168,6 +168,25 @@ functions: EOT + "run CSOT tests": + - command: shell.exec + type: test + params: + shell: bash + working_dir: "src" + script: | + ${PREPARE_SHELL} + # Needed for generating temporary aws credentials. + if [ -n "${FLE}" ]; + then + export AWS_ACCESS_KEY_ID="${fle_aws_key}" + export AWS_SECRET_ACCESS_KEY="${fle_aws_secret}" + export AWS_DEFAULT_REGION="${fle_aws_region}" + fi + export CSOT_SPEC_TESTS=1 + TEST_CMD="bundle exec rspec spec/spec_tests/client_side_operations_timeout_spec.rb" \ + .evergreen/run-tests.sh + "export FLE credentials": - command: shell.exec type: test @@ -728,6 +747,9 @@ tasks: - name: "test-kerberos" commands: - func: "run Kerberos unit tests" + - name: "test-csot" + commands: + - func: "run CSOT tests" - name: "test-fle" commands: - func: "export FLE credentials" @@ -842,6 +864,7 @@ tasks: AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY} AWS_SESSION_TOKEN: ${AWS_SESSION_TOKEN} LAMBDA_STACK_NAME: "dbx-ruby-lambda" + CLUSTER_PREFIX: "dbx-ruby-lambda" RVM_RUBY: ruby-3.2 MONGODB_URI: ${MONGODB_URI} axes: @@ -920,6 +943,10 @@ axes: display_name: Replica Set variables: TOPOLOGY: replica-set + - id: "replica-set-single-node" + display_name: Replica Set (Single Node) + variables: + TOPOLOGY: replica-set-single-node - id: "sharded-cluster" display_name: Sharded variables: @@ -1352,6 +1379,16 @@ buildvariants: tasks: - name: "test-mlaunch" + - matrix_name: CSOT + matrix_spec: + ruby: "ruby-3.2" + mongodb-version: "7.0" + topology: replica-set-single-node + os: rhel8 + display_name: "CSOT - ${mongodb-version}" + tasks: + - name: test-csot + - matrix_name: "no-retry-reads" matrix_spec: retry-reads: no-retry-reads @@ -1390,7 +1427,7 @@ buildvariants: lint: on ruby: "ruby-3.2" mongodb-version: "7.0" - topology: '*' + topology: ["standalone", "replica-set", "sharded-cluster"] os: rhel8 display_name: "${mongodb-version} ${topology} ${lint} ${ruby}" tasks: diff --git a/.evergreen/config/axes.yml.erb b/.evergreen/config/axes.yml.erb index c433cf9c22..b6c1aea945 100644 --- a/.evergreen/config/axes.yml.erb +++ b/.evergreen/config/axes.yml.erb @@ -74,6 +74,10 @@ axes: display_name: Replica Set variables: TOPOLOGY: replica-set + - id: "replica-set-single-node" + display_name: Replica Set (Single Node) + variables: + TOPOLOGY: replica-set-single-node - id: "sharded-cluster" display_name: Sharded variables: diff --git a/.evergreen/config/common.yml.erb b/.evergreen/config/common.yml.erb index 1189b987eb..7b25e5b49a 100644 --- a/.evergreen/config/common.yml.erb +++ b/.evergreen/config/common.yml.erb @@ -165,6 +165,25 @@ functions: EOT + "run CSOT tests": + - command: shell.exec + type: test + params: + shell: bash + working_dir: "src" + script: | + ${PREPARE_SHELL} + # Needed for generating temporary aws credentials. 
+ if [ -n "${FLE}" ]; + then + export AWS_ACCESS_KEY_ID="${fle_aws_key}" + export AWS_SECRET_ACCESS_KEY="${fle_aws_secret}" + export AWS_DEFAULT_REGION="${fle_aws_region}" + fi + export CSOT_SPEC_TESTS=1 + TEST_CMD="bundle exec rspec spec/spec_tests/client_side_operations_timeout_spec.rb" \ + .evergreen/run-tests.sh + "export FLE credentials": - command: shell.exec type: test @@ -725,6 +744,9 @@ tasks: - name: "test-kerberos" commands: - func: "run Kerberos unit tests" + - name: "test-csot" + commands: + - func: "run CSOT tests" - name: "test-fle" commands: - func: "export FLE credentials" @@ -839,5 +861,6 @@ tasks: AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY} AWS_SESSION_TOKEN: ${AWS_SESSION_TOKEN} LAMBDA_STACK_NAME: "dbx-ruby-lambda" + CLUSTER_PREFIX: "dbx-ruby-lambda" RVM_RUBY: ruby-3.2 MONGODB_URI: ${MONGODB_URI} diff --git a/.evergreen/config/standard.yml.erb b/.evergreen/config/standard.yml.erb index 11c75425da..8166879d66 100644 --- a/.evergreen/config/standard.yml.erb +++ b/.evergreen/config/standard.yml.erb @@ -33,6 +33,8 @@ recent_mdb = %w( 6.0 5.3 ) latest_5x_mdb = "5.3".inspect # so it gets quoted as a string + + all_dbs = %w(latest 7.0 6.0 5.3 5.0 4.4 4.2 4.0 3.6) %> buildvariants: @@ -142,6 +144,16 @@ buildvariants: tasks: - name: "test-mlaunch" + - matrix_name: CSOT + matrix_spec: + ruby: <%= latest_ruby %> + mongodb-version: <%= latest_stable_mdb %> + topology: replica-set-single-node + os: rhel8 + display_name: "CSOT - ${mongodb-version}" + tasks: + - name: test-csot + - matrix_name: "no-retry-reads" matrix_spec: retry-reads: no-retry-reads @@ -180,7 +192,7 @@ buildvariants: lint: on ruby: <%= latest_ruby %> mongodb-version: <%= latest_stable_mdb %> - topology: '*' + topology: <%= topologies %> os: rhel8 display_name: "${mongodb-version} ${topology} ${lint} ${ruby}" tasks: diff --git a/.evergreen/handle-paths.sh b/.evergreen/handle-paths.sh new file mode 120000 index 0000000000..77a67a0271 --- /dev/null +++ b/.evergreen/handle-paths.sh @@ -0,0 +1 @@ +../.mod/drivers-evergreen-tools/.evergreen/handle-paths.sh \ No newline at end of file diff --git a/.evergreen/run-deployed-lambda-aws-tests.sh b/.evergreen/run-deployed-lambda-aws-tests.sh deleted file mode 100755 index 32eebfbf2b..0000000000 --- a/.evergreen/run-deployed-lambda-aws-tests.sh +++ /dev/null @@ -1,117 +0,0 @@ -#!/bin/bash -set -o errexit # Exit the script with error if any of the commands fail - -# Explanation of required environment variables: -# -# TEST_LAMBDA_DIRECTORY: The root of the project's Lambda sam project. -# DRIVERS_ATLAS_PUBLIC_API_KEY: The public Atlas key for the drivers org. -# DRIVERS_ATLAS_PRIVATE_API_KEY: The private Atlas key for the drivers org. -# DRIVERS_ATLAS_LAMBDA_USER: The user for the lambda cluster. -# DRIVERS_ATLAS_LAMBDA_PASSWORD: The password for the user. -# DRIVERS_ATLAS_GROUP_ID: The id of the individual projects under the drivers org, per language. -# LAMBDA_STACK_NAME: The name of the stack on lambda "dbx--lambda" -# AWS_REGION: The region for the function - generally us-east-1 - -VARLIST=( -TEST_LAMBDA_DIRECTORY -DRIVERS_ATLAS_PUBLIC_API_KEY -DRIVERS_ATLAS_PRIVATE_API_KEY -DRIVERS_ATLAS_LAMBDA_USER -DRIVERS_ATLAS_LAMBDA_PASSWORD -DRIVERS_ATLAS_GROUP_ID -LAMBDA_STACK_NAME -AWS_REGION -) - -# Ensure that all variables required to run the test are set, otherwise throw -# an error. -for VARNAME in ${VARLIST[*]}; do -[[ -z "${!VARNAME}" ]] && echo "ERROR: $VARNAME not set" && exit 1; -done - -# Set up the common variables -. 
`dirname "$0"`/atlas/setup-variables.sh - -# Restarts the cluster's primary node. -restart_cluster_primary () -{ - echo "Testing Atlas primary restart..." - curl \ - --digest -u ${DRIVERS_ATLAS_PUBLIC_API_KEY}:${DRIVERS_ATLAS_PRIVATE_API_KEY} \ - -X POST \ - "${ATLAS_BASE_URL}/groups/${DRIVERS_ATLAS_GROUP_ID}/clusters/${FUNCTION_NAME}/restartPrimaries" -} - -# Deploys a lambda function to the set stack name. -deploy_lambda_function () -{ - echo "Deploying Lambda function..." - sam deploy \ - --stack-name "${FUNCTION_NAME}" \ - --capabilities CAPABILITY_IAM \ - --resolve-s3 \ - --parameter-overrides "MongoDbUri=${MONGODB_URI}" \ - --region ${AWS_REGION} -} - -# Get the ARN for the Lambda function we created and export it. -get_lambda_function_arn () -{ - echo "Getting Lambda function ARN..." - LAMBDA_FUNCTION_ARN=$(sam list stack-outputs \ - --stack-name ${FUNCTION_NAME} \ - --region ${AWS_REGION} \ - --output json | jq '.[] | select(.OutputKey == "MongoDBFunction") | .OutputValue' | tr -d '"' - ) - echo "Lambda function ARN: $LAMBDA_FUNCTION_ARN" - export LAMBDA_FUNCTION_ARN=$LAMBDA_FUNCTION_ARN -} - -delete_lambda_function () -{ - echo "Deleting Lambda Function..." - sam delete --stack-name ${FUNCTION_NAME} --no-prompts --region us-east-1 -} - -cleanup () -{ - delete_lambda_function -} - -trap cleanup EXIT SIGHUP - -cd "${TEST_LAMBDA_DIRECTORY}" - -sam build --use-container - -deploy_lambda_function - -get_lambda_function_arn - - -check_lambda_output () { - if grep -q FunctionError output.json - then - echo "Exiting due to FunctionError!" - exit 1 - fi - cat output.json | jq -r '.LogResult' | base64 --decode -} - -aws lambda invoke --function-name ${LAMBDA_FUNCTION_ARN} --log-type Tail lambda-invoke-standard.json > output.json -cat lambda-invoke-standard.json -check_lambda_output - -echo "Sleeping 1 minute to build up some streaming protocol heartbeats..." -sleep 60 -aws lambda invoke --function-name ${LAMBDA_FUNCTION_ARN} --log-type Tail lambda-invoke-frozen.json > output.json -cat lambda-invoke-frozen.json -check_lambda_output - -restart_cluster_primary - -echo "Sleeping 1 minute to build up some streaming protocol heartbeats..." -sleep 60 -aws lambda invoke --function-name ${LAMBDA_FUNCTION_ARN} --log-type Tail lambda-invoke-outage.json > output.json -cat lambda-invoke-outage.json -check_lambda_output diff --git a/.evergreen/run-tests-deployed-lambda.sh b/.evergreen/run-tests-deployed-lambda.sh index 9b5d01d526..bfd94fc2e5 100755 --- a/.evergreen/run-tests-deployed-lambda.sh +++ b/.evergreen/run-tests-deployed-lambda.sh @@ -13,4 +13,4 @@ set_env_ruby export MONGODB_URI=${MONGODB_URI} export TEST_LAMBDA_DIRECTORY=`dirname "$0"`/../spec/faas/ruby-sam-app -. `dirname "$0"`/run-deployed-lambda-aws-tests.sh +. `dirname "$0"`/aws_lambda/run-deployed-lambda-aws-tests.sh diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index a2203eb79a..2cb5a49bd6 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -65,6 +65,7 @@ fi calculate_server_args launch_ocsp_mock + launch_server "$dbdir" uri_options="$URI_OPTIONS" @@ -90,6 +91,9 @@ elif test "$TOPOLOGY" = replica-set; then # or it can try to send the commands to secondaries. hosts=localhost:27017,localhost:27018 uri_options="$uri_options&replicaSet=test-rs" +elif test "$TOPOLOGY" = replica-set-single-node; then + hosts=localhost:27017 + uri_options="$uri_options&replicaSet=test-rs" else hosts=localhost:27017 fi @@ -283,7 +287,7 @@ fi set_fcv -if test "$TOPOLOGY" = replica-set && ! 
echo "$MONGODB_VERSION" |fgrep -q 2.6; then +if test "$TOPOLOGY" = replica-set || test "$TOPOLOGY" = replica-set-single-node; then ruby -Ilib -I.evergreen/lib -rbundler/setup -rserver_setup -e ServerSetup.new.setup_tags fi diff --git a/.gitmodules b/.gitmodules index 6d428f359d..e1bab0957a 100644 --- a/.gitmodules +++ b/.gitmodules @@ -3,4 +3,4 @@ url = https://github.com/mongodb-labs/drivers-evergreen-tools [submodule "spec/shared"] path = spec/shared - url = git@github.com:mongodb-labs/mongo-ruby-spec-shared.git + url = git@github.com:mongodb-labs/mongo-ruby-spec-shared.git diff --git a/.mod/drivers-evergreen-tools b/.mod/drivers-evergreen-tools index 8cb042bf7f..89e57ded07 160000 --- a/.mod/drivers-evergreen-tools +++ b/.mod/drivers-evergreen-tools @@ -1 +1 @@ -Subproject commit 8cb042bf7fda753ec19eda19f02c441d4220a8dc +Subproject commit 89e57ded0703b76322870856e83647728199f083 diff --git a/Rakefile b/Rakefile index 3e6ab44b02..f9b1d89ad0 100644 --- a/Rakefile +++ b/Rakefile @@ -21,12 +21,12 @@ CLASSIFIERS = [ [%r,^spec_tests,, :spec], ] -RUN_PRIORITY = %i( +RUN_PRIORITY = (ENV['RUN_PRIORITY'] || %( tx_examples unit unit_server integration sdam_integration cursor_reaping query_cache spec spec_sdam_integration -) +)).split.map(&:to_sym) RSpec::Core::RakeTask.new(:spec) do |t| #t.rspec_opts = "--profile 5" if ENV['CI'] diff --git a/lib/mongo.rb b/lib/mongo.rb index b90cd4a011..c866ad1a9e 100644 --- a/lib/mongo.rb +++ b/lib/mongo.rb @@ -39,6 +39,7 @@ require 'mongo/semaphore' require 'mongo/distinguishing_semaphore' require 'mongo/condition_variable' +require 'mongo/csot_timeout_holder' require 'mongo/options' require 'mongo/loggable' require 'mongo/cluster_time' diff --git a/lib/mongo/address.rb b/lib/mongo/address.rb index 90b82dec11..e349ee0619 100644 --- a/lib/mongo/address.rb +++ b/lib/mongo/address.rb @@ -178,6 +178,9 @@ def inspect # @param [ Hash ] opts The options. # # @option opts [ Float ] :connect_timeout Connect timeout. + # @option opts [ Boolean ] :csot Whether the client-side operation timeout + # should be considered when connecting the socket. This option influences + # only what errors will be raised if timeout expires. # @option opts [ true | false ] :ssl Whether to use SSL. # @option opts [ String ] :ssl_ca_cert # Same as the corresponding Client/Socket::SSL option. @@ -214,11 +217,12 @@ def inspect # @since 2.0.0 # @api private def socket(socket_timeout, opts = {}) + csot = !!opts[:csot] opts = { connect_timeout: Server::CONNECT_TIMEOUT, }.update(options).update(Hash[opts.map { |k, v| [k.to_sym, v] }]) - map_exceptions do + map_exceptions(csot) do if seed.downcase =~ Unix::MATCH specific_address = Unix.new(seed.downcase) return specific_address.socket(socket_timeout, opts) @@ -281,11 +285,26 @@ def parse_host_port end end - def map_exceptions + # Maps some errors to different ones, mostly low-level errors to driver + # level errors + # + # @param [ Boolean ] csot Whether the client-side operation timeout + # should be considered when connecting the socket. 
+ def map_exceptions(csot) begin yield rescue Errno::ETIMEDOUT => e - raise Error::SocketTimeoutError, "#{e.class}: #{e} (for #{self})" + if csot + raise Error::TimeoutError, "#{e.class}: #{e} (for #{self})" + else + raise Error::SocketTimeoutError, "#{e.class}: #{e} (for #{self})" + end + rescue Error::SocketTimeoutError => e + if csot + raise Error::TimeoutError, "#{e.class}: #{e} (for #{self})" + else + raise e + end rescue IOError, SystemCallError => e raise Error::SocketError, "#{e.class}: #{e} (for #{self})" rescue OpenSSL::SSL::SSLError => e diff --git a/lib/mongo/auth/aws/credentials_retriever.rb b/lib/mongo/auth/aws/credentials_retriever.rb index fdbbb0e015..7ab83750d5 100644 --- a/lib/mongo/auth/aws/credentials_retriever.rb +++ b/lib/mongo/auth/aws/credentials_retriever.rb @@ -69,20 +69,24 @@ def initialize(user = nil, credentials_cache: CredentialsCache.instance) # Retrieves a valid set of credentials, if possible, or raises # Auth::InvalidConfiguration. # + # @param [ CsotTimeoutHolder | nil ] timeout_holder CSOT timeout, if any. + # # @return [ Auth::Aws::Credentials ] A valid set of credentials. # # @raise Auth::InvalidConfiguration if a source contains an invalid set # of credentials. # @raise Auth::Aws::CredentialsNotFound if credentials could not be # retrieved from any source. - def credentials + # @raise Error::TimeoutError if credentials cannot be retrieved within + # the timeout defined on the operation context. + def credentials(timeout_holder = nil) credentials = credentials_from_user(user) return credentials unless credentials.nil? credentials = credentials_from_environment return credentials unless credentials.nil? - credentials = @credentials_cache.fetch { obtain_credentials_from_endpoints } + credentials = @credentials_cache.fetch { obtain_credentials_from_endpoints(timeout_holder) } return credentials unless credentials.nil? raise Auth::Aws::CredentialsNotFound @@ -127,17 +131,21 @@ def credentials_from_environment # Returns credentials from the AWS metadata endpoints. # + # @param [ CsotTimeoutHolder ] timeout_holder CSOT timeout. + # # @return [ Auth::Aws::Credentials | nil ] A set of credentials, or nil # if retrieval failed or the obtained credentials are invalid. # # @raise Auth::InvalidConfiguration if a source contains an invalid set # of credentials. - def obtain_credentials_from_endpoints - if (credentials = web_identity_credentials) && credentials_valid?(credentials, 'Web identity token') + # @ raise Error::TimeoutError if credentials cannot be retrieved within + # the timeout defined on the operation context. + def obtain_credentials_from_endpoints(timeout_holder = nil) + if (credentials = web_identity_credentials(timeout_holder)) && credentials_valid?(credentials, 'Web identity token') credentials - elsif (credentials = ecs_metadata_credentials) && credentials_valid?(credentials, 'ECS task metadata') + elsif (credentials = ecs_metadata_credentials(timeout_holder)) && credentials_valid?(credentials, 'ECS task metadata') credentials - elsif (credentials = ec2_metadata_credentials) && credentials_valid?(credentials, 'EC2 instance metadata') + elsif (credentials = ec2_metadata_credentials(timeout_holder)) && credentials_valid?(credentials, 'EC2 instance metadata') credentials end end @@ -145,21 +153,26 @@ def obtain_credentials_from_endpoints # Returns credentials from the EC2 metadata endpoint. The credentials # could be empty, partial or invalid. # + # @param [ CsotTimeoutHolder ] timeout_holder CSOT timeout. 
+      #
       # @return [ Auth::Aws::Credentials | nil ] A set of credentials, or nil
       #   if retrieval failed.
-      def ec2_metadata_credentials
+      # @raise Error::TimeoutError if credentials cannot be retrieved within
+      #   the timeout.
+      def ec2_metadata_credentials(timeout_holder = nil)
+        timeout_holder&.check_timeout!
         http = Net::HTTP.new('169.254.169.254')
         req = Net::HTTP::Put.new('/latest/api/token',
           # The TTL is required in order to obtain the metadata token.
           {'x-aws-ec2-metadata-token-ttl-seconds' => '30'})
-        resp = ::Timeout.timeout(METADATA_TIMEOUT) do
+        resp = with_timeout(timeout_holder) do
           http.request(req)
         end
         if resp.code != '200'
           return nil
         end
         metadata_token = resp.body
-        resp = ::Timeout.timeout(METADATA_TIMEOUT) do
+        resp = with_timeout(timeout_holder) do
           http_get(http, '/latest/meta-data/iam/security-credentials', metadata_token)
         end
         if resp.code != '200'
@@ -167,7 +180,7 @@ def ec2_metadata_credentials
         end
         role_name = resp.body
         escaped_role_name = CGI.escape(role_name).gsub('+', '%20')
-        resp = ::Timeout.timeout(METADATA_TIMEOUT) do
+        resp = with_timeout(timeout_holder) do
           http_get(http, "/latest/meta-data/iam/security-credentials/#{escaped_role_name}", metadata_token)
         end
         if resp.code != '200'
@@ -189,7 +202,17 @@ def ec2_metadata_credentials
         return nil
       end
 
-      def ecs_metadata_credentials
+      # Returns credentials from the ECS metadata endpoint. The credentials
+      # could be empty, partial or invalid.
+      #
+      # @param [ CsotTimeoutHolder | nil ] timeout_holder CSOT timeout.
+      #
+      # @return [ Auth::Aws::Credentials | nil ] A set of credentials, or nil
+      #   if retrieval failed.
+      # @raise Error::TimeoutError if credentials cannot be retrieved within
+      #   the timeout defined on the operation context.
+      def ecs_metadata_credentials(timeout_holder = nil)
+        timeout_holder&.check_timeout!
         relative_uri = ENV['AWS_CONTAINER_CREDENTIALS_RELATIVE_URI']
         if relative_uri.nil? || relative_uri.empty?
           return nil
@@ -203,7 +226,7 @@ def ecs_metadata_credentials
         # a leading slash must be added by the driver, but this is not
         # in fact needed.
         req = Net::HTTP::Get.new(relative_uri)
-        resp = ::Timeout.timeout(METADATA_TIMEOUT) do
+        resp = with_timeout(timeout_holder) do
           http.request(req)
         end
         if resp.code != '200'
@@ -225,13 +248,15 @@ def ecs_metadata_credentials
       # inside EKS. See https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html
       # for further details.
       #
+      # @param [ CsotTimeoutHolder | nil ] timeout_holder CSOT timeout.
+      #
       # @return [ Auth::Aws::Credentials | nil ] A set of credentials, or nil
       #   if retrieval failed.
-      def web_identity_credentials
+      def web_identity_credentials(timeout_holder = nil)
         web_identity_token, role_arn, role_session_name = prepare_web_identity_inputs
         return nil if web_identity_token.nil?
         response = request_web_identity_credentials(
-          web_identity_token, role_arn, role_session_name
+          web_identity_token, role_arn, role_session_name, timeout_holder
         )
         return if response.nil?
         credentials_from_web_identity_response(response)
@@ -266,10 +291,15 @@ def prepare_web_identity_inputs
       #   that the caller is assuming.
       # @param [ String ] role_session_name An identifier for the assumed
       #   role session.
+      # @param [ CsotTimeoutHolder | nil ] timeout_holder CSOT timeout.
       #
       # @return [ Net::HTTPResponse | nil ] AWS API response if successful,
       #   otherwise nil.
-      def request_web_identity_credentials(token, role_arn, role_session_name)
+      #
+      # @raise Error::TimeoutError if credentials cannot be retrieved within
+      #   the timeout defined on the operation context.
+      def request_web_identity_credentials(token, role_arn, role_session_name, timeout_holder)
+        timeout_holder&.check_timeout!
         uri = URI('https://sts.amazonaws.com/')
         params = {
           'Action' => 'AssumeRoleWithWebIdentity',
@@ -281,8 +311,10 @@ def request_web_identity_credentials(token, role_arn, role_session_name)
         uri.query = ::URI.encode_www_form(params)
         req = Net::HTTP::Post.new(uri)
         req['Accept'] = 'application/json'
-        resp = Net::HTTP.start(uri.hostname, uri.port, use_ssl: true) do |https|
-          https.request(req)
+        resp = with_timeout(timeout_holder) do
+          Net::HTTP.start(uri.hostname, uri.port, use_ssl: true) do |https|
+            https.request(req)
+          end
         end
         if resp.code != '200'
           return nil
@@ -351,6 +383,27 @@ def credentials_valid?(credentials, source)
         true
       end
 
+
+      # Execute the given block considering the timeout defined on the context,
+      # or the default timeout value.
+      #
+      # We use +Timeout.timeout+ here because there is no other acceptable easy
+      # way to time limit http requests.
+      #
+      # @param [ CsotTimeoutHolder | nil ] timeout_holder CSOT timeout.
+      #
+      # @raise Error::TimeoutError if deadline exceeded.
+      def with_timeout(timeout_holder)
+        timeout = timeout_holder&.remaining_timeout_sec! || METADATA_TIMEOUT
+        exception_class = if timeout_holder&.csot?
+          Error::TimeoutError
+        else
+          nil
+        end
+        ::Timeout.timeout(timeout, exception_class) do
+          yield
+        end
+      end
     end
   end
 end
diff --git a/lib/mongo/auth/base.rb b/lib/mongo/auth/base.rb
index efcae3c151..7c49e96527 100644
--- a/lib/mongo/auth/base.rb
+++ b/lib/mongo/auth/base.rb
@@ -117,7 +117,7 @@ def dispatch_msg(connection, conversation, msg)
         else
           nil
         end
-        result = Operation::Result.new(reply, connection.description, connection_global_id)
+        result = Operation::Result.new(reply, connection.description, connection_global_id, context: context)
         connection.update_cluster_time(result)
         reply_document
       end
diff --git a/lib/mongo/bulk_write.rb b/lib/mongo/bulk_write.rb
index 336a98396e..1286e39634 100644
--- a/lib/mongo/bulk_write.rb
+++ b/lib/mongo/bulk_write.rb
@@ -60,10 +60,15 @@ def execute
     result_combiner = ResultCombiner.new
     operations = op_combiner.combine
     validate_requests!
+    deadline = calculate_deadline
 
-    client.send(:with_session, @options) do |session|
-      context = Operation::Context.new(client: client, session: session)
+    client.with_session(@options) do |session|
       operations.each do |operation|
+        context = Operation::Context.new(
+          client: client,
+          session: session,
+          operation_timeouts: { operation_timeout_ms: op_timeout_ms(deadline) }
+        )
         if single_statement?(operation)
           write_concern = write_concern(session)
           write_with_retry(write_concern, context: context) do |connection, txn_num, context|
@@ -124,6 +129,9 @@ def initialize(collection, requests, options = {})
     @collection = collection
     @requests = requests
     @options = options || {}
+    if @options[:timeout_ms] && @options[:timeout_ms] < 0
+      raise ArgumentError, "timeout_ms option must be a non-negative integer"
+    end
   end
 
   # Is the bulk write ordered?
@@ -162,6 +170,31 @@ def write_concern(session = nil)
     :update_one,
     :insert_one
   ].freeze
+
+  # @return [ Float | nil ] Deadline for the batch of operations, if set.
+  def calculate_deadline
+    timeout_ms = @options[:timeout_ms] || collection.timeout_ms
+    return nil if timeout_ms.nil?
+
+    if timeout_ms == 0
+      0
+    else
+      Utils.monotonic_time + (timeout_ms / 1_000.0)
+    end
+  end
+
+  # @param [ Float | nil ] deadline Deadline for the batch of operations.
+  #
+  # @return [ Integer | nil ] Timeout in milliseconds for the next operation.
+ def op_timeout_ms(deadline) + return nil if deadline.nil? + + if deadline == 0 + 0 + else + ((deadline - Utils.monotonic_time) * 1_000).to_i + end + end + def single_statement?(operation) SINGLE_STATEMENT_OPS.include?(operation.keys.first) end diff --git a/lib/mongo/client.rb b/lib/mongo/client.rb index e06231d4ad..4dee22311a 100644 --- a/lib/mongo/client.rb +++ b/lib/mongo/client.rb @@ -111,6 +111,7 @@ class Client :ssl_verify_certificate, :ssl_verify_hostname, :ssl_verify_ocsp_endpoint, + :timeout_ms, :truncate_logs, :user, :wait_queue_timeout, @@ -413,6 +414,8 @@ def hash # @option options [ true, false ] :ssl_verify_hostname Whether to perform peer hostname # validation. This setting overrides :ssl_verify with respect to whether hostname validation # is performed. + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # @option options [ true, false ] :truncate_logs Whether to truncate the # logs at the default 250 characters. # @option options [ String ] :user The user name. @@ -934,8 +937,10 @@ def reconnect # See https://mongodb.com/docs/manual/reference/command/listDatabases/ # for more information and usage. # @option opts [ Session ] :session The session to use. - # @option options [ Object ] :comment A user-provided + # @option opts [ Object ] :comment A user-provided # comment to attach to this command. + # @option opts [ Integer | nil ] :timeout_ms Operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # # @return [ Array ] The names of the databases. # @@ -955,7 +960,11 @@ def database_names(filter = {}, opts = {}) # # @option opts [ true, false ] :authorized_databases A flag that determines # which databases are returned based on user privileges when access control - # is enabled + # is enabled. + # @option opts [ Object ] :comment A user-provided + # comment to attach to this command. + # @option opts [ Integer | nil ] :timeout_ms Operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # # See https://mongodb.com/docs/manual/reference/command/listDatabases/ # for more information and usage. @@ -1095,7 +1104,7 @@ def watch(pipeline = [], options = {}) return use(Database::ADMIN).watch(pipeline, options) unless database.name == Database::ADMIN view_options = options.dup - view_options[:await_data] = true if options[:max_await_time_ms] + view_options[:cursor_type] = :tailable_await if options[:max_await_time_ms] Mongo::Collection::View::ChangeStream.new( Mongo::Collection::View.new(self["#{Database::COMMAND}.aggregate"], {}, view_options), @@ -1185,6 +1194,22 @@ def encrypted_fields_map @encrypted_fields_map ||= @options.fetch(:auto_encryption_options, {})[:encrypted_fields_map] end + # @return [ Integer | nil ] Value of timeout_ms option if set. + # @api private + def timeout_ms + @options[:timeout_ms] + end + + # @return [ Float | nil ] Value of timeout_ms option converted to seconds. + # @api private + def timeout_sec + if timeout_ms.nil? + nil + else + timeout_ms / 1_000.0 + end + end + private # Create a new encrypter object using the client's auto encryption options @@ -1230,6 +1255,8 @@ def do_close # @option options [ true | false ] :implicit When no session is passed in, # whether to create an implicit session. # @option options [ Session ] :session The session to validate and return. 
+ # @option options [ Operation::Context | nil ] :context Context of the + # operation the session is used for. # # @return [ Session ] A session object. # @@ -1242,7 +1269,7 @@ def get_session!(options = {}) return options[:session].validate!(self) end - cluster.validate_session_support! + cluster.validate_session_support!(timeout: timeout_sec) options = {implicit: true}.update(options) diff --git a/lib/mongo/client_encryption.rb b/lib/mongo/client_encryption.rb index 5390048edd..b585a8925f 100644 --- a/lib/mongo/client_encryption.rb +++ b/lib/mongo/client_encryption.rb @@ -40,6 +40,8 @@ class ClientEncryption # should be hashes of TLS connection options. The options are equivalent # to TLS connection options of Mongo::Client. # @see Mongo::Client#initialize for list of TLS options. + # @option options [ Integer ] :timeout_ms Timeout that will be applied to all + # operations on this instance. # # @raise [ ArgumentError ] If required options are missing or incorrectly # formatted. diff --git a/lib/mongo/cluster.rb b/lib/mongo/cluster.rb index eac6d6229d..46e9f556f1 100644 --- a/lib/mongo/cluster.rb +++ b/lib/mongo/cluster.rb @@ -779,12 +779,19 @@ def has_writable_server? # Deprecated and ignored. # @param [ Session | nil ] session Optional session to take into account # for mongos pinning. + # @param [ Float | nil ] :timeout Timeout in seconds for the operation, + # if any. # # @return [ Mongo::Server ] A primary server. # # @since 2.0.0 - def next_primary(ping = nil, session = nil) - ServerSelector.primary.select_server(self, nil, session) + def next_primary(ping = nil, session = nil, timeout: nil) + ServerSelector.primary.select_server( + self, + nil, + session, + timeout: timeout + ) end # Get the connection pool for the server. @@ -974,8 +981,11 @@ def disconnect_server_if_connected(server) # any servers and doesn't find any servers for the duration of # server selection timeout. # + # @param [ Float | nil ] :timeout Timeout for the validation. Since the + # validation process involves server selection, + # # @api private - def validate_session_support! + def validate_session_support!(timeout: nil) if topology.is_a?(Topology::LoadBalanced) return end @@ -993,7 +1003,7 @@ def validate_session_support! # No data bearing servers known - perform server selection to try to # get a response from at least one of them, to return an accurate # assessment of whether sessions are currently supported. 
- ServerSelector.get(mode: :primary_preferred).select_server(self) + ServerSelector.get(mode: :primary_preferred).select_server(self, timeout: timeout) @state_change_lock.synchronize do @sdam_flow_lock.synchronize do unless topology.logical_session_timeout diff --git a/lib/mongo/cluster/sdam_flow.rb b/lib/mongo/cluster/sdam_flow.rb index eab2fd88b9..bd908b682e 100644 --- a/lib/mongo/cluster/sdam_flow.rb +++ b/lib/mongo/cluster/sdam_flow.rb @@ -116,8 +116,12 @@ def server_description_changed log_warn( "Server #{updated_desc.address.to_s} has an incorrect replica set name '#{updated_desc.replica_set_name}'; expected '#{topology.replica_set_name}'" ) - @updated_desc = ::Mongo::Server::Description.new(updated_desc.address, - {}, average_round_trip_time: updated_desc.average_round_trip_time) + @updated_desc = ::Mongo::Server::Description.new( + updated_desc.address, + {}, + average_round_trip_time: updated_desc.average_round_trip_time, + minimum_round_trip_time: updated_desc.minimum_round_trip_time + ) update_server_descriptions end end @@ -233,8 +237,12 @@ def update_rs_from_primary end if stale_primary? - @updated_desc = ::Mongo::Server::Description.new(updated_desc.address, - {}, average_round_trip_time: updated_desc.average_round_trip_time) + @updated_desc = ::Mongo::Server::Description.new( + updated_desc.address, + {}, + average_round_trip_time: updated_desc.average_round_trip_time, + minimum_round_trip_time: updated_desc.minimum_round_trip_time + ) update_server_descriptions check_if_has_primary return @@ -270,9 +278,14 @@ def update_rs_from_primary servers_list.each do |server| if server.address != updated_desc.address if server.primary? - server.update_description(::Mongo::Server::Description.new( - server.address, {}, - average_round_trip_time: server.description.average_round_trip_time)) + server.update_description( + ::Mongo::Server::Description.new( + server.address, + {}, + average_round_trip_time: server.description.average_round_trip_time, + minimum_round_trip_time: updated_desc.minimum_round_trip_time + ) + ) end end end diff --git a/lib/mongo/collection.rb b/lib/mongo/collection.rb index a2b2076b7d..5cd379b982 100644 --- a/lib/mongo/collection.rb +++ b/lib/mongo/collection.rb @@ -134,8 +134,10 @@ def ==(other) # and *:nearest*. # - *:tag_sets* -- an array of hashes. # - *:local_threshold*. - # @option opts [ Session ] :session The session to use for the operation. - # @option opts [ Integer ] :size The size of the capped collection. + # @option options [ Session ] :session The session to use for the operation. + # @option options [ Integer ] :size The size of the capped collection. + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # @option opts [ Hash ] :time_series Create a time-series collection. # The hash may have the following items: # - *:timeField* -- The name of the field which contains the date in each @@ -163,6 +165,7 @@ def initialize(database, name, options = {}) @database = database @name = name.to_s.freeze @options = options.dup + @timeout_ms = options.delete(:timeout_ms) =begin WriteConcern object support if @options[:write_concern].is_a?(WriteConcern::Base) # Cache the instance so that we do not needlessly reconstruct it. 
@@ -401,7 +404,10 @@ def create(opts = {}) self.write_concern end - context = Operation::Context.new(client: client, session: session) + context = Operation::Context.new( + client: client, + session: session + ) maybe_create_qe_collections(opts[:encrypted_fields], client, session) do |encrypted_fields| Operation::Create.new( selector: operation, @@ -413,7 +419,10 @@ def create(opts = {}) collation: options[:collation] || options['collation'], encrypted_fields: encrypted_fields, validator: options[:validator], - ).execute(next_primary(nil, session), context: context) + ).execute( + next_primary(nil, session), + context: context + ) end end end @@ -432,12 +441,14 @@ def create(opts = {}) # @option opts [ Hash ] :write_concern The write concern options. # @option opts [ Hash | nil ] :encrypted_fields Encrypted fields hash that # was provided to `create` collection helper. + # @option opts [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # # @return [ Result ] The result of the command. # # @since 2.0.0 def drop(opts = {}) - client.send(:with_session, opts) do |session| + client.with_session(opts) do |session| maybe_drop_emm_collections(opts[:encrypted_fields], client, session) do temp_write_concern = write_concern write_concern = if opts[:write_concern] @@ -445,7 +456,11 @@ def drop(opts = {}) else temp_write_concern end - context = Operation::Context.new(client: client, session: session) + context = Operation::Context.new( + client: client, + session: session, + operation_timeouts: operation_timeouts(opts) + ) operation = Operation::Drop.new({ selector: { :drop => name }, db_name: database.name, @@ -496,6 +511,11 @@ def drop(opts = {}) # @option options [ Integer ] :skip The number of docs to skip before returning results. # @option options [ Hash ] :sort The key and direction pairs by which the result set # will be sorted. + # @option options [ :cursor_lifetime | :iteration ] :timeout_mode How to interpret + # :timeout_ms (whether it applies to the lifetime of the cursor, or per + # iteration). + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # @option options [ Hash ] :let Mapping of variables to use in the command. # See the server documentation for details. # @@ -528,11 +548,9 @@ def find(filter = nil, options = {}) # See the server documentation for details. # @option options [ Integer ] :max_time_ms The maximum amount of time in # milliseconds to allow the aggregation to run. - # @option options [ true | false ] :use_cursor Indicates whether the command - # will request that the server provide results using a cursor. Note that - # as of server version 3.6, aggregations always provide results using a - # cursor and this option is therefore not valid. # @option options [ Session ] :session The session to use. + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # # @return [ View::Aggregation ] The aggregation object. # @@ -600,6 +618,11 @@ def aggregate(pipeline, options = {}) # events included with this flag set are: createIndexes, dropIndexes, # modify, create, shardCollection, reshardCollection, # refineCollectionShardKey. 
+ # @option options [ :cursor_lifetime | :iteration ] :timeout_mode How to interpret + # :timeout_ms (whether it applies to the lifetime of the cursor, or per + # iteration). + # @option options [ Integer ] :timeout_ms The maximum amount of time to + # allow the query to run, in milliseconds. # # @note A change stream only allows 'majority' read concern. # @note This helper method is preferable to running a raw aggregation with @@ -610,7 +633,7 @@ def aggregate(pipeline, options = {}) # @since 2.5.0 def watch(pipeline = [], options = {}) view_options = options.dup - view_options[:await_data] = true if options[:max_await_time_ms] + view_options[:cursor_type] = :tailable_await if options[:max_await_time_ms] View::ChangeStream.new(View.new(self, {}, view_options), pipeline, nil, options) end @@ -631,6 +654,8 @@ def watch(pipeline = [], options = {}) # @option options [ Session ] :session The session to use. # @option options [ Object ] :comment A user-provided # comment to attach to this command. + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # # @return [ Integer ] The document count. # @@ -667,6 +692,8 @@ def count(filter = nil, options = {}) # @option options [ Session ] :session The session to use. # @option options [ Object ] :comment A user-provided # comment to attach to this command. + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # # @return [ Integer ] The document count. # @@ -688,6 +715,8 @@ def count_documents(filter = {}, options = {}) # @option options [ Hash ] :read The read preference options. # @option options [ Object ] :comment A user-provided # comment to attach to this command. + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # # @return [ Integer ] The document count. # @@ -709,6 +738,8 @@ def estimated_document_count(options = {}) # @option options [ Hash ] :read The read preference options. # @option options [ Hash ] :collation The collation to use. # @option options [ Session ] :session The session to use. + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # # @return [ Array ] The list of distinct values. # @@ -781,6 +812,8 @@ def inspect # @option opts [ Object ] :comment A user-provided comment to attach to # this command. # @option opts [ Session ] :session The session to use for the operation. + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # @option opts [ Hash ] :write_concern The write concern options. # Can be :w => Integer, :fsync => Boolean, :j => Boolean. 
# @@ -801,7 +834,11 @@ def insert_one(document, opts = {}) raise ArgumentError, "Document to be inserted cannot be nil" end - context = Operation::Context.new(client: client, session: session) + context = Operation::Context.new( + client: client, + session: session, + operation_timeouts: operation_timeouts(opts) + ) write_with_retry(write_concern, context: context) do |connection, txn_num, context| Operation::Insert.new( :documents => [ document ], @@ -834,6 +871,9 @@ def insert_one(document, opts = {}) # @option options [ true | false ] :ordered Whether the operations # should be executed in order. # @option options [ Session ] :session The session to use for the operation. + # @option options [ Integer ] :timeout_ms The timeout in milliseconds for the + # complete operation. Must a positive integer. The default value is unset + # which means infinite. # @option options [ Hash ] :write_concern The write concern options. # Can be :w => Integer, :fsync => Boolean, :j => Boolean. # @@ -862,6 +902,8 @@ def insert_many(documents, options = {}) # @option options [ true | false ] :bypass_document_validation Whether or # not to skip document level validation. # @option options [ Session ] :session The session to use for the set of operations. + # @option options [ Integer ] :timeout_ms The timeout in milliseconds for all the operations. + # Must a positive integer. The default value is unset which means infinite. # @option options [ Hash ] :let Mapping of variables to use in the command. # See the server documentation for details. # @@ -884,6 +926,8 @@ def bulk_write(requests, options = {}) # @option options [ Session ] :session The session to use. # @option options [ Hash | String ] :hint The index to use for this operation. # May be specified as a Hash (e.g. { _id: 1 }) or a String (e.g. "_id_"). + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # @option options [ Hash ] :let Mapping of variables to use in the command. # See the server documentation for details. # @@ -906,6 +950,8 @@ def delete_one(filter = nil, options = {}) # @option options [ Session ] :session The session to use. # @option options [ Hash | String ] :hint The index to use for this operation. # May be specified as a Hash (e.g. { _id: 1 }) or a String (e.g. "_id_"). + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # @option options [ Hash ] :let Mapping of variables to use in the command. # See the server documentation for details. # @@ -931,12 +977,17 @@ def delete_many(filter = nil, options = {}) # @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command # to run in milliseconds. # @option options [ Session ] :session The session to use. + # @option options [ :cursor_lifetime | :iteration ] :timeout_mode How to interpret + # :timeout_ms (whether it applies to the lifetime of the cursor, or per + # iteration). + # @option options [ Integer ] :timeout_ms The maximum amount of time to + # allow the query to run, in milliseconds. # # @return [ Array ] An array of cursors. # # @since 2.1 def parallel_scan(cursor_count, options = {}) - find({}, options).send(:parallel_scan, cursor_count, options) + find({}, options).parallel_scan(cursor_count, options) end # Replaces a single document in the collection with the new document. 
@@ -954,6 +1005,8 @@ def parallel_scan(cursor_count, options = {}) # not to skip document level validation. # @option options [ Hash ] :collation The collation to use. # @option options [ Session ] :session The session to use. + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # @option options [ Hash | String ] :hint The index to use for this operation. # May be specified as a Hash (e.g. { _id: 1 }) or a String (e.g. "_id_"). # @option options [ Hash ] :let Mapping of variables to use in the command. @@ -983,6 +1036,8 @@ def replace_one(filter, replacement, options = {}) # @option options [ Array ] :array_filters A set of filters specifying to which array elements # an update should apply. # @option options [ Session ] :session The session to use. + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # @option options [ Hash | String ] :hint The index to use for this operation. # May be specified as a Hash (e.g. { _id: 1 }) or a String (e.g. "_id_"). # @option options [ Hash ] :let Mapping of variables to use in the command. @@ -1012,6 +1067,8 @@ def update_many(filter, update, options = {}) # @option options [ Array ] :array_filters A set of filters specifying to which array elements # an update should apply. # @option options [ Session ] :session The session to use. + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # @option options [ Hash | String ] :hint The index to use for this operation. # May be specified as a Hash (e.g. { _id: 1 }) or a String (e.g. "_id_"). # @option options [ Hash ] :let Mapping of variables to use in the command. @@ -1042,6 +1099,8 @@ def update_one(filter, update, options = {}) # Defaults to the collection's write concern. # @option options [ Hash ] :collation The collation to use. # @option options [ Session ] :session The session to use. + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # @option options [ Hash | String ] :hint The index to use for this operation. # May be specified as a Hash (e.g. { _id: 1 }) or a String (e.g. "_id_"). # @option options [ Hash ] :let Mapping of variables to use in the command. @@ -1086,6 +1145,8 @@ def find_one_and_delete(filter, options = {}) # May be specified as a Hash (e.g. { _id: 1 }) or a String (e.g. "_id_"). # @option options [ Hash ] :let Mapping of variables to use in the command. # See the server documentation for details. + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # # @return [ BSON::Document ] The document. # @@ -1122,6 +1183,8 @@ def find_one_and_update(filter, update, options = {}) # @option options [ Session ] :session The session to use. # @option options [ Hash | String ] :hint The index to use for this operation. # May be specified as a Hash (e.g. { _id: 1 }) or a String (e.g. "_id_"). + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # @option options [ Hash ] :let Mapping of variables to use in the command. 
# See the server documentation for details. # @@ -1152,5 +1215,28 @@ def namespace def system_collection? name.start_with?('system.') end + + # @return [ Integer | nil ] Operation timeout that is for this database or + # for the corresponding client. + # + # @api private + def timeout_ms + @timeout_ms || database.timeout_ms + end + + # @return [ Hash ] timeout_ms value set on the operation level (if any), + # and/or timeout_ms that is set on collection/database/client level (if any). + # + # @api private + def operation_timeouts(opts = {}) + # TODO: We should re-evaluate if we need two timeouts separately. + {}.tap do |result| + if opts[:timeout_ms].nil? + result[:inherited_timeout_ms] = timeout_ms + else + result[:operation_timeout_ms] = opts.delete(:timeout_ms) + end + end + end end end diff --git a/lib/mongo/collection/helpers.rb b/lib/mongo/collection/helpers.rb index a6feaf9c99..f21dfedc87 100644 --- a/lib/mongo/collection/helpers.rb +++ b/lib/mongo/collection/helpers.rb @@ -30,7 +30,7 @@ module Helpers # @return [ Result ] The result of the execution. def do_drop(operation, session, context) operation.execute(next_primary(nil, session), context: context) - rescue Error::OperationFailure => ex + rescue Error::OperationFailure::Family => ex # NamespaceNotFound if ex.code == 26 || ex.code.nil? && ex.message =~ /ns not found/ false diff --git a/lib/mongo/collection/view.rb b/lib/mongo/collection/view.rb index dcc0f89752..476fe65992 100644 --- a/lib/mongo/collection/view.rb +++ b/lib/mongo/collection/view.rb @@ -63,10 +63,10 @@ class View :client, :cluster, :database, + :nro_write_with_retry, :read_with_retry, :read_with_retry_cursor, :write_with_retry, - :nro_write_with_retry, :write_concern_with_session # Delegate to the cluster for the next primary. @@ -74,6 +74,12 @@ class View alias :selector :filter + # @return [ Integer | nil | The timeout_ms value that was passed as an + # option to the view. + # + # @api private + attr_reader :operation_timeout_ms + # Compare two +View+ objects. # # @example Compare the view with another object. @@ -151,15 +157,24 @@ def hash # document more than once. Deprecated as of MongoDB server version 4.0. # @option options [ Hash ] :sort The key and direction pairs used to sort # the results. + # @option options [ :cursor_lifetime | :iteration ] :timeout_mode How to interpret + # :timeout_ms (whether it applies to the lifetime of the cursor, or per + # iteration). + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # # @since 2.0.0 def initialize(collection, filter = {}, options = {}) validate_doc!(filter) - @collection = collection filter = BSON::Document.new(filter) options = BSON::Document.new(options) + @collection = collection + @operation_timeout_ms = options.delete(:timeout_ms) + + validate_timeout_mode!(options) + # This is when users pass $query in filter and other modifiers # alongside? query = filter.delete(:$query) @@ -171,6 +186,14 @@ def initialize(collection, filter = {}, options = {}) @options = Operation::Find::Builder::Modifiers.map_driver_options(modifiers).merge!(options).freeze end + # The timeout_ms value to use for this operation; either specified as an + # option to the view, or inherited from the collection. + # + # @return [ Integer | nil ] the timeout_ms for this operation + def timeout_ms + operation_timeout_ms || collection.timeout_ms + end + # Get a human-readable string representation of +View+. 
# # @example Get the inspection. @@ -196,6 +219,20 @@ def write_concern WriteConcern.get(options[:write_concern] || options[:write] || collection.write_concern) end + # @return [ Hash ] timeout_ms value set on the operation level (if any), + # and/or timeout_ms that is set on collection/database/client level (if any). + # + # @api private + def operation_timeouts(opts = {}) + {}.tap do |result| + if opts[:timeout_ms] || operation_timeout_ms + result[:operation_timeout_ms] = opts[:timeout_ms] || operation_timeout_ms + else + result[:inherited_timeout_ms] = collection.timeout_ms + end + end + end + private def initialize_copy(other) @@ -205,13 +242,14 @@ def initialize_copy(other) end def new(options) + options = options.merge(timeout_ms: operation_timeout_ms) if operation_timeout_ms View.new(collection, filter, options) end def view; self; end def with_session(opts = {}, &block) - client.send(:with_session, @options.merge(opts), &block) + client.with_session(@options.merge(opts), &block) end end end diff --git a/lib/mongo/collection/view/aggregation.rb b/lib/mongo/collection/view/aggregation.rb index 396c6eb142..ccbd5e09e0 100644 --- a/lib/mongo/collection/view/aggregation.rb +++ b/lib/mongo/collection/view/aggregation.rb @@ -15,6 +15,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +require 'mongo/collection/view/aggregation/behavior' + module Mongo class Collection class View @@ -23,46 +25,11 @@ class View # # @since 2.0.0 class Aggregation - extend Forwardable - include Enumerable - include Immutable - include Iterable - include Explainable - include Loggable - include Retryable + include Behavior - # @return [ View ] view The collection view. - attr_reader :view # @return [ Array ] pipeline The aggregation pipeline. attr_reader :pipeline - # Delegate necessary operations to the view. - def_delegators :view, :collection, :read, :cluster - - # Delegate necessary operations to the collection. - def_delegators :collection, :database, :client - - # The reroute message. - # - # @since 2.1.0 - # @deprecated - REROUTE = 'Rerouting the Aggregation operation to the primary server.'.freeze - - # Set to true if disk usage is allowed during the aggregation. - # - # @example Set disk usage flag. - # aggregation.allow_disk_use(true) - # - # @param [ true, false ] value The flag value. - # - # @return [ true, false, Aggregation ] The aggregation if a value was - # set or the value if used as a getter. - # - # @since 2.0.0 - def allow_disk_use(value = nil) - configure(:allow_disk_use, value) - end - # Initialize the aggregation for the provided collection view, pipeline # and options. # @@ -87,58 +54,25 @@ def allow_disk_use(value = nil) # See the server documentation for details. # @option options [ Integer ] :max_time_ms The maximum amount of time in # milliseconds to allow the aggregation to run. - # @option options [ true, false ] :use_cursor Indicates whether the command - # will request that the server provide results using a cursor. Note that - # as of server version 3.6, aggregations always provide results using a - # cursor and this option is therefore not valid. # @option options [ Session ] :session The session to use. + # @option options [ :cursor_lifetime | :iteration ] :timeout_mode How to interpret + # :timeout_ms (whether it applies to the lifetime of the cursor, or per + # iteration). + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. 
The default value is unset which means infinite. # # @since 2.0.0 def initialize(view, pipeline, options = {}) - @view = view - @pipeline = pipeline.dup - unless Mongo.broken_view_aggregate || view.filter.empty? - @pipeline.unshift(:$match => view.filter) + perform_setup(view, options) do + @pipeline = pipeline.dup + unless Mongo.broken_view_aggregate || view.filter.empty? + @pipeline.unshift(:$match => view.filter) + end end - @options = BSON::Document.new(options).freeze - end - - # Get the explain plan for the aggregation. - # - # @example Get the explain plan for the aggregation. - # aggregation.explain - # - # @return [ Hash ] The explain plan. - # - # @since 2.0.0 - def explain - self.class.new(view, pipeline, options.merge(explain: true)).first - end - - # Whether this aggregation will write its result to a database collection. - # - # @return [ Boolean ] Whether the aggregation will write its result - # to a collection. - # - # @api private - def write? - pipeline.any? { |op| op.key?('$out') || op.key?(:$out) || op.key?('$merge') || op.key?(:$merge) } end private - def server_selector - @view.send(:server_selector) - end - - def aggregate_spec(session, read_preference) - Builder::Aggregation.new( - pipeline, - view, - options.merge(session: session, read_preference: read_preference) - ).specification - end - def new(options) Aggregation.new(view, pipeline, options) end @@ -180,32 +114,17 @@ def effective_read_preference(connection) end - def send_initial_query(server, session) + def send_initial_query(server, context) server.with_connection do |connection| initial_query_op( - session, + context.session, effective_read_preference(connection) ).execute_with_connection( connection, - context: Operation::Context.new(client: client, session: session) + context: context ) end end - - # Skip, sort, limit, projection are specified as pipeline stages - # rather than as options. - def cache_options - { - namespace: collection.namespace, - selector: pipeline, - read_concern: view.read_concern, - read_preference: view.read_preference, - collation: options[:collation], - # Aggregations can read documents from more than one collection, - # so they will be cleared on every write operation. - multi_collection: true, - } - end end end end diff --git a/lib/mongo/collection/view/aggregation/behavior.rb b/lib/mongo/collection/view/aggregation/behavior.rb new file mode 100644 index 0000000000..349b82e4bc --- /dev/null +++ b/lib/mongo/collection/view/aggregation/behavior.rb @@ -0,0 +1,131 @@ +# frozen_string_literal: true + +module Mongo + class Collection + class View + class Aggregation + # Distills the behavior common to aggregator classes, like + # View::Aggregator and View::ChangeStream. + module Behavior + extend Forwardable + include Enumerable + include Immutable + include Iterable + include Explainable + include Loggable + include Retryable + + # @return [ View ] view The collection view. + attr_reader :view + + # Delegate necessary operations to the view. + def_delegators :view, :collection, :read, :cluster, :cursor_type, :limit, :batch_size + + # Delegate necessary operations to the collection. + def_delegators :collection, :database, :client + + # Set to true if disk usage is allowed during the aggregation. + # + # @example Set disk usage flag. + # aggregation.allow_disk_use(true) + # + # @param [ true, false ] value The flag value. + # + # @return [ true, false, Aggregation ] The aggregation if a value was + # set or the value if used as a getter. 
+ # + # @since 2.0.0 + def allow_disk_use(value = nil) + configure(:allow_disk_use, value) + end + + # Get the explain plan for the aggregation. + # + # @example Get the explain plan for the aggregation. + # aggregation.explain + # + # @return [ Hash ] The explain plan. + # + # @since 2.0.0 + def explain + self.class.new(view, pipeline, options.merge(explain: true)).first + end + + # Whether this aggregation will write its result to a database collection. + # + # @return [ Boolean ] Whether the aggregation will write its result + # to a collection. + # + # @api private + def write? + pipeline.any? { |op| op.key?('$out') || op.key?(:$out) || op.key?('$merge') || op.key?(:$merge) } + end + + # @return [ Integer | nil ] the timeout_ms value that was passed as + # an option to this object, or which was inherited from the view. + # + # @api private + def timeout_ms + @timeout_ms || view.timeout_ms + end + + private + + # Common setup for all classes that include this behavior; the + # constructor should invoke this method. + def perform_setup(view, options, forbid: []) + @view = view + + @timeout_ms = options.delete(:timeout_ms) + @options = BSON::Document.new(options).freeze + + yield + + validate_timeout_mode!(options, forbid: forbid) + end + + def server_selector + @view.send(:server_selector) + end + + def aggregate_spec(session, read_preference) + Builder::Aggregation.new( + pipeline, + view, + options.merge(session: session, read_preference: read_preference) + ).specification + end + + # Skip, sort, limit, projection are specified as pipeline stages + # rather than as options. + def cache_options + { + namespace: collection.namespace, + selector: pipeline, + read_concern: view.read_concern, + read_preference: view.read_preference, + collation: options[:collation], + # Aggregations can read documents from more than one collection, + # so they will be cleared on every write operation. + multi_collection: true, + } + end + + # @return [ Hash ] timeout_ms value set on the operation level (if any), + # and/or timeout_ms that is set on collection/database/client level (if any). + # + # @api private + def operation_timeouts(opts = {}) + {}.tap do |result| + if opts[:timeout_ms] || @timeout_ms + result[:operation_timeout_ms] = opts.delete(:timeout_ms) || @timeout_ms + else + result[:inherited_timeout_ms] = view.timeout_ms + end + end + end + end + end + end + end +end diff --git a/lib/mongo/collection/view/builder/aggregation.rb b/lib/mongo/collection/view/builder/aggregation.rb index 964e0b3d48..d60000d5a9 100644 --- a/lib/mongo/collection/view/builder/aggregation.rb +++ b/lib/mongo/collection/view/builder/aggregation.rb @@ -113,17 +113,11 @@ def aggregation_command command[:readConcern] = Options::Mapper.transform_values_to_strings( read_concern) end - command[:cursor] = cursor if cursor + command[:cursor] = batch_size_doc command.merge!(Options::Mapper.transform_documents(options, MAPPINGS)) command end - def cursor - if options[:use_cursor] == true || options[:use_cursor].nil? - batch_size_doc - end - end - def batch_size_doc value = options[:batch_size] || view.batch_size if value == 0 && write? diff --git a/lib/mongo/collection/view/change_stream.rb b/lib/mongo/collection/view/change_stream.rb index ee2d9c23bd..3bcce42989 100644 --- a/lib/mongo/collection/view/change_stream.rb +++ b/lib/mongo/collection/view/change_stream.rb @@ -15,6 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
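For orientation, the resolution rule implemented by the two operation_timeouts helpers added above (on View and on Aggregation::Behavior) can be sketched as follows. This is an illustrative sketch only: the client, the collection name, and the millisecond values are invented, the collection-level :timeout_ms option is assumed to be configured, and the helpers are marked @api private.

    # Illustrative sketch only -- names and values are invented.
    coll = client['events', timeout_ms: 10_000]   # collection-level timeout (assumed)
    view = coll.find(status: 'open')

    view.operation_timeouts                   # => { inherited_timeout_ms: 10_000 }
    view.operation_timeouts(timeout_ms: 500)  # => { operation_timeout_ms: 500 }

An operation-level :timeout_ms always wins; only when none is given does the collection/database/client value travel along as inherited_timeout_ms.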
+require 'mongo/collection/view/aggregation/behavior' require 'mongo/collection/view/change_stream/retryable' module Mongo @@ -35,7 +36,8 @@ class View # # # @since 2.5.0 - class ChangeStream < Aggregation + class ChangeStream + include Aggregation::Behavior include Retryable # @return [ String ] The fullDocument option default value. @@ -60,6 +62,10 @@ class ChangeStream < Aggregation # @since 2.5.0 attr_reader :options + # @return [ Cursor ] the underlying cursor for this operation + # @api private + attr_reader :cursor + # Initialize the change stream for the provided collection view, pipeline # and options. # @@ -125,11 +131,13 @@ class ChangeStream < Aggregation # # @since 2.5.0 def initialize(view, pipeline, changes_for, options = {}) - @view = view - @changes_for = changes_for - @change_stream_filters = pipeline && pipeline.dup - @options = options && options.dup.freeze - @start_after = @options[:start_after] + # change stream cursors can only be :iterable, so we don't allow + # timeout_mode to be specified. + perform_setup(view, options, forbid: %i[ timeout_mode ]) do + @changes_for = changes_for + @change_stream_filters = pipeline && pipeline.dup + @start_after = @options[:start_after] + end # The resume token tracked by the change stream, used only # when there is no cursor, or no cursor resume token @@ -181,24 +189,30 @@ def each # @return [ BSON::Document | nil ] A change stream document. # @since 2.6.0 def try_next + recreate_cursor! if @timed_out + raise StopIteration.new if closed? + begin doc = @cursor.try_next rescue Mongo::Error => e - if !e.change_stream_resumable? - raise - end - - # Rerun initial aggregation. - # Any errors here will stop iteration and break out of this - # method. + # "If a next call fails with a timeout error, drivers MUST NOT + # invalidate the change stream. The subsequent next call MUST + # perform a resume attempt to establish a new change stream on the + # server..." + # + # However, SocketTimeoutErrors are TimeoutErrors, but are also + # change-stream-resumable. To preserve existing (specified) behavior, + # We only count timeouts when the error is not also + # change-stream-resumable. + @timed_out = e.is_a?(Mongo::Error::TimeoutError) && !e.change_stream_resumable? + + raise unless @timed_out || e.change_stream_resumable? - # Save cursor's resume token so we can use it - # to create a new cursor @resume_token = @cursor.resume_token + raise e if @timed_out - close - create_cursor! + recreate_cursor!(@cursor.context) retry end @@ -231,14 +245,17 @@ def try_next # This method ignores any errors that occur when closing the # server-side cursor. # + # @params [ Hash ] opts Options to be passed to the cursor close + # command. + # # @return [ nil ] Always nil. # # @since 2.5.0 - def close + def close(opts = {}) unless closed? begin - @cursor.close - rescue Error::OperationFailure, Error::SocketError, Error::SocketTimeoutError, Error::MissingConnection + @cursor.close(opts) + rescue Error::OperationFailure::Family, Error::SocketError, Error::SocketTimeoutError, Error::MissingConnection # ignore end @cursor = nil @@ -284,6 +301,28 @@ def resume_token cursor_resume_token || @resume_token end + # "change streams are an abstraction around tailable-awaitData cursors..." 
+ # + # @return :tailable_await + def cursor_type + :tailable_await + end + + # "change streams...implicitly use ITERATION mode" + # + # @return :iteration + def timeout_mode + :iteration + end + + # Returns the value of the max_await_time_ms option that was + # passed to this change stream. + # + # @return [ Integer | nil ] the max_await_time_ms value + def max_await_time_ms + options[:max_await_time_ms] + end + private def for_cluster? @@ -298,19 +337,23 @@ def for_collection? !for_cluster? && !for_database? end - def create_cursor! + def create_cursor!(timeout_ms = nil) # clear the cache because we may get a newer or an older server # (rolling upgrades) @start_at_operation_time_supported = nil - session = client.send(:get_session, @options) + session = client.get_session(@options) + context = Operation::Context.new(client: client, session: session, view: self, operation_timeouts: timeout_ms ? { operation_timeout_ms: timeout_ms } : operation_timeouts) + start_at_operation_time = nil start_at_operation_time_supported = nil - @cursor = read_with_retry_cursor(session, server_selector, view) do |server| + + @cursor = read_with_retry_cursor(session, server_selector, self, context: context) do |server| server.with_connection do |connection| start_at_operation_time_supported = connection.description.server_version_gte?('4.0') - result = send_initial_query(connection, session) + result = send_initial_query(connection, context) + if doc = result.replies.first && result.replies.first.documents.first start_at_operation_time = doc['operationTime'] else @@ -324,6 +367,7 @@ def create_cursor! result end end + @start_at_operation_time = start_at_operation_time @start_at_operation_time_supported = start_at_operation_time_supported end @@ -390,11 +434,11 @@ def change_doc end end - def send_initial_query(connection, session) - initial_query_op(session, view.read_preference) + def send_initial_query(connection, context) + initial_query_op(context.session, view.read_preference) .execute_with_connection( connection, - context: Operation::Context.new(client: client, session: session), + context: context, ) end @@ -412,6 +456,15 @@ def time_to_bson_timestamp(time) def resuming? !!@resuming end + + # Recreates the current cursor (typically as a consequence of attempting + # to resume the change stream) + def recreate_cursor!(context = nil) + @timed_out = false + + close + create_cursor!(context&.remaining_timeout_ms) + end end end end diff --git a/lib/mongo/collection/view/iterable.rb b/lib/mongo/collection/view/iterable.rb index 83ec0e458b..c35a23d559 100644 --- a/lib/mongo/collection/view/iterable.rb +++ b/lib/mongo/collection/view/iterable.rb @@ -15,6 +15,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +require 'mongo/cursor_host' + module Mongo class Collection class View @@ -24,13 +26,7 @@ class View # # @since 2.0.0 module Iterable - - # Returns the cursor associated with this view, if any. - # - # @return [ nil | Cursor ] The cursor, if any. - # - # @api private - attr_reader :cursor + include Mongo::CursorHost # Iterate through documents returned by a query with this +View+. # @@ -45,48 +41,21 @@ module Iterable # # @yieldparam [ Hash ] Each matching document. def each - # If the caching cursor is closed and was not fully iterated, - # the documents we have in it are not the complete result set and - # we have no way of completing that iteration. - # Therefore, discard that cursor and start iteration again. 
- # The case of the caching cursor not being closed and not having - # been fully iterated isn't tested - see RUBY-2773. - @cursor = if use_query_cache? && cached_cursor && ( - cached_cursor.fully_iterated? || !cached_cursor.closed? - ) - cached_cursor - else - session = client.send(:get_session, @options) - select_cursor(session).tap do |cursor| - if use_query_cache? - # No need to store the cursor in the query cache if there is - # already a cached cursor stored at this key. - QueryCache.set(cursor, **cache_options) - end - end - end + @cursor = prefer_cached_cursor? ? cached_cursor : new_cursor_for_iteration + return @cursor.to_enum unless block_given? - if use_query_cache? - # If a query with a limit is performed, the query cache will - # re-use results from an earlier query with the same or larger - # limit, and then impose the lower limit during iteration. - limit_for_cached_query = respond_to?(:limit) ? QueryCache.normalized_limit(limit) : nil - end - - if block_given? - # Ruby versions 2.5 and older do not support arr[0..nil] syntax, so - # this must be a separate conditional. - cursor_to_iterate = if limit_for_cached_query - @cursor.to_a[0...limit_for_cached_query] - else - @cursor - end + limit_for_cached_query = compute_limit_for_cached_query - cursor_to_iterate.each do |doc| - yield doc - end + # Ruby versions 2.5 and older do not support arr[0..nil] syntax, so + # this must be a separate conditional. + cursor_to_iterate = if limit_for_cached_query + @cursor.to_a[0...limit_for_cached_query] else - @cursor.to_enum + @cursor + end + + cursor_to_iterate.each do |doc| + yield doc end end @@ -100,7 +69,7 @@ def each # # @return [ nil ] Always nil. # - # @raise [ Error::OperationFailure ] If the server cursor close fails. + # @raise [ Error::OperationFailure::Family ] If the server cursor close fails. # # @since 2.1.0 def close_query @@ -113,18 +82,25 @@ def close_query private def select_cursor(session) + context = Operation::Context.new( + client: client, + session: session, + operation_timeouts: operation_timeouts, + view: self + ) + if respond_to?(:write?, true) && write? server = server_selector.select_server(cluster, nil, session, write_aggregation: true) - result = send_initial_query(server, session) + result = send_initial_query(server, context) if use_query_cache? - CachingCursor.new(view, result, server, session: session) + CachingCursor.new(view, result, server, session: session, context: context) else - Cursor.new(view, result, server, session: session) + Cursor.new(view, result, server, session: session, context: context) end else - read_with_retry_cursor(session, server_selector, view) do |server| - send_initial_query(server, session) + read_with_retry_cursor(session, server_selector, view, context: context) do |server| + send_initial_query(server, context) end end end @@ -168,18 +144,13 @@ def initial_query_op(session) batch_size: batch_size, hint: options[:hint], max_scan: options[:max_scan], - max_time_ms: options[:max_time_ms], max_value: options[:max_value], min_value: options[:min_value], no_cursor_timeout: options[:no_cursor_timeout], return_key: options[:return_key], show_disk_loc: options[:show_disk_loc], comment: options[:comment], - oplog_replay: if (v = options[:oplog_replay]).nil? 
- collection.options[:oplog_replay] - else - v - end, + oplog_replay: oplog_replay } if spec[:oplog_replay] @@ -196,14 +167,45 @@ end end - def send_initial_query(server, session = nil) - initial_query_op(session).execute(server, context: Operation::Context.new(client: client, session: session)) + def send_initial_query(server, context) + initial_query_op(context.session).execute(server, context: context) end def use_query_cache? QueryCache.enabled? && !collection.system_collection? end + # If the caching cursor is closed and was not fully iterated, + # the documents we have in it are not the complete result set and + # we have no way of completing that iteration. + # Therefore, discard that cursor and start iteration again. + def prefer_cached_cursor? + use_query_cache? && + cached_cursor && + (cached_cursor.fully_iterated? || !cached_cursor.closed?) + end + + # Start a new cursor for use when iterating (via #each). + def new_cursor_for_iteration + session = client.get_session(@options) + select_cursor(session).tap do |cursor| + if use_query_cache? + # No need to store the cursor in the query cache if there is + # already a cached cursor stored at this key. + QueryCache.set(cursor, **cache_options) + end + end + end + + def compute_limit_for_cached_query + return nil unless use_query_cache? && respond_to?(:limit) + + # If a query with a limit is performed, the query cache will + # re-use results from an earlier query with the same or larger + # limit, and then impose the lower limit during iteration. + return QueryCache.normalized_limit(limit) + end + # Add tailable cursor options to the command specification if needed. # # @param [ Hash ] spec The command specification. @@ -216,6 +218,13 @@ def maybe_set_tailable_options(spec) spec[:await_data] = true end end + + # @return [ true | false | nil ] options[:oplog_replay], if + # set, otherwise the same option from the collection. + def oplog_replay + v = options[:oplog_replay] + v.nil? ? collection.options[:oplog_replay] : v + end end end end diff --git a/lib/mongo/collection/view/map_reduce.rb index 279ce24409..386227708c 100644 --- a/lib/mongo/collection/view/map_reduce.rb +++ b/lib/mongo/collection/view/map_reduce.rb @@ -51,7 +51,7 @@ class MapReduce attr_reader :reduce_function # Delegate necessary operations to the view. - def_delegators :view, :collection, :read, :cluster + def_delegators :view, :collection, :read, :cluster, :timeout_ms # Delegate necessary operations to the collection. def_delegators :collection, :database, :client @@ -70,9 +70,10 @@ class MapReduce # @yieldparam [ Hash ] Each matching document. def each @cursor = nil - session = client.send(:get_session, @options) + session = client.get_session(@options) server = cluster.next_primary(nil, session) - result = send_initial_query(server, session, context: Operation::Context.new(client: client, session: session)) + context = Operation::Context.new(client: client, session: session, operation_timeouts: view.operation_timeouts) + result = send_initial_query(server, context) result = send_fetch_query(server, session) unless inline? @cursor = Cursor.new(view, result, server, session: session) if block_given? @@ -279,9 +280,9 @@ def secondary_ok?
out.respond_to?(:keys) && out.keys.first.to_s.downcase == INLINE end - def send_initial_query(server, session, context:) + def send_initial_query(server, context) server.with_connection do |connection| - send_initial_query_with_connection(connection, session, context: context) + send_initial_query_with_connection(connection, context.session, context: context) end end diff --git a/lib/mongo/collection/view/readable.rb b/lib/mongo/collection/view/readable.rb index 412137d40f..d548031ac4 100644 --- a/lib/mongo/collection/view/readable.rb +++ b/lib/mongo/collection/view/readable.rb @@ -48,11 +48,9 @@ module Readable # See the server documentation for details. # @option options [ Integer ] :max_time_ms The maximum amount of time in # milliseconds to allow the aggregation to run. - # @option options [ true, false ] :use_cursor Indicates whether the command - # will request that the server provide results using a cursor. Note that - # as of server version 3.6, aggregations always provide results using a - # cursor and this option is therefore not valid. # @option options [ Session ] :session The session to use. + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # # @return [ Aggregation ] The aggregation object. # @@ -157,6 +155,8 @@ def comment(comment = nil) # @option opts [ Mongo::Session ] :session The session to use for the operation. # @option opts [ Object ] :comment A user-provided # comment to attach to this command. + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # # @return [ Integer ] The document count. # @@ -182,7 +182,12 @@ def count(opts = {}) read_pref = opts[:read] || read_preference selector = ServerSelector.get(read_pref || server_selector) with_session(opts) do |session| - read_with_retry(session, selector) do |server| + context = Operation::Context.new( + client: client, + session: session, + operation_timeouts: operation_timeouts(opts) + ) + read_with_retry(session, selector, context) do |server| Operation::Count.new( selector: cmd, db_name: database.name, @@ -193,7 +198,10 @@ def count(opts = {}) # string key. Note that this isn't documented as valid usage. collation: opts[:collation] || opts['collation'] || collation, comment: opts[:comment], - ).execute(server, context: Operation::Context.new(client: client, session: session)) + ).execute( + server, + context: context + ) end.n.to_i end end @@ -216,6 +224,8 @@ def count(opts = {}) # @option opts [ Mongo::Session ] :session The session to use for the operation. # @option ops [ Object ] :comment A user-provided # comment to attach to this command. + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # # @return [ Integer ] The document count. # @@ -227,7 +237,7 @@ def count_documents(opts = {}) pipeline << { :'$limit' => opts[:limit] } if opts[:limit] pipeline << { :'$group' => { _id: 1, n: { :'$sum' => 1 } } } - opts = opts.slice(:hint, :max_time_ms, :read, :collation, :session, :comment) + opts = opts.slice(:hint, :max_time_ms, :read, :collation, :session, :comment, :timeout_ms) opts[:collation] ||= collation first = aggregate(pipeline, opts).first @@ -247,6 +257,8 @@ def count_documents(opts = {}) # @option opts [ Hash ] :read The read preference options. 
# @option opts [ Object ] :comment A user-provided # comment to attach to this command. + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # # @return [ Integer ] The document count. # @@ -267,8 +279,12 @@ def estimated_document_count(opts = {}) read_pref = opts[:read] || read_preference selector = ServerSelector.get(read_pref || server_selector) with_session(opts) do |session| - read_with_retry(session, selector) do |server| - context = Operation::Context.new(client: client, session: session) + context = Operation::Context.new( + client: client, + session: session, + operation_timeouts: operation_timeouts(opts) + ) + read_with_retry(session, selector, context) do |server| cmd = { count: collection.name } cmd[:maxTimeMS] = opts[:max_time_ms] if opts[:max_time_ms] if read_concern @@ -284,7 +300,7 @@ def estimated_document_count(opts = {}) result.n.to_i end end - rescue Error::OperationFailure => exc + rescue Error::OperationFailure::Family => exc if exc.code == 26 # NamespaceNotFound # This should only happen with the aggregation pipeline path @@ -331,7 +347,12 @@ def distinct(field_name, opts = {}) read_pref = opts[:read] || read_preference selector = ServerSelector.get(read_pref || server_selector) with_session(opts) do |session| - read_with_retry(session, selector) do |server| + context = Operation::Context.new( + client: client, + session: session, + operation_timeouts: operation_timeouts(opts) + ) + read_with_retry(session, selector, context) do |server| Operation::Distinct.new( selector: cmd, db_name: database.name, @@ -342,7 +363,10 @@ def distinct(field_name, opts = {}) # For some reason collation was historically accepted as a # string key. Note that this isn't documented as valid usage. collation: opts[:collation] || opts['collation'] || collation, - ).execute(server, context: Operation::Context.new(client: client, session: session)) + ).execute( + server, + context: context + ) end.first['values'] end end @@ -627,6 +651,15 @@ def cursor_type(type = nil) configure(:cursor_type, type) end + # The per-operation timeout in milliseconds. Must a positive integer. + # + # @param [ Integer ] timeout_ms Timeout value. + # + # @return [ Integer, View ] Either the timeout_ms value or a new +View+. + def timeout_ms(timeout_ms = nil) + configure(:timeout_ms, timeout_ms) + end + # @api private def read_concern if options[:session] && options[:session].in_transaction? @@ -656,24 +689,10 @@ def read_preference end end - private - - def collation(doc = nil) - configure(:collation, doc) - end - - def server_selector - @server_selector ||= if options[:session] && options[:session].in_transaction? - ServerSelector.get(read_preference || client.server_selector) - else - ServerSelector.get(read_preference || collection.server_selector) - end - end - def parallel_scan(cursor_count, options = {}) if options[:session] # The session would be overwritten by the one in +options+ later. - session = client.send(:get_session, @options) + session = client.get_session(@options) else session = nil end @@ -712,6 +731,20 @@ def parallel_scan(cursor_count, options = {}) end end + private + + def collation(doc = nil) + configure(:collation, doc) + end + + def server_selector + @server_selector ||= if options[:session] && options[:session].in_transaction? 
+ ServerSelector.get(read_preference || client.server_selector) + else + ServerSelector.get(read_preference || collection.server_selector) + end + end + def validate_doc!(doc) raise Error::InvalidDocument.new unless doc.respond_to?(:keys) end diff --git a/lib/mongo/collection/view/writable.rb b/lib/mongo/collection/view/writable.rb index 40bc230725..ea947b298a 100644 --- a/lib/mongo/collection/view/writable.rb +++ b/lib/mongo/collection/view/writable.rb @@ -46,11 +46,13 @@ module Writable # @option opts [ Session ] :session The session to use. # @option opts [ Hash | String ] :hint The index to use for this operation. # May be specified as a Hash (e.g. { _id: 1 }) or a String (e.g. "_id_"). + # @option opts [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # @option opts [ Hash ] :write_concern The write concern options. # Can be :w => Integer, :fsync => Boolean, :j => Boolean. - # @option options [ Hash ] :let Mapping of variables to use in the command. + # @option opts [ Hash ] :let Mapping of variables to use in the command. # See the server documentation for details. - # @option options [ Object ] :comment A user-provided + # @option opts [ Object ] :comment A user-provided # comment to attach to this command. # # @return [ BSON::Document, nil ] The document, if found. @@ -80,7 +82,11 @@ def find_one_and_delete(opts = {}) comment: opts[:comment], }.compact - context = Operation::Context.new(client: client, session: session) + context = Operation::Context.new( + client: client, + session: session, + operation_timeouts: operation_timeouts(opts) + ) write_with_retry(write_concern, context: context) do |connection, txn_num, context| gte_4_4 = connection.server.description.server_version_gte?('4.4') if !gte_4_4 && opts[:hint] && write_concern && !write_concern.acknowledged? @@ -116,9 +122,11 @@ def find_one_and_delete(opts = {}) # @option opts [ Hash ] :collation The collation to use. # @option opts [ Hash | String ] :hint The index to use for this operation. # May be specified as a Hash (e.g. { _id: 1 }) or a String (e.g. "_id_"). + # @option opts [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # @option opts [ Hash ] :write_concern The write concern options. # Can be :w => Integer, :fsync => Boolean, :j => Boolean. - # @option options [ Hash ] :let Mapping of variables to use in the command. + # @option opts [ Hash ] :let Mapping of variables to use in the command. # See the server documentation for details. # # @return [ BSON::Document ] The document. @@ -136,7 +144,7 @@ def find_one_and_replace(replacement, opts = {}) # @param [ BSON::Document ] document The updates. # @param [ Hash ] opts The options. # - # @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command + # @option opts [ Integer ] :max_time_ms The maximum amount of time to allow the command # to run in milliseconds. # @option opts [ Hash ] :projection The fields to include or exclude in the returned doc. # @option opts [ Hash ] :sort The key and direction pairs by which the result set @@ -149,13 +157,15 @@ def find_one_and_replace(replacement, opts = {}) # @option opts [ Array ] :array_filters A set of filters specifying to which array elements # an update should apply. # @option opts [ Session ] :session The session to use. 
+ # @option opts [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # @option opts [ Hash | String ] :hint The index to use for this operation. # May be specified as a Hash (e.g. { _id: 1 }) or a String (e.g. "_id_"). # @option opts [ Hash ] :write_concern The write concern options. # Can be :w => Integer, :fsync => Boolean, :j => Boolean. - # @option options [ Hash ] :let Mapping of variables to use in the command. + # @option opts [ Hash ] :let Mapping of variables to use in the command. # See the server documentation for details. - # @option options [ Object ] :comment A user-provided + # @option opts [ Object ] :comment A user-provided # comment to attach to this command. # # @return [ BSON::Document | nil ] The document or nil if none is found. @@ -188,7 +198,11 @@ def find_one_and_update(document, opts = {}) comment: opts[:comment] }.compact - context = Operation::Context.new(client: client, session: session) + context = Operation::Context.new( + client: client, + session: session, + operation_timeouts: operation_timeouts(opts) + ) write_with_retry(write_concern, context: context) do |connection, txn_num, context| gte_4_4 = connection.server.description.server_version_gte?('4.4') if !gte_4_4 && opts[:hint] && write_concern && !write_concern.acknowledged? @@ -216,13 +230,15 @@ def find_one_and_update(document, opts = {}) # # @option opts [ Hash ] :collation The collation to use. # @option opts [ Session ] :session The session to use. + # @option opts [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # @option opts [ Hash | String ] :hint The index to use for this operation. # May be specified as a Hash (e.g. { _id: 1 }) or a String (e.g. "_id_"). # @option opts [ Hash ] :write_concern The write concern options. # Can be :w => Integer, :fsync => Boolean, :j => Boolean. - # @option options [ Hash ] :let Mapping of variables to use in the command. + # @option opts [ Hash ] :let Mapping of variables to use in the command. # See the server documentation for details. - # @option options [ Object ] :comment A user-provided + # @option opts [ Object ] :comment A user-provided # comment to attach to this command. # # @return [ Result ] The response from the database. @@ -244,8 +260,11 @@ def delete_many(opts = {}) hint: opts[:hint], collation: opts[:collation] || opts['collation'] || collation, }.compact - - context = Operation::Context.new(client: client, session: session) + context = Operation::Context.new( + client: client, + session: session, + operation_timeouts: operation_timeouts(opts) + ) nro_write_with_retry(write_concern, context: context) do |connection, txn_num, context| gte_4_4 = connection.server.description.server_version_gte?('4.4') if !gte_4_4 && opts[:hint] && write_concern && !write_concern.acknowledged? @@ -274,15 +293,17 @@ def delete_many(opts = {}) # @param [ Hash ] opts The options. # # @option opts [ Hash ] :collation The collation to use. - # @option opts [ Session ] :session The session to use. + # @option opts [ Object ] :comment A user-provided + # comment to attach to this command. # @option opts [ Hash | String ] :hint The index to use for this operation. # May be specified as a Hash (e.g. { _id: 1 }) or a String (e.g. "_id_"). + # @option opts [ Hash ] :let Mapping of variables to use in the command. + # See the server documentation for details. 
+ # @option opts [ Session ] :session The session to use. + # @option opts [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # @option opts [ Hash ] :write_concern The write concern options. # Can be :w => Integer, :fsync => Boolean, :j => Boolean. - # @option options [ Hash ] :let Mapping of variables to use in the command. - # See the server documentation for details. - # @option options [ Object ] :comment A user-provided - # comment to attach to this command. # # @return [ Result ] The response from the database. # @@ -304,7 +325,11 @@ def delete_one(opts = {}) collation: opts[:collation] || opts['collation'] || collation, }.compact - context = Operation::Context.new(client: client, session: session) + context = Operation::Context.new( + client: client, + session: session, + operation_timeouts: operation_timeouts(opts) + ) write_with_retry(write_concern, context: context) do |connection, txn_num, context| gte_4_4 = connection.server.description.server_version_gte?('4.4') if !gte_4_4 && opts[:hint] && write_concern && !write_concern.acknowledged? @@ -334,20 +359,22 @@ def delete_one(opts = {}) # @param [ Hash ] replacement The replacement document. # @param [ Hash ] opts The options. # - # @option opts [ true, false ] :upsert Whether to upsert if the - # document doesn't exist. # @option opts [ true, false ] :bypass_document_validation Whether or # not to skip document level validation. # @option opts [ Hash ] :collation The collation to use. - # @option opts [ Session ] :session The session to use. + # @option opts [ Object ] :comment A user-provided + # comment to attach to this command. # @option opts [ Hash | String ] :hint The index to use for this operation. # May be specified as a Hash (e.g. { _id: 1 }) or a String (e.g. "_id_"). + # @option opts [ Hash ] :let Mapping of variables to use in the command. + # See the server documentation for details. + # @option opts [ Session ] :session The session to use. + # @option opts [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # @option opts [ Hash ] :write_concern The write concern options. + # @option opts [ true, false ] :upsert Whether to upsert if the + # document doesn't exist. # Can be :w => Integer, :fsync => Boolean, :j => Boolean. - # @option options [ Hash ] :let Mapping of variables to use in the command. - # See the server documentation for details. - # @option options [ Object ] :comment A user-provided - # comment to attach to this command. # # @return [ Result ] The response from the database. # @@ -374,7 +401,11 @@ def replace_one(replacement, opts = {}) update_doc['upsert'] = true end - context = Operation::Context.new(client: client, session: session) + context = Operation::Context.new( + client: client, + session: session, + operation_timeouts: operation_timeouts(opts) + ) write_with_retry(write_concern, context: context) do |connection, txn_num, context| gte_4_2 = connection.server.description.server_version_gte?('4.2') if !gte_4_2 && opts[:hint] && write_concern && !write_concern.acknowledged? @@ -404,22 +435,24 @@ def replace_one(replacement, opts = {}) # @param [ Hash | Array ] spec The update document or pipeline. # @param [ Hash ] opts The options. # - # @option opts [ true, false ] :upsert Whether to upsert if the - # document doesn't exist. 
+ # @option opts [ Array ] :array_filters A set of filters specifying to + # which array elements an update should apply. # @option opts [ true, false ] :bypass_document_validation Whether or # not to skip document level validation. # @option opts [ Hash ] :collation The collation to use. - # @option opts [ Array ] :array_filters A set of filters specifying to - # which array elements an update should apply. - # @option opts [ Session ] :session The session to use. + # @option opts [ Object ] :comment A user-provided + # comment to attach to this command. # @option opts [ Hash | String ] :hint The index to use for this operation. # May be specified as a Hash (e.g. { _id: 1 }) or a String (e.g. "_id_"). + # @option opts [ Hash ] :let Mapping of variables to use in the command. + # See the server documentation for details. + # @option opts [ Session ] :session The session to use. + # @option opts [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. + # @option opts [ true, false ] :upsert Whether to upsert if the + # document doesn't exist. # @option opts [ Hash ] :write_concern The write concern options. # Can be :w => Integer, :fsync => Boolean, :j => Boolean. - # @option options [ Hash ] :let Mapping of variables to use in the command. - # See the server documentation for details. - # @option options [ Object ] :comment A user-provided - # comment to attach to this command. # # @return [ Result ] The response from the database. # @@ -447,7 +480,11 @@ def update_many(spec, opts = {}) update_doc['upsert'] = true end - context = Operation::Context.new(client: client, session: session) + context = Operation::Context.new( + client: client, + session: session, + operation_timeouts: operation_timeouts(opts) + ) nro_write_with_retry(write_concern, context: context) do |connection, txn_num, context| gte_4_2 = connection.server.description.server_version_gte?('4.2') if !gte_4_2 && opts[:hint] && write_concern && !write_concern.acknowledged? @@ -476,22 +513,24 @@ def update_many(spec, opts = {}) # @param [ Hash | Array ] spec The update document or pipeline. # @param [ Hash ] opts The options. # - # @option opts [ true, false ] :upsert Whether to upsert if the - # document doesn't exist. + # @option opts [ Array ] :array_filters A set of filters specifying to + # which array elements an update should apply. # @option opts [ true, false ] :bypass_document_validation Whether or # not to skip document level validation. # @option opts [ Hash ] :collation The collation to use. - # @option opts [ Array ] :array_filters A set of filters specifying to - # which array elements an update should apply. - # @option opts [ Session ] :session The session to use. + # @option opts [ Object ] :comment A user-provided + # comment to attach to this command. # @option opts [ Hash | String ] :hint The index to use for this operation. # May be specified as a Hash (e.g. { _id: 1 }) or a String (e.g. "_id_"). + # @option opts [ Hash ] :let Mapping of variables to use in the command. + # See the server documentation for details. + # @option opts [ Session ] :session The session to use. + # @option opts [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. + # @option opts [ true, false ] :upsert Whether to upsert if the + # document doesn't exist. # @option opts [ Hash ] :write_concern The write concern options. 
# Can be :w => Integer, :fsync => Boolean, :j => Boolean. - # @option options [ Hash ] :let Mapping of variables to use in the command. - # See the server documentation for details. - # @option options [ Object ] :comment A user-provided - # comment to attach to this command. # # @return [ Result ] The response from the database. # @@ -518,7 +557,11 @@ def update_one(spec, opts = {}) update_doc['upsert'] = true end - context = Operation::Context.new(client: client, session: session) + context = Operation::Context.new( + client: client, + session: session, + operation_timeouts: operation_timeouts(opts) + ) write_with_retry(write_concern, context: context) do |connection, txn_num, context| gte_4_2 = connection.server.description.server_version_gte?('4.2') if !gte_4_2 && opts[:hint] && write_concern && !write_concern.acknowledged? diff --git a/lib/mongo/crypt/auto_encrypter.rb index a98f7d93a8..3fa8970783 100644 --- a/lib/mongo/crypt/auto_encrypter.rb +++ b/lib/mongo/crypt/auto_encrypter.rb @@ -119,8 +119,6 @@ def initialize(options) @options[:extra_options][:crypt_shared_lib_required] unless @options[:extra_options][:crypt_shared_lib_required] || @crypt_handle.crypt_shared_lib_available? || @options[:bypass_query_analysis] - # Set server selection timeout to 1 to prevent the client waiting for a - # long timeout before spawning mongocryptd @mongocryptd_client = Client.new( @options[:extra_options][:mongocryptd_uri], monitoring_io: @options[:client].options[:monitoring_io], @@ -189,13 +187,13 @@ def encrypt? # @param [ Hash ] command The command to be encrypted. # # @return [ BSON::Document ] The encrypted command. - def encrypt(database_name, command) + def encrypt(database_name, command, timeout_holder) AutoEncryptionContext.new( + @crypt_handle, + @encryption_io, + database_name, + command - ).run_state_machine + ).run_state_machine(timeout_holder) end # Decrypt a database command. @@ -203,12 +201,12 @@ def encrypt(database_name, command) # @param [ Hash ] command The command with encrypted fields. # # @return [ BSON::Document ] The decrypted command. - def decrypt(command) + def decrypt(command, timeout_holder) AutoDecryptionContext.new( @crypt_handle, @encryption_io, command - ).run_state_machine + ).run_state_machine(timeout_holder) end # Close the resources created by the AutoEncrypter. diff --git a/lib/mongo/crypt/context.rb index 5ace2b9dde..d8c6772999 100644 --- a/lib/mongo/crypt/context.rb +++ b/lib/mongo/crypt/context.rb @@ -64,7 +64,10 @@ def state end # Runs the mongocrypt_ctx_t state machine and handles - # all I/O on behalf of + # all I/O on behalf of libmongocrypt. + # + # @param [ CsotTimeoutHolder ] timeout_holder CSOT timeouts for the + # operation the state machine is run for. # # @return [ BSON::Document ] A BSON document representing the outcome # of the state machine. Contents can differ depending on how the @@ -75,8 +78,9 @@ def state # # This method is not currently unit tested. It is integration tested # in spec/integration/explicit_encryption_spec.rb - def run_state_machine + def run_state_machine(timeout_holder) while true + timeout_ms = timeout_holder.remaining_timeout_ms!
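# Assumption about the CsotTimeoutHolder interface (consistent with its use in the
# KMS retrievers below): remaining_timeout_ms! presumably raises Error::TimeoutError
# once the CSOT budget is exhausted and otherwise returns the milliseconds still
# available (nil when no timeout is configured), so every pass through the loop
# re-checks the deadline and hands only the remaining budget to the key-vault,
# collection-info and mongocryptd calls below.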
case state when :error Binding.check_ctx_status(self) @@ -88,7 +92,7 @@ def run_state_machine when :need_mongo_keys filter = Binding.ctx_mongo_op(self) - @encryption_io.find_keys(filter).each do |key| + @encryption_io.find_keys(filter, timeout_ms: timeout_ms).each do |key| mongocrypt_feed(key) if key end @@ -96,14 +100,14 @@ def run_state_machine when :need_mongo_collinfo filter = Binding.ctx_mongo_op(self) - result = @encryption_io.collection_info(@db_name, filter) + result = @encryption_io.collection_info(@db_name, filter, timeout_ms: timeout_ms) mongocrypt_feed(result) if result mongocrypt_done when :need_mongo_markings cmd = Binding.ctx_mongo_op(self) - result = @encryption_io.mark_command(cmd) + result = @encryption_io.mark_command(cmd, timeout_ms: timeout_ms) mongocrypt_feed(result) mongocrypt_done @@ -118,7 +122,7 @@ def run_state_machine when :need_kms_credentials Binding.ctx_provide_kms_providers( self, - retrieve_kms_credentials.to_document + retrieve_kms_credentials(timeout_holder).to_document ) else raise Error::CryptError.new( @@ -147,13 +151,15 @@ def mongocrypt_feed(doc) # Retrieves KMS credentials for providers that are configured # for automatic credentials retrieval. # + # @param [ CsotTimeoutHolder ] timeout_holder CSOT timeout. + # # @return [ Crypt::KMS::Credentials ] Credentials for the configured # KMS providers. - def retrieve_kms_credentials + def retrieve_kms_credentials(timeout_holder) providers = {} if kms_providers.aws&.empty? begin - aws_credentials = Mongo::Auth::Aws::CredentialsRetriever.new.credentials + aws_credentials = Mongo::Auth::Aws::CredentialsRetriever.new.credentials(timeout_holder) rescue Auth::Aws::CredentialsNotFound raise Error::CryptError.new( "Could not locate AWS credentials (checked environment variables, ECS and EC2 metadata)" @@ -162,10 +168,10 @@ def retrieve_kms_credentials providers[:aws] = aws_credentials.to_h end if kms_providers.gcp&.empty? - providers[:gcp] = { access_token: gcp_access_token } + providers[:gcp] = { access_token: gcp_access_token(timeout_holder) } end if kms_providers.azure&.empty? - providers[:azure] = { access_token: azure_access_token } + providers[:azure] = { access_token: azure_access_token(timeout_holder) } end KMS::Credentials.new(providers) end @@ -175,8 +181,8 @@ def retrieve_kms_credentials # @return [ String ] A GCP access token. # # @raise [ Error::CryptError ] If the GCP access token could not be - def gcp_access_token - KMS::GCP::CredentialsRetriever.fetch_access_token + def gcp_access_token(timeout_holder) + KMS::GCP::CredentialsRetriever.fetch_access_token(timeout_holder) rescue KMS::CredentialsNotFound => e raise Error::CryptError.new( "Could not locate GCP credentials: #{e.class}: #{e.message}" @@ -189,9 +195,9 @@ def gcp_access_token # # @raise [ Error::CryptError ] If the Azure access token could not be # retrieved. - def azure_access_token + def azure_access_token(timeout_holder) if @cached_azure_token.nil? || @cached_azure_token.expired? 
- @cached_azure_token = KMS::Azure::CredentialsRetriever.fetch_access_token + @cached_azure_token = KMS::Azure::CredentialsRetriever.fetch_access_token(timeout_holder: timeout_holder) end @cached_azure_token.access_token rescue KMS::CredentialsNotFound => e diff --git a/lib/mongo/crypt/encryption_io.rb b/lib/mongo/crypt/encryption_io.rb index 67dee5731f..3f6f7747c3 100644 --- a/lib/mongo/crypt/encryption_io.rb +++ b/lib/mongo/crypt/encryption_io.rb @@ -73,47 +73,58 @@ def initialize( # filter # # @param [ Hash ] filter + # @param [ Integer | nil ] :timeout_ms # # @return [ Array ] The query results - def find_keys(filter) - key_vault_collection.find(filter).to_a + def find_keys(filter, timeout_ms: nil) + key_vault_collection.find(filter, timeout_ms: timeout_ms).to_a end # Insert a document into the key vault collection # # @param [ Hash ] document + # @param [ Integer | nil ] :timeout_ms # # @return [ Mongo::Operation::Insert::Result ] The insertion result - def insert_data_key(document) - key_vault_collection.insert_one(document) + def insert_data_key(document, timeout_ms: nil) + key_vault_collection.insert_one(document, timeout_ms: timeout_ms) end # Get collection info for a collection matching the provided filter # # @param [ Hash ] filter + # @param [ Integer | nil ] :timeout_ms # # @return [ Hash ] The collection information - def collection_info(db_name, filter) + def collection_info(db_name, filter, timeout_ms: nil) unless @metadata_client raise ArgumentError, 'collection_info requires metadata_client to have been passed to the constructor, but it was not' end - @metadata_client.use(db_name).database.list_collections(filter: filter, deserialize_as_bson: true).first + @metadata_client + .use(db_name) + .database + .list_collections(filter: filter, deserialize_as_bson: true, timeout_ms: timeout_ms) + .first end # Send the command to mongocryptd to be marked with intent-to-encrypt markings # # @param [ Hash ] cmd + # @param [ Integer | nil ] :timeout_ms # # @return [ Hash ] The marked command - def mark_command(cmd) + def mark_command(cmd, timeout_ms: nil) unless @mongocryptd_client raise ArgumentError, 'mark_command requires mongocryptd_client to have been passed to the constructor, but it was not' end # Ensure the response from mongocryptd is deserialized with { mode: :bson } # to prevent losing type information in commands - options = { execution_options: { deserialize_as_bson: true } } + options = { + execution_options: { deserialize_as_bson: true }, + timeout_ms: timeout_ms + } begin response = @mongocryptd_client.database.command(cmd, options) @@ -136,9 +147,10 @@ def mark_command(cmd) # to send on that connection. # @param [ Hash ] tls_options. TLS options to connect to KMS provider. # The options are same as for Mongo::Client. 
- def feed_kms(kms_context, tls_options) + # @param [ Integer | nil ] :timeout_ms + def feed_kms(kms_context, tls_options, timeout_ms: nil) with_ssl_socket(kms_context.endpoint, tls_options) do |ssl_socket| - Timeout.timeout(SOCKET_TIMEOUT, Error::SocketTimeoutError, + Timeout.timeout(timeout_ms || SOCKET_TIMEOUT, Error::SocketTimeoutError, 'Socket write operation timed out' ) do ssl_socket.syswrite(kms_context.message) @@ -146,7 +158,7 @@ def feed_kms(kms_context, tls_options) bytes_needed = kms_context.bytes_needed while bytes_needed > 0 do - bytes = Timeout.timeout(SOCKET_TIMEOUT, Error::SocketTimeoutError, + bytes = Timeout.timeout(timeout_ms || SOCKET_TIMEOUT, Error::SocketTimeoutError, 'Socket read operation timed out' ) do ssl_socket.sysread(bytes_needed) @@ -160,38 +172,39 @@ def feed_kms(kms_context, tls_options) # Adds a key_alt_name to the key_alt_names array of the key document # in the key vault collection with the given id. - def add_key_alt_name(id, key_alt_name) + def add_key_alt_name(id, key_alt_name, timeout_ms: nil) key_vault_collection.find_one_and_update( { _id: id }, { '$addToSet' => { keyAltNames: key_alt_name } }, + timeout_ms: timeout_ms ) end # Removes the key document with the given id # from the key vault collection. - def delete_key(id) - key_vault_collection.delete_one(_id: id) + def delete_key(id, timeout_ms: nil) + key_vault_collection.delete_one(_id: id, timeout_ms: timeout_ms) end # Finds a single key document with the given id. - def get_key(id) - key_vault_collection.find(_id: id).first + def get_key(id, timeout_ms: nil) + key_vault_collection.find(_id: id, timeout_ms: timeout_ms).first end # Returns a key document in the key vault collection with # the given key_alt_name. - def get_key_by_alt_name(key_alt_name) - key_vault_collection.find(keyAltNames: key_alt_name).first + def get_key_by_alt_name(key_alt_name, timeout_ms: nil) + key_vault_collection.find(keyAltNames: key_alt_name, timeout_ms: timeout_ms).first end # Finds all documents in the key vault collection. - def get_keys - key_vault_collection.find + def get_keys(timeout_ms: nil) + key_vault_collection.find(nil, timeout_ms: timeout_ms) end # Removes a key_alt_name from the key_alt_names array of the key document # in the key vault collection with the given id. - def remove_key_alt_name(id, key_alt_name) + def remove_key_alt_name(id, key_alt_name, timeout_ms: nil) key_vault_collection.find_one_and_update( { _id: id }, [ @@ -211,7 +224,8 @@ def remove_key_alt_name(id, key_alt_name) } } } - ] + ], + timeout_ms: timeout_ms ) end @@ -220,8 +234,8 @@ def remove_key_alt_name(id, key_alt_name) # @param [ Array ] requests The bulk write requests. # # @return [ BulkWrite::Result ] The result of the operation. - def update_data_keys(updates) - key_vault_collection.bulk_write(updates) + def update_data_keys(updates, timeout_ms: nil) + key_vault_collection.bulk_write(updates, timeout_ms: timeout_ms) end private @@ -322,15 +336,21 @@ def spawn_mongocryptd # # @note The socket is always closed when the provided block has finished # executing - def with_ssl_socket(endpoint, tls_options) + def with_ssl_socket(endpoint, tls_options, timeout_ms: nil) + csot = !timeout_ms.nil? address = begin host, port = endpoint.split(':') port ||= 443 # All supported KMS APIs use this port by default. 
Address.new([host, port].join(':')) end + socket_options = { ssl: true, csot: csot }.tap do |opts| + if csot + opts[:connect_timeout] = (timeout_ms / 1_000.0) + end + end mongo_socket = address.socket( SOCKET_TIMEOUT, - tls_options.merge(ssl: true) + tls_options.merge(socket_options) ) yield(mongo_socket.socket) rescue => e diff --git a/lib/mongo/crypt/explicit_encrypter.rb b/lib/mongo/crypt/explicit_encrypter.rb index 946f97d7bd..b90a837b0f 100644 --- a/lib/mongo/crypt/explicit_encrypter.rb +++ b/lib/mongo/crypt/explicit_encrypter.rb @@ -35,7 +35,9 @@ class ExplicitEncrypter # providers. Keys of the hash should be KSM provider names; values # should be hashes of TLS connection options. The options are equivalent # to TLS connection options of Mongo::Client. - def initialize(key_vault_client, key_vault_namespace, kms_providers, kms_tls_options) + # @param [ Integer | nil ] timeout_ms Timeout for every operation executed + # on this object. + def initialize(key_vault_client, key_vault_namespace, kms_providers, kms_tls_options, timeout_ms = nil) Crypt.validate_ffi! @crypt_handle = Handle.new( kms_providers, @@ -47,6 +49,7 @@ def initialize(key_vault_client, key_vault_namespace, kms_providers, kms_tls_opt metadata_client: nil, key_vault_namespace: key_vault_namespace ) + @timeout_ms = timeout_ms end # Generates a data key used for encryption/decryption and stores @@ -71,9 +74,11 @@ def create_and_insert_data_key(master_key_document, key_alt_names, key_material master_key_document, key_alt_names, key_material - ).run_state_machine + ).run_state_machine(timeout_holder) - @encryption_io.insert_data_key(data_key_document).inserted_id + @encryption_io.insert_data_key( + data_key_document, timeout_ms: timeout_holder.remaining_timeout_ms! + ).inserted_id end # Encrypts a value using the specified encryption key and algorithm @@ -111,7 +116,7 @@ def encrypt(value, options) @encryption_io, { v: value }, options - ).run_state_machine['v'] + ).run_state_machine(timeout_holder)['v'] end # Encrypts a Match Expression or Aggregate Expression to query a range index. @@ -170,7 +175,7 @@ def encrypt_expression(expression, options) @encryption_io, { v: expression }, options - ).run_state_machine['v'] + ).run_state_machine(timeout_holder)['v'] end # Decrypts a value that has already been encrypted @@ -184,7 +189,7 @@ def decrypt(value) @crypt_handle, @encryption_io, { v: value } - ).run_state_machine['v'] + ).run_state_machine(timeout_holder)['v'] end # Adds a key_alt_name for the key in the key vault collection with the given id. @@ -195,7 +200,7 @@ def decrypt(value) # @return [ BSON::Document | nil ] Document describing the identified key # before adding the key alt name, or nil if no such key. def add_key_alt_name(id, key_alt_name) - @encryption_io.add_key_alt_name(id, key_alt_name) + @encryption_io.add_key_alt_name(id, key_alt_name, timeout_ms: @timeout_ms) end # Removes the key with the given id from the key vault collection. @@ -204,7 +209,9 @@ def add_key_alt_name(id, key_alt_name) # # @return [ Operation::Result ] The response from the database for the delete_one # operation that deletes the key. - def_delegators :@encryption_io, :delete_key + def delete_key(id) + @encryption_io.delete_key(id, timeout_ms: @timeout_ms) + end # Finds a single key with the given id. # @@ -212,7 +219,9 @@ def add_key_alt_name(id, key_alt_name) # # @return [ BSON::Document | nil ] The found key document or nil # if not found. 
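# @example Hypothetical usage with an encrypter-level timeout; the client,
#   namespace and the 5_000 ms value are illustrative only.
#   encrypter = Mongo::Crypt::ExplicitEncrypter.new(
#     key_vault_client, 'encryption.__keyVault', kms_providers, kms_tls_options, 5_000
#   )
#   encrypter.get_key(key_id) # the key vault read is capped by the 5_000 ms timeout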
- def_delegators :@encryption_io, :get_key + def get_key(id) + @encryption_io.get_key(id, timeout_ms: @timeout_ms) + end # Returns a key in the key vault collection with the given key_alt_name. # @@ -220,12 +229,19 @@ def add_key_alt_name(id, key_alt_name) # # @return [ BSON::Document | nil ] The found key document or nil # if not found. - def_delegators :@encryption_io, :get_key_by_alt_name + def get_key_by_alt_name(key_alt_name) + @encryption_io.get_key_by_alt_name(key_alt_name, timeout_ms: @timeout_ms) + end # Returns all keys in the key vault collection. # # @return [ Collection::View ] Keys in the key vault collection. - def_delegators :@encryption_io, :get_keys + # rubocop:disable Naming/AccessorMethodName + # Name of this method is defined in the FLE spec + def get_keys + @encryption_io.get_keys(timeout_ms: @timeout_ms) + end + # rubocop:enable Naming/AccessorMethodName # Removes a key_alt_name from a key in the key vault collection with the given id. # @@ -234,7 +250,9 @@ def add_key_alt_name(id, key_alt_name) # # @return [ BSON::Document | nil ] Document describing the identified key # before removing the key alt name, or nil if no such key. - def_delegators :@encryption_io, :remove_key_alt_name + def remove_key_alt_name(id, key_alt_name) + @encryption_io.remove_key_alt_name(id, key_alt_name, timeout_ms: @timeout_ms) + end # Decrypts multiple data keys and (re-)encrypts them with a new master_key, # or with their current master_key if a new one is not given. @@ -257,12 +275,14 @@ def rewrap_many_data_key(filter, opts = {}) @encryption_io, filter, master_key_document - ).run_state_machine + ).run_state_machine(timeout_holder) return RewrapManyDataKeyResult.new(nil) if rewrap_result.nil? updates = updates_from_data_key_documents(rewrap_result.fetch('v')) - RewrapManyDataKeyResult.new(@encryption_io.update_data_keys(updates)) + RewrapManyDataKeyResult.new( + @encryption_io.update_data_keys(updates, timeout_ms: @timeout_ms) + ) end private @@ -318,6 +338,14 @@ def updates_from_data_key_documents(documents) } end end + + def timeout_holder + CsotTimeoutHolder.new( + operation_timeouts: { + operation_timeout_ms: @timeout_ms + } + ) + end end end end diff --git a/lib/mongo/crypt/kms/azure/credentials_retriever.rb b/lib/mongo/crypt/kms/azure/credentials_retriever.rb index 88787d4409..c1b6898fa9 100644 --- a/lib/mongo/crypt/kms/azure/credentials_retriever.rb +++ b/lib/mongo/crypt/kms/azure/credentials_retriever.rb @@ -34,13 +34,16 @@ class CredentialsRetriever # request. This is used for testing. # @param [String | nil] metadata_host Azure metadata host. This # is used for testing. + # @param [ CsotTimeoutHolder | nil ] timeout_holder CSOT timeout. # # @return [ KMS::Azure::AccessToken ] Azure access token. # # @raise [KMS::CredentialsNotFound] If credentials could not be found. - def self.fetch_access_token(extra_headers: {}, metadata_host: nil) + # @raise Error::TimeoutError if credentials cannot be retrieved within + # the timeout. + def self.fetch_access_token(extra_headers: {}, metadata_host: nil, timeout_holder: nil) uri, req = prepare_request(extra_headers, metadata_host) - parsed_response = fetch_response(uri, req) + parsed_response = fetch_response(uri, req, timeout_holder) Azure::AccessToken.new( parsed_response.fetch('access_token'), Integer(parsed_response.fetch('expires_in')) @@ -78,13 +81,16 @@ def self.prepare_request(extra_headers, metadata_host) # # @param [URI] uri URI to Azure metadata host. # @param [Net::HTTP::Get] req Request object. 
+ # @param [ CsotTimeoutHolder | nil ] timeout_holder CSOT timeout. # # @return [Hash] Parsed response. # # @raise [KMS::CredentialsNotFound] If cannot fetch response or # response is invalid. - def self.fetch_response(uri, req) - resp = do_request(uri, req) + # @raise Error::TimeoutError if credentials cannot be retrieved within + # the timeout. + def self.fetch_response(uri, req, timeout_holder) + resp = do_request(uri, req, timeout_holder) if resp.code != '200' raise KMS::CredentialsNotFound, "Azure metadata host responded with code #{resp.code}" @@ -100,12 +106,22 @@ def self.fetch_response(uri, req) # # @param [URI] uri URI to Azure metadata host. # @param [Net::HTTP::Get] req Request object. + # @param [ CsotTimeoutHolder | nil ] timeout_holder CSOT timeout. # # @return [Net::HTTPResponse] Response object. # # @raise [KMS::CredentialsNotFound] If cannot execute request. - def self.do_request(uri, req) - ::Timeout.timeout(10) do + # @raise Error::TimeoutError if credentials cannot be retrieved within + # the timeout. + def self.do_request(uri, req, timeout_holder) + timeout_holder&.check_timeout! + timeout = timeout_holder&.remaining_timeout_sec || 10 + exception_class = if timeout_holder&.csot? + Error::TimeoutError + else + nil + end + ::Timeout.timeout(timeout, exception_class) do Net::HTTP.start(uri.hostname, uri.port, use_ssl: false) do |http| http.request(req) end diff --git a/lib/mongo/crypt/kms/gcp/credentials_retriever.rb b/lib/mongo/crypt/kms/gcp/credentials_retriever.rb index 723cda43c4..bbb2887fbd 100644 --- a/lib/mongo/crypt/kms/gcp/credentials_retriever.rb +++ b/lib/mongo/crypt/kms/gcp/credentials_retriever.rb @@ -29,14 +29,20 @@ class CredentialsRetriever DEFAULT_HOST = 'metadata.google.internal' - def self.fetch_access_token + # Fetch GCP access token. + # + # @param [ CsotTimeoutHolder | nil ] timeout_holder CSOT timeout. + # + # @return [ String ] GCP access token. + # + # @raise [ KMS::CredentialsNotFound ] + # @raise [ Error::TimeoutError ] + def self.fetch_access_token(timeout_holder = nil) host = ENV.fetch(METADATA_HOST_ENV) { DEFAULT_HOST } uri = URI("http://#{host}/computeMetadata/v1/instance/service-accounts/default/token") req = Net::HTTP::Get.new(uri) req['Metadata-Flavor'] = 'Google' - resp = Net::HTTP.start(uri.hostname, uri.port, use_ssl: false) do |http| - http.request(req) - end + resp = fetch_response(uri, req, timeout_holder) if resp.code != '200' raise KMS::CredentialsNotFound, "GCE metadata host responded with code #{resp.code}" @@ -50,6 +56,25 @@ def self.fetch_access_token raise KMS::CredentialsNotFound, "Could not receive GCP metadata response; #{e.class}: #{e.message}" end + + def self.fetch_response(uri, req, timeout_holder) + timeout_holder&.check_timeout! + if timeout_holder&.timeout? + ::Timeout.timeout(timeout_holder.remaining_timeout_sec, Error::TimeoutError) do + do_fetch(uri, req) + end + else + do_fetch(uri, req) + end + end + private_class_method :fetch_response + + def self.do_fetch(uri, req) + Net::HTTP.start(uri.hostname, uri.port, use_ssl: false) do |http| + http.request(req) + end + end + private_class_method :do_fetch end end end diff --git a/lib/mongo/csot_timeout_holder.rb b/lib/mongo/csot_timeout_holder.rb new file mode 100644 index 0000000000..9d7d15c0a0 --- /dev/null +++ b/lib/mongo/csot_timeout_holder.rb @@ -0,0 +1,119 @@ +# frozen_string_literal: true + +# Copyright (C) 2024 MongoDB Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +module Mongo + # This class stores operation timeout and provides corresponding helper methods. + # + # @api private + class CsotTimeoutHolder + def initialize(session: nil, operation_timeouts: {}) + @deadline = calculate_deadline(operation_timeouts, session) + @operation_timeouts = operation_timeouts + @timeout_sec = (@deadline - Utils.monotonic_time if @deadline) + end + + attr_reader :deadline, :timeout_sec, :operation_timeouts + + # @return [ true | false ] Whether CSOT is enabled for the operation + def csot? + !deadline.nil? + end + + # @return [ true | false ] Returns false if CSOT is not enabled, or if + # CSOT is set to 0 (means unlimited), otherwise true. + def timeout? + ![ nil, 0 ].include?(@deadline) + end + + # @return [ Float | nil ] Returns the remaining seconds of the timeout + # set for the operation; if no timeout is set, or the timeout is 0 + # (means unlimited) returns nil. + def remaining_timeout_sec + return nil unless timeout? + + deadline - Utils.monotonic_time + end + + def remaining_timeout_sec! + check_timeout! + remaining_timeout_sec + end + + # @return [ Integer | nil ] Returns the remaining milliseconds of the timeout + # set for the operation; if no timeout is set, or the timeout is 0 + # (means unlimited) returns nil. + def remaining_timeout_ms + seconds = remaining_timeout_sec + return nil if seconds.nil? + + (seconds * 1_000).to_i + end + + def remaining_timeout_ms! + check_timeout! + remaining_timeout_ms + end + + # @return [ true | false ] Whether the timeout for the operation expired. + # If no timeout set, this method returns false. + def timeout_expired? + if timeout? + Utils.monotonic_time >= deadline + else + false + end + end + + # Check whether the operation timeout expired, and raises an appropriate + # error if yes. + # + # @raise [ Error::TimeoutError ] + def check_timeout! + return unless timeout_expired? + + raise Error::TimeoutError, "Operation took more than #{timeout_sec} seconds" + end + + private + + def calculate_deadline(opts = {}, session = nil) + check_no_override_inside_transaction!(opts, session) + return session&.with_transaction_deadline if session&.with_transaction_deadline + + if (operation_timeout_ms = opts[:operation_timeout_ms]) + calculate_deadline_from_timeout_ms(operation_timeout_ms) + elsif (inherited_timeout_ms = opts[:inherited_timeout_ms]) + calculate_deadline_from_timeout_ms(inherited_timeout_ms) + end + end + + def check_no_override_inside_transaction!(opts, session) + return unless opts[:operation_timeout_ms] && session&.with_transaction_deadline + + raise ArgumentError, 'Cannot override timeout_ms inside with_transaction block' + end + + def calculate_deadline_from_timeout_ms(operation_timeout_ms) + if operation_timeout_ms.positive? + Utils.monotonic_time + (operation_timeout_ms / 1_000.0) + elsif operation_timeout_ms.zero? + 0 + elsif operation_timeout_ms.negative? 
+ raise ArgumentError, "timeout_ms must be a non-negative integer but #{operation_timeout_ms} given" + end + end + end +end diff --git a/lib/mongo/cursor.rb b/lib/mongo/cursor.rb index d24e137b5b..618d30d032 100644 --- a/lib/mongo/cursor.rb +++ b/lib/mongo/cursor.rb @@ -49,6 +49,9 @@ class Cursor # @api private attr_reader :resume_token + # @return [ Operation::Context ] context the context for this cursor + attr_reader :context + # Creates a +Cursor+ object. # # @example Instantiate the cursor. @@ -59,6 +62,8 @@ class Cursor # @param [ Server ] server The server this cursor is locked to. # @param [ Hash ] options The cursor options. # + # @option options [ Operation::Context ] :context The operation context + # for this cursor. # @option options [ true, false ] :disable_retry Whether to disable # retrying on error when sending getMore operations (deprecated, getMore # operations are no longer retried) @@ -80,9 +85,10 @@ def initialize(view, result, server, options = {}) if @cursor_id.nil? raise ArgumentError, 'Cursor id must be present in the result' end - @connection_global_id = result.connection_global_id @options = options @session = @options[:session] + @connection_global_id = result.connection_global_id + @context = @options[:context]&.with(connection_global_id: connection_global_id_for_context) || fresh_context @explicitly_closed = false @lock = Mutex.new unless closed? @@ -284,9 +290,11 @@ def closed? # the server. # # @return [ nil ] Always nil. - def close + def close(opts = {}) return if closed? + ctx = context ? context.refresh(timeout_ms: opts[:timeout_ms]) : fresh_context(opts) + unregister read_with_one_retry do spec = { @@ -295,11 +303,11 @@ def close cursor_ids: [id], } op = Operation::KillCursors.new(spec) - execute_operation(op) + execute_operation(op, context: ctx) end nil - rescue Error::OperationFailure, Error::SocketError, Error::SocketTimeoutError, Error::ServerNotUsable + rescue Error::OperationFailure::Family, Error::SocketError, Error::SocketTimeoutError, Error::ServerNotUsable # Errors are swallowed since there is noting can be done by handling them. ensure end_session @@ -434,15 +442,7 @@ def get_more_operation # 3.2+ servers use batch_size, 3.0- servers use to_return. # TODO should to_return be calculated in the operation layer? batch_size: batch_size_for_get_more, - to_return: to_return, - max_time_ms: if view.respond_to?(:max_await_time_ms) && - view.max_await_time_ms && - view.options[:await_data] - then - view.max_await_time_ms - else - nil - end, + to_return: to_return } if view.respond_to?(:options) && view.options.is_a?(Hash) spec[:comment] = view.options[:comment] unless view.options[:comment].nil? @@ -495,13 +495,17 @@ def unregister cluster.unregister_cursor(@cursor_id) end - def execute_operation(op) - context = Operation::Context.new( - client: client, - session: @session, - connection_global_id: @connection_global_id, - ) - op.execute(@server, context: context) + def execute_operation(op, context: nil) + op.execute(@server, context: context || possibly_refreshed_context) + end + + # Considers the timeout mode and will either return the cursor's + # context directly, or will return a new (refreshed) context. + # + # @return [ Operation::Context ] the (possibly-refreshed) context. + def possibly_refreshed_context + return context if view.timeout_mode == :cursor_lifetime + context.refresh(view: view) end # Sets @cursor_id from the operation result. 
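The CsotTimeoutHolder added above converts a timeout_ms budget into a monotonic deadline and exposes the remaining budget to operations and cursors. A minimal illustrative sketch of the expected behaviour follows; it is not part of the patch, the 500 ms budget and the sleep are hypothetical, and the class is an internal (@api private) helper:

require 'mongo'

# A holder created with a 500 ms budget computes a monotonic deadline and
# reports the remaining budget in seconds or milliseconds.
holder = Mongo::CsotTimeoutHolder.new(operation_timeouts: { operation_timeout_ms: 500 })
holder.csot?                 # => true  (a deadline was computed)
holder.timeout?              # => true  (the deadline is non-zero)
holder.remaining_timeout_ms  # => roughly 500, shrinking as time passes

sleep 0.6
holder.timeout_expired?      # => true
holder.check_timeout!        # raises Mongo::Error::TimeoutError

# A budget of 0 means "unlimited": CSOT counts as enabled, but no deadline
# is enforced and the remaining timeout is reported as nil.
unlimited = Mongo::CsotTimeoutHolder.new(operation_timeouts: { operation_timeout_ms: 0 })
unlimited.timeout?              # => false
unlimited.remaining_timeout_ms  # => nil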
@@ -521,6 +525,26 @@ def set_cursor_id(result) end end + # Returns a newly instantiated operation context based on the + # default values from the view. + def fresh_context(opts = {}) + Operation::Context.new(client: view.client, + session: @session, + connection_global_id: connection_global_id_for_context, + operation_timeouts: view.operation_timeouts(opts), + view: view) + end + + # Because a context must not have a connection_global_id if the session + # is already pinned to one, this method checks to see whether or not there's + # pinned connection_global_id on the session and returns nil if so. + def connection_global_id_for_context + if @session&.pinned_connection_global_id + nil + else + @connection_global_id + end + end end end diff --git a/lib/mongo/cursor/nontailable.rb b/lib/mongo/cursor/nontailable.rb new file mode 100644 index 0000000000..d28863d3cc --- /dev/null +++ b/lib/mongo/cursor/nontailable.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +module Mongo + class Cursor + # This module is used by cursor-implementing classes to indicate that + # the only cursors they generate are non-tailable, and iterable. + # + # @api private + module NonTailable + # These views are always non-tailable. + # + # @return [ nil ] indicating a non-tailable cursor. + def cursor_type + nil + end + + # These views apply timeouts to each iteration of a cursor, as + # opposed to the entire lifetime of the cursor. + # + # @return [ :iterable ] indicating a cursor with a timeout mode of + # "iterable". + def timeout_mode + :iterable + end + end + end +end diff --git a/lib/mongo/cursor_host.rb b/lib/mongo/cursor_host.rb new file mode 100644 index 0000000000..1d192527d9 --- /dev/null +++ b/lib/mongo/cursor_host.rb @@ -0,0 +1,82 @@ +# frozen_string_literal: true + +module Mongo + # A shared concern implementing settings and configuration for entities that + # "host" (or spawn) cursors. + # + # The class or module that includes this concern must implement: + # * timeout_ms -- this must return either the operation level timeout_ms + # (if set) or an inherited timeout_ms from a hierarchically higher + # level (if any). + module CursorHost + # Returns the cursor associated with this view, if any. + # + # @return [ nil | Cursor ] The cursor, if any. + # + # @api private + attr_reader :cursor + + # @return [ :cursor_lifetime | :iteration ] The timeout mode to be + # used by this object. + attr_reader :timeout_mode + + # Ensure the timeout mode is appropriate for other options that + # have been given. + # + # @param [ Hash ] options The options to inspect. + # @param [ Array ] forbid The list of options to forbid for this + # class. + # + # @raise [ ArgumentError ] if inconsistent or incompatible options are + # detected. + # + # @api private + # rubocop:disable Metrics + def validate_timeout_mode!(options, forbid: []) + forbid.each do |key| + raise ArgumentError, "#{key} is not allowed here" if options.key?(key) + end + + cursor_type = options[:cursor_type] + timeout_mode = options[:timeout_mode] + + if timeout_ms + # "Tailable cursors only support the ITERATION value for the + # timeoutMode option. This is the default value and drivers MUST + # error if the option is set to CURSOR_LIFETIME." 
+ if cursor_type + timeout_mode ||= :iteration + if timeout_mode == :cursor_lifetime + raise ArgumentError, 'tailable cursors only support `timeout_mode: :iteration`' + end + + # "Drivers MUST error if [the maxAwaitTimeMS] option is set, + # timeoutMS is set to a non-zero value, and maxAwaitTimeMS is + # greater than or equal to timeoutMS." + max_await_time_ms = options[:max_await_time_ms] || 0 + if cursor_type == :tailable_await && max_await_time_ms >= timeout_ms + raise ArgumentError, ':max_await_time_ms must not be >= :timeout_ms' + end + else + # "For non-tailable cursors, the default value of timeoutMode + # is CURSOR_LIFETIME." + timeout_mode ||= :cursor_lifetime + end + elsif timeout_mode + # "Drivers MUST error if timeoutMode is set and timeoutMS is not." + raise ArgumentError, ':timeout_ms must be set if :timeout_mode is set' + end + + if timeout_mode == :iteration && respond_to?(:write?) && write? + raise ArgumentError, 'timeout_mode=:iteration is not supported for aggregation pipelines with $out or $merge' + end + + # set it as an instance variable, rather than updating the options, + # because if the cursor type changes (e.g. via #configure()), the new + # View instance must be able to select a different default timeout_mode + # if no timeout_mode was set initially. + @timeout_mode = timeout_mode + end + # rubocop:enable Metrics + end +end diff --git a/lib/mongo/database.rb b/lib/mongo/database.rb index 9b656c5b99..ab23311895 100644 --- a/lib/mongo/database.rb +++ b/lib/mongo/database.rb @@ -128,6 +128,8 @@ def [](collection_name, options = {}) # required privilege to run the command when access control is enforced # @option options [ Object ] :comment A user-provided # comment to attach to this command. + # @option options [ Integer ] :timeout_ms The operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # # See https://mongodb.com/docs/manual/reference/command/listCollections/ # for more information and usage. @@ -136,7 +138,7 @@ def [](collection_name, options = {}) # # @since 2.0.0 def collection_names(options = {}) - View.new(self).collection_names(options) + View.new(self, options).collection_names(options) end # Get info on all the non-system collections in the database. @@ -156,6 +158,8 @@ def collection_names(options = {}) # required privilege to run the command when access control is enforced. # @option options [ Object ] :comment A user-provided # comment to attach to this command. + # @option options [ Integer ] :timeout_ms The operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # # See https://mongodb.com/docs/manual/reference/command/listCollections/ # for more information and usage. @@ -165,7 +169,7 @@ def collection_names(options = {}) # # @since 2.0.5 def list_collections(options = {}) - View.new(self).list_collections(options) + View.new(self, options).list_collections(options) end # Get all the non-system collections that belong to this database. @@ -181,6 +185,8 @@ def list_collections(options = {}) # required privilege to run the command when access control is enforced. # @option options [ Object ] :comment A user-provided # comment to attach to this command. + # @option options [ Integer ] :timeout_ms The operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # # See https://mongodb.com/docs/manual/reference/command/listCollections/ # for more information and usage. 
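Since the listing helpers above now accept a per-call :timeout_ms, a hypothetical usage sketch follows (the host, port and database name are placeholders, and the 200 ms budget is arbitrary); when the budget is exceeded the call is expected to raise Mongo::Error::TimeoutError:

require 'mongo'

# Hypothetical client; the address and database name are placeholders.
client = Mongo::Client.new([ '127.0.0.1:27017' ], database: 'test')

# Give each listing call its own 200 ms budget instead of inheriting the
# client- or database-level timeout_ms.
names = client.database.collection_names(timeout_ms: 200)
infos = client.database.list_collections(timeout_ms: 200)

client.close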
@@ -202,6 +208,8 @@ def collections(options = {}) # # @option opts :read [ Hash ] The read preference for this command. # @option opts :session [ Session ] The session to use for this command. + # @option options [ Integer ] :timeout_ms The operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # @option opts :execution_options [ Hash ] Options to pass to the code that # executes this command. This is an internal option and is subject to # change. @@ -223,7 +231,7 @@ def command(operation, opts = {}) Lint.validate_underscore_read_preference(txn_read_pref) selector = ServerSelector.get(txn_read_pref) - client.send(:with_session, opts) do |session| + client.with_session(opts) do |session| server = selector.select_server(cluster, nil, session) op = Operation::Command.new( :selector => operation, @@ -233,7 +241,11 @@ def command(operation, opts = {}) ) op.execute(server, - context: Operation::Context.new(client: client, session: session), + context: Operation::Context.new( + client: client, + session: session, + operation_timeouts: operation_timeouts(opts) + ), options: execution_opts) end end @@ -245,6 +257,10 @@ def command(operation, opts = {}) # # @option opts :read [ Hash ] The read preference for this command. # @option opts :session [ Session ] The session to use for this command. + # @option opts [ Object ] :comment A user-provided + # comment to attach to this command. + # @option opts [ Integer | nil ] :timeout_ms Operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # # @return [ Hash ] The result of the command execution. # @api private @@ -258,15 +274,20 @@ def read_command(operation, opts = {}) Lint.validate_underscore_read_preference(txn_read_pref) preference = ServerSelector.get(txn_read_pref) - client.send(:with_session, opts) do |session| - read_with_retry(session, preference) do |server| + client.with_session(opts) do |session| + context = Operation::Context.new( + client: client, + session: session, + operation_timeouts: operation_timeouts(opts) + ) + read_with_retry(session, preference, context) do |server| Operation::Command.new( selector: operation.dup, db_name: name, read: preference, session: session, comment: opts[:comment], - ).execute(server, context: Operation::Context.new(client: client, session: session)) + ).execute(server, context: context) end end end @@ -279,14 +300,16 @@ def read_command(operation, opts = {}) # @param [ Hash ] options The options for the operation. # # @option options [ Session ] :session The session to use for the operation. - # @option opts [ Hash ] :write_concern The write concern options. + # @option options [ Hash ] :write_concern The write concern options. + # @option options [ Integer | nil ] :timeout_ms Operation timeout in milliseconds. + # Must a positive integer. The default value is unset which means infinite. # # @return [ Result ] The result of the command. 
# # @since 2.0.0 def drop(options = {}) operation = { :dropDatabase => 1 } - client.send(:with_session, options) do |session| + client.with_session(options) do |session| write_concern = if options[:write_concern] WriteConcern.get(options[:write_concern]) else @@ -297,7 +320,14 @@ def drop(options = {}) db_name: name, write_concern: write_concern, session: session - }).execute(next_primary(nil, session), context: Operation::Context.new(client: client, session: session)) + }).execute( + next_primary(nil, session), + context: Operation::Context.new( + client: client, + session: session, + operation_timeouts: operation_timeouts(options) + ) + ) end end @@ -391,17 +421,13 @@ def users # @option options [ String ] :hint The index to use for the aggregation. # @option options [ Integer ] :max_time_ms The maximum amount of time in # milliseconds to allow the aggregation to run. - # @option options [ true, false ] :use_cursor Indicates whether the command - # will request that the server provide results using a cursor. Note that - # as of server version 3.6, aggregations always provide results using a - # cursor and this option is therefore not valid. # @option options [ Session ] :session The session to use. # # @return [ Collection::View::Aggregation ] The aggregation object. # # @since 2.10.0 def aggregate(pipeline, options = {}) - View.new(self).aggregate(pipeline, options) + View.new(self, options).aggregate(pipeline, options) end # As of version 3.6 of the MongoDB server, a ``$changeStream`` pipeline stage is supported @@ -471,7 +497,7 @@ def aggregate(pipeline, options = {}) # @since 2.6.0 def watch(pipeline = [], options = {}) view_options = options.dup - view_options[:await_data] = true if options[:max_await_time_ms] + view_options[:cursor_type] = :tailable_await if options[:max_await_time_ms] Mongo::Collection::View::ChangeStream.new( Mongo::Collection::View.new(collection("#{COMMAND}.aggregate"), {}, view_options), @@ -497,5 +523,28 @@ def self.create(client) database = Database.new(client, client.options[:database], client.options) client.instance_variable_set(:@database, database) end + + # @return [ Integer | nil ] Operation timeout that is for this database or + # for the corresponding client. + # + # @api private + def timeout_ms + options[:timeout_ms] || client.timeout_ms + end + + # @return [ Hash ] timeout_ms value set on the operation level (if any), + # and/or timeout_ms that is set on collection/database/client level (if any). + # + # @api private + def operation_timeouts(opts) + # TODO: We should re-evaluate if we need two timeouts separately. + {}.tap do |result| + if opts[:timeout_ms].nil? + result[:inherited_timeout_ms] = timeout_ms + else + result[:operation_timeout_ms] = opts.delete(:timeout_ms) + end + end + end end end diff --git a/lib/mongo/database/view.rb b/lib/mongo/database/view.rb index 4e8195fb67..99c46066f8 100644 --- a/lib/mongo/database/view.rb +++ b/lib/mongo/database/view.rb @@ -15,6 +15,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +require 'mongo/cursor/nontailable' + module Mongo class Database @@ -25,6 +27,8 @@ class View extend Forwardable include Enumerable include Retryable + include Mongo::CursorHost + include Cursor::NonTailable def_delegators :@database, :cluster, :read_preference, :client # @api private @@ -56,6 +60,8 @@ class View # to run the command when access control is enforced. # @option options [ Object ] :comment A user-provided # comment to attach to this command. 
+ # @option options [ Integer ] :timeout_ms The operation timeout in milliseconds. + # Must be a positive integer. The default value is unset which means infinite. # # See https://mongodb.com/docs/manual/reference/command/listCollections/ # for more information and usage. @@ -66,9 +72,14 @@ class View # @since 2.0.0 def collection_names(options = {}) @batch_size = options[:batch_size] - session = client.send(:get_session, options) - cursor = read_with_retry_cursor(session, ServerSelector.primary, self) do |server| - send_initial_query(server, session, options.merge(name_only: true)) + session = client.get_session(options) + context = Operation::Context.new( + client: client, + session: session, + operation_timeouts: operation_timeouts(options) + ) + cursor = read_with_retry_cursor(session, ServerSelector.primary, self, context: context) do |server| + send_initial_query(server, session, context, options.merge(name_only: true)) end cursor.map do |info| if cursor.initial_result.connection_description.features.list_collections_enabled? @@ -112,20 +123,31 @@ def collection_names(options = {}) # # @since 2.0.5 def list_collections(options = {}) - session = client.send(:get_session, options) + session = client.get_session(options) collections_info(session, ServerSelector.primary, options) end # Create the new database view. # # @example Create the new database view. - # View::Index.new(database) + # Database::View.new(database) # # @param [ Database ] database The database. + # @param [ Hash ] options The options to configure the view with. + # + # @option options [ :cursor_lifetime | :iteration ] :timeout_mode How to interpret + # :timeout_ms (whether it applies to the lifetime of the cursor, or per + # iteration). + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must be a positive integer. The default value is unset which means infinite. # # @since 2.0.0 - def initialize(database) + def initialize(database, options = {}) @database = database + @operation_timeout_ms = options.delete(:timeout_ms) + + validate_timeout_mode!(options) + @batch_size = nil @limit = nil @collection = @database[Database::COMMAND] @@ -134,6 +156,12 @@ def initialize(database) # @api private attr_reader :database + # @return [ Integer | nil ] The timeout_ms value that was passed as an + # option to the view. + # + # @api private + attr_reader :operation_timeout_ms + # Execute an aggregation on the database view. # # @example Aggregate documents. @@ -152,15 +180,41 @@ def aggregate(pipeline, options = {}) Collection::View::Aggregation.new(self, pipeline, options) end + # The timeout_ms value to use for this operation; either specified as an + # option to the view, or inherited from the database. + # + # @return [ Integer | nil ] the timeout_ms for this operation + def timeout_ms + operation_timeout_ms || database.timeout_ms + end + + # @return [ Hash ] timeout_ms value set on the operation level (if any).
+ # + # @api private + def operation_timeouts(opts = {}) + {}.tap do |result| + if opts[:timeout_ms] || operation_timeout_ms + result[:operation_timeout_ms] = opts.delete(:timeout_ms) || operation_timeout_ms + else + result[:inherited_timeout_ms] = database.timeout_ms + end + end + end + private def collections_info(session, server_selector, options = {}, &block) description = nil - cursor = read_with_retry_cursor(session, server_selector, self) do |server| + context = Operation::Context.new( + client: client, + session: session, + operation_timeouts: operation_timeouts(options) + ) + cursor = read_with_retry_cursor(session, server_selector, self, context: context) do |server| # TODO take description from the connection used to send the query # once https://jira.mongodb.org/browse/RUBY-1601 is fixed. description = server.description - send_initial_query(server, session, options) + send_initial_query(server, session, context, options) end # On 3.0+ servers, we get just the collection names. # On 2.6 server, we get collection names prefixed with the database @@ -224,7 +278,7 @@ def initial_query_op(session, options = {}) # types (where possible). # # @return [ Operation::Result ] Result of the query. - def send_initial_query(server, session, options = {}) + def send_initial_query(server, session, context, options = {}) opts = options.dup execution_opts = {} if opts.key?(:deserialize_as_bson) @@ -232,7 +286,7 @@ def send_initial_query(server, session, options = {}) end initial_query_op(session, opts).execute( server, - context: Operation::Context.new(client: client, session: session), + context: context, options: execution_opts ) end diff --git a/lib/mongo/error.rb b/lib/mongo/error.rb index 92d6d5f4b3..8750301076 100644 --- a/lib/mongo/error.rb +++ b/lib/mongo/error.rb @@ -217,7 +217,9 @@ def write_concern_error_labels require 'mongo/error/server_api_conflict' require 'mongo/error/server_api_not_supported' require 'mongo/error/server_not_usable' +require 'mongo/error/server_timeout_error' require 'mongo/error/transactions_not_supported' +require 'mongo/error/timeout_error' require 'mongo/error/unknown_payload_type' require 'mongo/error/unmet_dependency' require 'mongo/error/unsupported_option' diff --git a/lib/mongo/error/operation_failure.rb b/lib/mongo/error/operation_failure.rb index 1cc47f520b..236eb7083f 100644 --- a/lib/mongo/error/operation_failure.rb +++ b/lib/mongo/error/operation_failure.rb @@ -18,242 +18,247 @@ module Mongo class Error - # Raised when an operation fails for some reason. - # - # @since 2.0.0 class OperationFailure < Error - extend Forwardable - include SdamErrorDetection - include ReadWriteRetryable + # Implements the behavior for an OperationFailure error. Other errors + # (e.g. ServerTimeoutError) may also implement this, so that they may + # be recognized and treated as OperationFailure errors. + module OperationFailure::Family + extend Forwardable + include SdamErrorDetection + include ReadWriteRetryable - def_delegators :@result, :operation_time + def_delegators :@result, :operation_time - # @!method connection_description - # - # @return [ Server::Description ] Server description of the server that - # the operation that this exception refers to was performed on. - # - # @api private - def_delegator :@result, :connection_description + # @!method connection_description + # + # @return [ Server::Description ] Server description of the server that + # the operation that this exception refers to was performed on. 
+ # + # @api private + def_delegator :@result, :connection_description - # @return [ Integer ] The error code parsed from the document. - # - # @since 2.6.0 - attr_reader :code + # @return [ Integer ] The error code parsed from the document. + # + # @since 2.6.0 + attr_reader :code - # @return [ String ] The error code name parsed from the document. - # - # @since 2.6.0 - attr_reader :code_name + # @return [ String ] The error code name parsed from the document. + # + # @since 2.6.0 + attr_reader :code_name - # @return [ String ] The server-returned error message - # parsed from the response. - # - # @api experimental - attr_reader :server_message + # @return [ String ] The server-returned error message + # parsed from the response. + # + # @api experimental + attr_reader :server_message - # Error codes and code names that should result in a failing getMore - # command on a change stream NOT being resumed. - # - # @api private - CHANGE_STREAM_RESUME_ERRORS = [ - {code_name: 'HostUnreachable', code: 6}, - {code_name: 'HostNotFound', code: 7}, - {code_name: 'NetworkTimeout', code: 89}, - {code_name: 'ShutdownInProgress', code: 91}, - {code_name: 'PrimarySteppedDown', code: 189}, - {code_name: 'ExceededTimeLimit', code: 262}, - {code_name: 'SocketException', code: 9001}, - {code_name: 'NotMaster', code: 10107}, - {code_name: 'InterruptedAtShutdown', code: 11600}, - {code_name: 'InterruptedDueToReplStateChange', code: 11602}, - {code_name: 'NotPrimaryNoSecondaryOk', code: 13435}, - {code_name: 'NotMasterOrSecondary', code: 13436}, + # Error codes and code names that should result in a failing getMore + # command on a change stream NOT being resumed. + # + # @api private + CHANGE_STREAM_RESUME_ERRORS = [ + {code_name: 'HostUnreachable', code: 6}, + {code_name: 'HostNotFound', code: 7}, + {code_name: 'NetworkTimeout', code: 89}, + {code_name: 'ShutdownInProgress', code: 91}, + {code_name: 'PrimarySteppedDown', code: 189}, + {code_name: 'ExceededTimeLimit', code: 262}, + {code_name: 'SocketException', code: 9001}, + {code_name: 'NotMaster', code: 10107}, + {code_name: 'InterruptedAtShutdown', code: 11600}, + {code_name: 'InterruptedDueToReplStateChange', code: 11602}, + {code_name: 'NotPrimaryNoSecondaryOk', code: 13435}, + {code_name: 'NotMasterOrSecondary', code: 13436}, - {code_name: 'StaleShardVersion', code: 63}, - {code_name: 'FailedToSatisfyReadPreference', code: 133}, - {code_name: 'StaleEpoch', code: 150}, - {code_name: 'RetryChangeStream', code: 234}, - {code_name: 'StaleConfig', code: 13388}, - ].freeze + {code_name: 'StaleShardVersion', code: 63}, + {code_name: 'FailedToSatisfyReadPreference', code: 133}, + {code_name: 'StaleEpoch', code: 150}, + {code_name: 'RetryChangeStream', code: 234}, + {code_name: 'StaleConfig', code: 13388}, + ].freeze - # Change stream can be resumed when these error messages are encountered. - # - # @since 2.6.0 - # @api private - CHANGE_STREAM_RESUME_MESSAGES = ReadWriteRetryable::WRITE_RETRY_MESSAGES + # Change stream can be resumed when these error messages are encountered. + # + # @since 2.6.0 + # @api private + CHANGE_STREAM_RESUME_MESSAGES = ReadWriteRetryable::WRITE_RETRY_MESSAGES - # Can the change stream on which this error occurred be resumed, - # provided the operation that triggered this error was a getMore? - # - # @example Is the error resumable for the change stream? - # error.change_stream_resumable? - # - # @return [ true, false ] Whether the error is resumable. - # - # @since 2.6.0 - def change_stream_resumable? 
- if @result && @result.is_a?(Mongo::Operation::GetMore::Result) - # CursorNotFound exceptions are always resumable because the server - # is not aware of the cursor id, and thus cannot determine if - # the cursor is a change stream and cannot add the - # ResumableChangeStreamError label. - return true if code == 43 + # Can the change stream on which this error occurred be resumed, + # provided the operation that triggered this error was a getMore? + # + # @example Is the error resumable for the change stream? + # error.change_stream_resumable? + # + # @return [ true, false ] Whether the error is resumable. + # + # @since 2.6.0 + def change_stream_resumable? + if @result && @result.is_a?(Mongo::Operation::GetMore::Result) + # CursorNotFound exceptions are always resumable because the server + # is not aware of the cursor id, and thus cannot determine if + # the cursor is a change stream and cannot add the + # ResumableChangeStreamError label. + return true if code == 43 - # Connection description is not populated for unacknowledged writes. - if connection_description.max_wire_version >= 9 - label?('ResumableChangeStreamError') + # Connection description is not populated for unacknowledged writes. + if connection_description.max_wire_version >= 9 + label?('ResumableChangeStreamError') + else + change_stream_resumable_code? + end else - change_stream_resumable_code? + false end - else - false end - end - def change_stream_resumable_code? - CHANGE_STREAM_RESUME_ERRORS.any? { |e| e[:code] == code } - end - private :change_stream_resumable_code? + def change_stream_resumable_code? + CHANGE_STREAM_RESUME_ERRORS.any? { |e| e[:code] == code } + end + private :change_stream_resumable_code? - # @return [ true | false ] Whether the failure includes a write - # concern error. A failure may have a top level error and a write - # concern error or either one of the two. - # - # @since 2.10.0 - def write_concern_error? - !!@write_concern_error_document - end + # @return [ true | false ] Whether the failure includes a write + # concern error. A failure may have a top level error and a write + # concern error or either one of the two. + # + # @since 2.10.0 + def write_concern_error? + !!@write_concern_error_document + end - # Returns the write concern error document as it was reported by the - # server, if any. - # - # @return [ Hash | nil ] Write concern error as reported to the server. - attr_reader :write_concern_error_document + # Returns the write concern error document as it was reported by the + # server, if any. + # + # @return [ Hash | nil ] Write concern error as reported to the server. + attr_reader :write_concern_error_document - # @return [ Integer | nil ] The error code for the write concern error, - # if a write concern error is present and has a code. - # - # @since 2.10.0 - attr_reader :write_concern_error_code + # @return [ Integer | nil ] The error code for the write concern error, + # if a write concern error is present and has a code. + # + # @since 2.10.0 + attr_reader :write_concern_error_code - # @return [ String | nil ] The code name for the write concern error, - # if a write concern error is present and has a code name. - # - # @since 2.10.0 - attr_reader :write_concern_error_code_name + # @return [ String | nil ] The code name for the write concern error, + # if a write concern error is present and has a code name. + # + # @since 2.10.0 + attr_reader :write_concern_error_code_name - # @return [ String | nil ] The details of the error. 
- # For WriteConcernErrors this is `document['writeConcernError']['errInfo']`. - # For WriteErrors this is `document['writeErrors'][0]['errInfo']`. - # For all other errors this is nil. - attr_reader :details + # @return [ String | nil ] The details of the error. + # For WriteConcernErrors this is `document['writeConcernError']['errInfo']`. + # For WriteErrors this is `document['writeErrors'][0]['errInfo']`. + # For all other errors this is nil. + attr_reader :details - # @return [ BSON::Document | nil ] The server-returned error document. - # - # @api experimental - attr_reader :document + # @return [ BSON::Document | nil ] The server-returned error document. + # + # @api experimental + attr_reader :document - # Create the operation failure. - # - # @example Create the error object - # OperationFailure.new(message, result) - # - # @example Create the error object with a code and a code name - # OperationFailure.new(message, result, :code => code, :code_name => code_name) - # - # @param [ String ] message The error message. - # @param [ Operation::Result ] result The result object. - # @param [ Hash ] options Additional parameters. - # - # @option options [ Integer ] :code Error code. - # @option options [ String ] :code_name Error code name. - # @option options [ BSON::Document ] :document The server-returned - # error document. - # @option options [ String ] server_message The server-returned - # error message parsed from the response. - # @option options [ Hash ] :write_concern_error_document The - # server-supplied write concern error document, if any. - # @option options [ Integer ] :write_concern_error_code Error code for - # write concern error, if any. - # @option options [ String ] :write_concern_error_code_name Error code - # name for write concern error, if any. - # @option options [ Array ] :write_concern_error_labels Error - # labels for the write concern error, if any. - # @option options [ Array ] :labels The set of labels associated - # with the error. - # @option options [ true | false ] :wtimeout Whether the error is a wtimeout. - def initialize(message = nil, result = nil, options = {}) - @details = retrieve_details(options[:document]) - super(append_details(message, @details)) + # @return [ Operation::Result ] the result object for the operation. + # + # @api private + attr_reader :result - @result = result - @code = options[:code] - @code_name = options[:code_name] - @write_concern_error_document = options[:write_concern_error_document] - @write_concern_error_code = options[:write_concern_error_code] - @write_concern_error_code_name = options[:write_concern_error_code_name] - @write_concern_error_labels = options[:write_concern_error_labels] || [] - @labels = options[:labels] || [] - @wtimeout = !!options[:wtimeout] - @document = options[:document] - @server_message = options[:server_message] - end + # Create the operation failure. + # + # @param [ String ] message The error message. + # @param [ Operation::Result ] result The result object. + # @param [ Hash ] options Additional parameters. + # + # @option options [ Integer ] :code Error code. + # @option options [ String ] :code_name Error code name. + # @option options [ BSON::Document ] :document The server-returned + # error document. + # @option options [ String ] server_message The server-returned + # error message parsed from the response. + # @option options [ Hash ] :write_concern_error_document The + # server-supplied write concern error document, if any. 
+ # @option options [ Integer ] :write_concern_error_code Error code for + # write concern error, if any. + # @option options [ String ] :write_concern_error_code_name Error code + # name for write concern error, if any. + # @option options [ Array ] :write_concern_error_labels Error + # labels for the write concern error, if any. + # @option options [ Array ] :labels The set of labels associated + # with the error. + # @option options [ true | false ] :wtimeout Whether the error is a wtimeout. + def initialize(message = nil, result = nil, options = {}) + @details = retrieve_details(options[:document]) + super(append_details(message, @details)) - # Whether the error is a write concern timeout. - # - # @return [ true | false ] Whether the error is a write concern timeout. - # - # @since 2.7.1 - def wtimeout? - @wtimeout - end + @result = result + @code = options[:code] + @code_name = options[:code_name] + @write_concern_error_document = options[:write_concern_error_document] + @write_concern_error_code = options[:write_concern_error_code] + @write_concern_error_code_name = options[:write_concern_error_code_name] + @write_concern_error_labels = options[:write_concern_error_labels] || [] + @labels = options[:labels] || [] + @wtimeout = !!options[:wtimeout] + @document = options[:document] + @server_message = options[:server_message] + end - # Whether the error is MaxTimeMSExpired. - # - # @return [ true | false ] Whether the error is MaxTimeMSExpired. - # - # @since 2.10.0 - def max_time_ms_expired? - code == 50 # MaxTimeMSExpired - end + # Whether the error is a write concern timeout. + # + # @return [ true | false ] Whether the error is a write concern timeout. + # + # @since 2.7.1 + def wtimeout? + @wtimeout + end - # Whether the error is caused by an attempted retryable write - # on a storage engine that does not support retryable writes. - # - # @return [ true | false ] Whether the error is caused by an attempted - # retryable write on a storage engine that does not support retryable writes. - # - # @since 2.10.0 - def unsupported_retryable_write? - # code 20 is IllegalOperation. - # Note that the document is expected to be a BSON::Document, thus - # either having string keys or providing indifferent access. - code == 20 && server_message&.start_with?("Transaction numbers") || false - end + # Whether the error is MaxTimeMSExpired. + # + # @return [ true | false ] Whether the error is MaxTimeMSExpired. + # + # @since 2.10.0 + def max_time_ms_expired? + code == 50 # MaxTimeMSExpired + end + + # Whether the error is caused by an attempted retryable write + # on a storage engine that does not support retryable writes. + # + # @return [ true | false ] Whether the error is caused by an attempted + # retryable write on a storage engine that does not support retryable writes. + # + # @since 2.10.0 + def unsupported_retryable_write? + # code 20 is IllegalOperation. + # Note that the document is expected to be a BSON::Document, thus + # either having string keys or providing indifferent access. 
+ code == 20 && server_message&.start_with?("Transaction numbers") || false + end - private + private - # Retrieve the details from a document - # - # @return [ Hash | nil ] the details extracted from the document - def retrieve_details(document) - return nil unless document - if wce = document['writeConcernError'] - return wce['errInfo'] - elsif we = document['writeErrors']&.first - return we['errInfo'] + # Retrieve the details from a document + # + # @return [ Hash | nil ] the details extracted from the document + def retrieve_details(document) + return nil unless document + if wce = document['writeConcernError'] + return wce['errInfo'] + elsif we = document['writeErrors']&.first + return we['errInfo'] + end end - end - # Append the details to the message - # - # @return [ String ] the message with the details appended to it - def append_details(message, details) - return message unless details && message - message + " -- #{details.to_json}" + # Append the details to the message + # + # @return [ String ] the message with the details appended to it + def append_details(message, details) + return message unless details && message + message + " -- #{details.to_json}" + end end + + # OperationFailure is the canonical implementor of the + # OperationFailure::Family concern. + include OperationFailure::Family end end end diff --git a/lib/mongo/error/server_timeout_error.rb b/lib/mongo/error/server_timeout_error.rb new file mode 100644 index 0000000000..d3e66a0eaf --- /dev/null +++ b/lib/mongo/error/server_timeout_error.rb @@ -0,0 +1,12 @@ +# frozen_string_literal: true + +require 'mongo/error/timeout_error' + +module Mongo + class Error + # Raised when the server returns error code 50. + class ServerTimeoutError < TimeoutError + include OperationFailure::Family + end + end +end diff --git a/lib/mongo/error/socket_timeout_error.rb b/lib/mongo/error/socket_timeout_error.rb index 25b5980fd8..b8332f61f1 100644 --- a/lib/mongo/error/socket_timeout_error.rb +++ b/lib/mongo/error/socket_timeout_error.rb @@ -15,13 +15,15 @@ # See the License for the specific language governing permissions and # limitations under the License. +require 'mongo/error/timeout_error' + module Mongo class Error # Raised when a socket connection times out. # # @since 2.0.0 - class SocketTimeoutError < Error + class SocketTimeoutError < TimeoutError include WriteRetryable include ChangeStreamResumable end diff --git a/lib/mongo/error/timeout_error.rb b/lib/mongo/error/timeout_error.rb new file mode 100644 index 0000000000..a607f002dd --- /dev/null +++ b/lib/mongo/error/timeout_error.rb @@ -0,0 +1,23 @@ +# frozen_string_literal: true + +# Copyright (C) 2015-present MongoDB Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +module Mongo + class Error + # Raised when a Client Side Operation Timeout times out. 
+ class TimeoutError < Error + end + end +end diff --git a/lib/mongo/grid/fs_bucket.rb b/lib/mongo/grid/fs_bucket.rb index 8d723cdf55..4fbc5218c1 100644 --- a/lib/mongo/grid/fs_bucket.rb +++ b/lib/mongo/grid/fs_bucket.rb @@ -201,8 +201,8 @@ def prefix # @return [ Result ] The result of the remove. # # @since 2.0.0 - def delete_one(file) - delete(file.id) + def delete_one(file, opts = {}) + delete(file.id, opts) end # Remove a single file, identified by its id from the GridFS. @@ -217,9 +217,14 @@ def delete_one(file) # @raise [ Error::FileNotFound ] If the file is not found. # # @since 2.1.0 - def delete(id) - result = files_collection.find({ :_id => id }, @options).delete_one - chunks_collection.find({ :files_id => id }, @options).delete_many + def delete(id, opts = {}) + timeout_holder = CsotTimeoutHolder.new(operation_timeouts: operation_timeouts(opts)) + result = files_collection + .find({ :_id => id }, @options.merge(timeout_ms: timeout_holder.remaining_timeout_ms)) + .delete_one(timeout_ms: timeout_holder.remaining_timeout_ms) + chunks_collection + .find({ :files_id => id }, @options.merge(timeout_ms: timeout_holder.remaining_timeout_ms)) + .delete_many(timeout_ms: timeout_holder.remaining_timeout_ms) raise Error::FileNotFound.new(id, :id) if result.n == 0 result end @@ -485,9 +490,10 @@ def write_concern end # Drop the collections that implement this bucket. - def drop - files_collection.drop - chunks_collection.drop + def drop(opts = {}) + context = Operation::Context.new(operation_timeouts: operation_timeouts(opts)) + files_collection.drop(timeout_ms: context.remaining_timeout_ms) + chunks_collection.drop(timeout_ms: context.remaining_timeout_ms) end private @@ -512,12 +518,24 @@ def files_name "#{prefix}.#{Grid::File::Info::COLLECTION}" end - def ensure_indexes! - if files_collection.find({}, limit: 1, projection: { _id: 1 }).first.nil? + def ensure_indexes!(timeout_holder = nil) + fc_idx = files_collection.find( + {}, + limit: 1, + projection: { _id: 1 }, + timeout_ms: timeout_holder&.remaining_timeout_ms + ).first + if fc_idx.nil? create_index_if_missing!(files_collection, FSBucket::FILES_INDEX) end - if chunks_collection.find({}, limit: 1, projection: { _id: 1 }).first.nil? + cc_idx = chunks_collection.find( + {}, + limit: 1, + projection: { _id: 1 }, + timeout_ms: timeout_holder&.remaining_timeout_ms + ).first + if cc_idx.nil? create_index_if_missing!(chunks_collection, FSBucket::CHUNKS_INDEX, :unique => true) end end @@ -528,7 +546,7 @@ def create_index_if_missing!(collection, index_spec, options = {}) if indexes_view.get(index_spec).nil? indexes_view.create_one(index_spec, options) end - rescue Mongo::Error::OperationFailure => e + rescue Mongo::Error::OperationFailure::Family => e # proceed with index creation if a NamespaceNotFound error is thrown if e.code == 26 indexes_view.create_one(index_spec, options) @@ -537,6 +555,21 @@ def create_index_if_missing!(collection, index_spec, options = {}) end end end + + # @return [ Hash ] timeout_ms value set on the operation level (if any), + # and/or timeout_ms that is set on collection/database/client level (if any). + # + # @api private + def operation_timeouts(opts = {}) + # TODO: We should re-evaluate if we need two timeouts separately. + {}.tap do |result| + if opts[:timeout_ms].nil? 
+ result[:inherited_timeout_ms] = database.timeout_ms + else + result[:operation_timeout_ms] = opts[:timeout_ms] + end + end + end end end end diff --git a/lib/mongo/grid/stream/read.rb b/lib/mongo/grid/stream/read.rb index f33b4a5126..796eaa129b 100644 --- a/lib/mongo/grid/stream/read.rb +++ b/lib/mongo/grid/stream/read.rb @@ -59,6 +59,12 @@ def initialize(fs, options) @file_id = @options.delete(:file_id) @options.freeze @open = true + @timeout_holder = CsotTimeoutHolder.new( + operation_timeouts: { + operation_timeout_ms: options[:timeout_ms], + inherited_timeout_ms: fs.database.timeout_ms + } + ) end # Iterate through chunk data streamed from the FSBucket. @@ -178,7 +184,11 @@ def read_preference # @since 2.1.0 def file_info @file_info ||= begin - doc = options[:file_info_doc] || fs.files_collection.find(_id: file_id).first + doc = options[:file_info_doc] || + fs.files_collection.find( + { _id: file_id }, + { timeout_ms: @timeout_holder.remaining_timeout_ms! } + ).first if doc File::Info.new(Options::Mapper.transform(doc, File::Info::MAPPINGS.invert)) else @@ -209,6 +219,10 @@ def view else options end + if @timeout_holder.csot? + opts[:timeout_ms] = @timeout_holder.remaining_timeout_ms! + opts[:timeout_mode] = :cursor_lifetime + end fs.chunks_collection.find({ :files_id => file_id }, opts).sort(:n => 1) end diff --git a/lib/mongo/grid/stream/write.rb b/lib/mongo/grid/stream/write.rb index 75ef68c56b..4ff2dc0a34 100644 --- a/lib/mongo/grid/stream/write.rb +++ b/lib/mongo/grid/stream/write.rb @@ -83,6 +83,12 @@ def initialize(fs, options) @options.freeze @filename = @options[:filename] @open = true + @timeout_holder = CsotTimeoutHolder.new( + operation_timeouts: { + operation_timeout_ms: options[:timeout_ms], + inherited_timeout_ms: fs.database.timeout_ms + } + ) end # Write to the GridFS bucket from the source stream or a string. @@ -107,7 +113,12 @@ def write(io) end chunks = File::Chunk.split(io, file_info, @n) @n += chunks.size - chunks_collection.insert_many(chunks) unless chunks.empty? + unless chunks.empty? + chunks_collection.insert_many( + chunks, + timeout_ms: @timeout_holder.remaining_timeout_ms! + ) + end self end @@ -124,7 +135,10 @@ def write(io) def close ensure_open! update_length - files_collection.insert_one(file_info, @options) + files_collection.insert_one( + file_info, + @options.merge(timeout_ms: @timeout_holder.remaining_timeout_ms!) + ) @open = false file_id end @@ -166,7 +180,10 @@ def closed? # # @since 2.1.0 def abort - fs.chunks_collection.find({ :files_id => file_id }, @options).delete_many + fs.chunks_collection.find( + { :files_id => file_id }, + @options.merge(timeout_ms: @timeout_holder.remaining_timeout_ms!) + ).delete_many (@open = false) || true end @@ -200,7 +217,7 @@ def file_info end def ensure_indexes! - fs.send(:ensure_indexes!) + fs.send(:ensure_indexes!, @timeout_holder) end def ensure_open! diff --git a/lib/mongo/index/view.rb b/lib/mongo/index/view.rb index 3a45842899..be9e1e7f88 100644 --- a/lib/mongo/index/view.rb +++ b/lib/mongo/index/view.rb @@ -15,6 +15,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +require 'mongo/cursor/nontailable' + module Mongo module Index @@ -25,6 +27,8 @@ class View extend Forwardable include Enumerable include Retryable + include Mongo::CursorHost + include Cursor::NonTailable # @return [ Collection ] collection The indexes collection. attr_reader :collection @@ -33,6 +37,12 @@ class View # when sending the listIndexes command. 
attr_reader :batch_size + # @return [ Integer | nil ] The timeout_ms value that was passed as an + # option to the view. + # + # @api private + attr_reader :operation_timeout_ms + def_delegators :@collection, :cluster, :database, :read_preference, :write_concern, :client def_delegators :cluster, :next_primary @@ -90,7 +100,7 @@ class View # @since 2.0.0 def drop_one(name, options = {}) raise Error::MultiIndexDrop.new if name == Index::ALL - drop_by_name(name, comment: options[:comment]) + drop_by_name(name, options) end # Drop all indexes on the collection. @@ -107,7 +117,7 @@ def drop_one(name, options = {}) # # @since 2.0.0 def drop_all(options = {}) - drop_by_name(Index::ALL, comment: options[:comment]) + drop_by_name(Index::ALL, options) end # Creates an index on the collection. @@ -161,7 +171,7 @@ def create_one(keys, options = {}) if session = @options[:session] create_options[:session] = session end - %i(commit_quorum session comment).each do |key| + %i(commit_quorum session comment timeout_ms max_time_ms).each do |key| if value = options.delete(key) create_options[key] = value end @@ -210,7 +220,7 @@ def create_many(*models) options = models.pop end - client.send(:with_session, @options.merge(options)) do |session| + client.with_session(@options.merge(options)) do |session| server = next_primary(nil, session) indexes = normalize_models(models, server) @@ -229,8 +239,12 @@ def create_many(*models) write_concern: write_concern, comment: options[:comment], } - - Operation::CreateIndex.new(spec).execute(server, context: Operation::Context.new(client: client, session: session)) + context = Operation::Context.new( + client: client, + session: session, + operation_timeouts: operation_timeouts(options) + ) + Operation::CreateIndex.new(spec).execute(server, context: context) end end @@ -263,9 +277,15 @@ def get(keys_or_name) # # @since 2.0.0 def each(&block) - session = client.send(:get_session, @options) - cursor = read_with_retry_cursor(session, ServerSelector.primary, self) do |server| - send_initial_query(server, session) + session = client.get_session(@options) + context = Operation::Context.new( + client: client, + session: session, + operation_timeouts: operation_timeouts(@options) + ) + + cursor = read_with_retry_cursor(session, ServerSelector.primary, self, context: context) do |server| + send_initial_query(server, session, context) end if block_given? cursor.each do |doc| @@ -283,22 +303,51 @@ def each(&block) # # @param [ Collection ] collection The collection. # @param [ Hash ] options Options for getting a list of indexes. - # Only relevant for when the listIndexes command is used with server - # versions >=2.8. # # @option options [ Integer ] :batch_size The batch size for results # returned from the listIndexes command. + # @option options [ :cursor_lifetime | :iteration ] :timeout_mode How to interpret + # :timeout_ms (whether it applies to the lifetime of the cursor, or per + # iteration). + # @option options [ Integer ] :timeout_ms The per-operation timeout in milliseconds. + # Must be a positive integer. The default value is unset which means infinite. # # @since 2.0.0 def initialize(collection, options = {}) @collection = collection + @operation_timeout_ms = options.delete(:timeout_ms) + + validate_timeout_mode!(options) + @batch_size = options[:batch_size] @options = options end + # The timeout_ms value to use for this operation; either specified as an + # option to the view, or inherited from the collection.
+ # + # @return [ Integer | nil ] the timeout_ms for this operation + def timeout_ms + operation_timeout_ms || collection.timeout_ms + end + + # @return [ Hash ] timeout_ms value set on the operation level (if any), + # and/or timeout_ms that is set on collection/database/client level (if any). + # + # @api private + def operation_timeouts(opts = {}) + {}.tap do |result| + if opts[:timeout_ms] || operation_timeout_ms + result[:operation_timeout_ms] = opts.delete(:timeout_ms) || operation_timeout_ms + else + result[:inherited_timeout_ms] = collection.timeout_ms + end + end + end + private - def drop_by_name(name, comment: nil) + def drop_by_name(name, opts = {}) client.send(:with_session, @options) do |session| spec = { db_name: database.name, @@ -307,9 +356,14 @@ def drop_by_name(name, comment: nil) session: session, write_concern: write_concern, } - spec[:comment] = comment unless comment.nil? + spec[:comment] = opts[:comment] unless opts[:comment].nil? server = next_primary(nil, session) - Operation::DropIndex.new(spec).execute(server, context: Operation::Context.new(client: client, session: session)) + context = Operation::Context.new( + client: client, + session: session, + operation_timeouts: operation_timeouts(opts) + ) + Operation::DropIndex.new(spec).execute(server, context: context) end end @@ -347,8 +401,8 @@ def normalize_models(models, server) end end - def send_initial_query(server, session) - initial_query_op(session).execute(server, context: Operation::Context.new(client: client, session: session)) + def send_initial_query(server, session, context) + initial_query_op(session).execute(server, context: context) end end end diff --git a/lib/mongo/operation.rb b/lib/mongo/operation.rb index 8def25dbe3..c7d7140121 100644 --- a/lib/mongo/operation.rb +++ b/lib/mongo/operation.rb @@ -22,6 +22,7 @@ require 'mongo/operation/shared/validatable' require 'mongo/operation/shared/object_id_generator' require 'mongo/operation/shared/op_msg_executable' +require 'mongo/operation/shared/timed' require 'mongo/operation/op_msg_base' require 'mongo/operation/command' diff --git a/lib/mongo/operation/context.rb b/lib/mongo/operation/context.rb index b7cae91d5a..03d6e0957d 100644 --- a/lib/mongo/operation/context.rb +++ b/lib/mongo/operation/context.rb @@ -34,8 +34,15 @@ module Operation # operations. # # @api private - class Context - def initialize(client: nil, session: nil, connection_global_id: nil, options: nil) + class Context < CsotTimeoutHolder + def initialize( + client: nil, + session: nil, + connection_global_id: nil, + operation_timeouts: {}, + view: nil, + options: nil + ) if options if client raise ArgumentError, 'Client and options cannot both be specified' @@ -52,14 +59,33 @@ def initialize(client: nil, session: nil, connection_global_id: nil, options: ni @client = client @session = session + @view = view @connection_global_id = connection_global_id @options = options + super(session: session, operation_timeouts: operation_timeouts) end attr_reader :client attr_reader :session + attr_reader :view attr_reader :options + # Returns a new Operation::Context with the deadline refreshed + # and relative to the current moment. 
+ # + # @return [ Operation::Context ] the refreshed context + def refresh(connection_global_id: @connection_global_id, timeout_ms: nil, view: nil) + operation_timeouts = @operation_timeouts + operation_timeouts = operation_timeouts.merge(operation_timeout_ms: timeout_ms) if timeout_ms + + self.class.new(client: client, + session: session, + connection_global_id: connection_global_id, + operation_timeouts: operation_timeouts, + view: view || self.view, + options: options) + end + def connection_global_id @connection_global_id || session&.pinned_connection_global_id end @@ -122,10 +148,18 @@ def encrypt? client&.encrypter&.encrypt? || false end + def encrypt(db_name, cmd) + encrypter.encrypt(db_name, cmd, self) + end + def decrypt? !!client&.encrypter end + def decrypt(cmd) + encrypter.decrypt(cmd, self) + end + def encrypter if client&.encrypter client.encrypter @@ -133,6 +167,10 @@ def encrypter raise Error::InternalDriverError, 'Encrypter should only be accessed when encryption is to be performed' end end + + def inspect + "#<#{self.class} connection_global_id=#{connection_global_id.inspect} deadline=#{deadline.inspect} options=#{options.inspect} operation_timeouts=#{operation_timeouts.inspect}>" + end end end end diff --git a/lib/mongo/operation/create_search_indexes/op_msg.rb b/lib/mongo/operation/create_search_indexes/op_msg.rb index 444d35721b..a036d1e394 100644 --- a/lib/mongo/operation/create_search_indexes/op_msg.rb +++ b/lib/mongo/operation/create_search_indexes/op_msg.rb @@ -14,11 +14,11 @@ class OpMsg < OpMsgBase # Returns the command to send to the database, describing the # desired createSearchIndexes operation. # - # @param [ Mongo::Server ] _server the server that will receive the + # @param [ Connection ] _connection the connection that will receive the # command # # @return [ Hash ] the selector - def selector(_server) + def selector(_connection) { createSearchIndexes: coll_name, :$db => db_name, diff --git a/lib/mongo/operation/delete/op_msg.rb b/lib/mongo/operation/delete/op_msg.rb index f1435f9564..4ee081478d 100644 --- a/lib/mongo/operation/delete/op_msg.rb +++ b/lib/mongo/operation/delete/op_msg.rb @@ -49,7 +49,8 @@ def selector(connection) def message(connection) section = Protocol::Msg::Section1.new(IDENTIFIER, send(IDENTIFIER)) - Protocol::Msg.new(flags, {}, command(connection), section) + cmd = apply_relevant_timeouts_to(command(connection), connection) + Protocol::Msg.new(flags, {}, cmd, section) end end end diff --git a/lib/mongo/operation/drop_search_index/op_msg.rb b/lib/mongo/operation/drop_search_index/op_msg.rb index 8f4d323c55..1a27b6d0ba 100644 --- a/lib/mongo/operation/drop_search_index/op_msg.rb +++ b/lib/mongo/operation/drop_search_index/op_msg.rb @@ -14,11 +14,11 @@ class OpMsg < OpMsgBase # Returns the command to send to the database, describing the # desired dropSearchIndex operation. # - # @param [ Mongo::Server ] _server the server that will receive the + # @param [ Connection ] _connection the connection that will receive the # command # # @return [ Hash ] the selector - def selector(_server) + def selector(_connection) { dropSearchIndex: coll_name, :$db => db_name, diff --git a/lib/mongo/operation/find/op_msg.rb b/lib/mongo/operation/find/op_msg.rb index f29ec4686b..28b8908ba3 100644 --- a/lib/mongo/operation/find/op_msg.rb +++ b/lib/mongo/operation/find/op_msg.rb @@ -31,6 +31,51 @@ class OpMsg < OpMsgBase private + # Applies the relevant CSOT timeouts for a find command. 
+ # Considers the cursor type and timeout mode and will add (or omit) a + # maxTimeMS field accordingly. + def apply_relevant_timeouts_to(spec, connection) + with_max_time(connection) do |max_time_sec| + timeout_ms = max_time_sec ? (max_time_sec * 1_000).to_i : nil + apply_find_timeouts_to(spec, timeout_ms) unless connection.description.mongocryptd? + end + end + + def apply_find_timeouts_to(spec, timeout_ms) + view = context&.view + return spec unless view + + case view.cursor_type + when nil # non-tailable + if view.timeout_mode == :cursor_lifetime + spec[:maxTimeMS] = timeout_ms || view.options[:max_time_ms] + else # timeout_mode == :iterable + # drivers MUST honor the timeoutMS option for the initial command + # but MUST NOT append a maxTimeMS field to the command sent to the + # server + if !timeout_ms && view.options[:max_time_ms] + spec[:maxTimeMS] = view.options[:max_time_ms] + end + end + + when :tailable + # If timeoutMS is set, drivers...MUST NOT append a maxTimeMS field to any commands. + if !timeout_ms && view.options[:max_time_ms] + spec[:maxTimeMS] = view.options[:max_time_ms] + end + + when :tailable_await + # The server supports the maxTimeMS option for the original command. + if timeout_ms || view.options[:max_time_ms] + spec[:maxTimeMS] = timeout_ms || view.options[:max_time_ms] + end + end + + spec.tap do |spc| + spc.delete(:maxTimeMS) if spc[:maxTimeMS].nil? + end + end + def selector(connection) # The mappings are BSON::Documents and as such store keys as # strings, the spec here has symbol keys. diff --git a/lib/mongo/operation/get_more/op_msg.rb b/lib/mongo/operation/get_more/op_msg.rb index cad0ab3bbe..777717b25b 100644 --- a/lib/mongo/operation/get_more/op_msg.rb +++ b/lib/mongo/operation/get_more/op_msg.rb @@ -28,6 +28,39 @@ class OpMsg < OpMsgBase include ExecutableTransactionLabel include PolymorphicResult include CommandBuilder + + private + + # Applies the relevant CSOT timeouts for a getMore command. + # Considers the cursor type and timeout mode and will add (or omit) a + # maxTimeMS field accordingly. + def apply_relevant_timeouts_to(spec, connection) + with_max_time(connection) do |max_time_sec| + timeout_ms = max_time_sec ? (max_time_sec * 1_000).to_i : nil + apply_get_more_timeouts_to(spec, timeout_ms) + end + end + + def apply_get_more_timeouts_to(spec, timeout_ms) + view = context&.view + return spec unless view + + if view.cursor_type == :tailable_await + # If timeoutMS is set, drivers MUST apply it to the original operation. + # Drivers MUST also apply the original timeoutMS value to each next + # call on the resulting cursor but MUST NOT use it to derive a + # maxTimeMS value for getMore commands. Helpers for operations that + # create tailable awaitData cursors MUST also support the + # maxAwaitTimeMS option. Drivers MUST error if this option is set, + # timeoutMS is set to a non-zero value, and maxAwaitTimeMS is greater + # than or equal to timeoutMS. If this option is set, drivers MUST use + # it as the maxTimeMS field on getMore commands. + max_await_time_ms = view.respond_to?(:max_await_time_ms) ? 
view.max_await_time_ms : nil + spec[:maxTimeMS] = max_await_time_ms if max_await_time_ms + end + + spec + end end end end diff --git a/lib/mongo/operation/insert/op_msg.rb b/lib/mongo/operation/insert/op_msg.rb index 7ab863e6af..39b299ef76 100644 --- a/lib/mongo/operation/insert/op_msg.rb +++ b/lib/mongo/operation/insert/op_msg.rb @@ -35,7 +35,7 @@ class OpMsg < OpMsgBase def get_result(connection, context, options = {}) # This is a Mongo::Operation::Insert::Result - Result.new(*dispatch_message(connection, context), @ids) + Result.new(*dispatch_message(connection, context), @ids, context: context) end def selector(connection) @@ -49,7 +49,8 @@ def selector(connection) def message(connection) section = Protocol::Msg::Section1.new(IDENTIFIER, send(IDENTIFIER)) - Protocol::Msg.new(flags, {}, command(connection), section) + cmd = apply_relevant_timeouts_to(command(connection), connection) + Protocol::Msg.new(flags, {}, cmd, section) end end end diff --git a/lib/mongo/operation/insert/result.rb b/lib/mongo/operation/insert/result.rb index 05995de079..cdde68e252 100644 --- a/lib/mongo/operation/insert/result.rb +++ b/lib/mongo/operation/insert/result.rb @@ -47,11 +47,13 @@ class Result < Operation::Result # Global id of the connection on which the operation that # this result is for was performed. # @param [ Array ] ids The ids of the inserted documents. + # @param [ Operation::Context | nil ] context the operation context that + # was active when this result was produced. # # @since 2.0.0 # @api private - def initialize(replies, connection_description, connection_global_id, ids) - super(replies, connection_description, connection_global_id) + def initialize(replies, connection_description, connection_global_id, ids, context: nil) + super(replies, connection_description, connection_global_id, context: context) @inserted_ids = ids end diff --git a/lib/mongo/operation/list_collections/result.rb b/lib/mongo/operation/list_collections/result.rb index 9b45e8b4fb..e964882f04 100644 --- a/lib/mongo/operation/list_collections/result.rb +++ b/lib/mongo/operation/list_collections/result.rb @@ -85,7 +85,7 @@ def validate! if successful? self else - raise Error::OperationFailure.new( + raise operation_failure_class.new( parser.message, self, code: parser.code, diff --git a/lib/mongo/operation/map_reduce/result.rb b/lib/mongo/operation/map_reduce/result.rb index 8959a99d15..6e6660ee93 100644 --- a/lib/mongo/operation/map_reduce/result.rb +++ b/lib/mongo/operation/map_reduce/result.rb @@ -108,7 +108,7 @@ def time # @example Validate the result. # result.validate! # - # @raise [ Error::OperationFailure ] If an error is in the result. + # @raise [ Error::OperationFailure::Family ] If an error is in the result. # # @return [ Result ] The result if verification passed. 
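# --------------------------------------------------------------------------
# [Editorial sketch - not part of the patch] Rough application-level view of
# the tailable-await rule implemented above: with timeoutMS set, the driver
# is expected to use maxAwaitTimeMS as the maxTimeMS of each getMore, and to
# reject maxAwaitTimeMS >= timeoutMS. The URI, collection (assumed capped)
# and timing values below are hypothetical.
require 'mongo'

client = Mongo::Client.new('mongodb://localhost:27017/test')
cursor = client[:events].find(
  {},
  cursor_type: :tailable_await,
  max_await_time_ms: 500, # becomes maxTimeMS on each getMore
  timeout_ms: 5_000       # overall CSOT budget; must exceed max_await_time_ms
)
cursor.each { |doc| puts doc }
# --------------------------------------------------------------------------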
# diff --git a/lib/mongo/operation/op_msg_base.rb b/lib/mongo/operation/op_msg_base.rb index 5f00d42aec..5716227cd1 100644 --- a/lib/mongo/operation/op_msg_base.rb +++ b/lib/mongo/operation/op_msg_base.rb @@ -22,11 +22,13 @@ class OpMsgBase include Specifiable include Executable include SessionsSupported + include Timed private def message(connection) - Protocol::Msg.new(flags, options(connection), command(connection)) + cmd = apply_relevant_timeouts_to(command(connection), connection) + Protocol::Msg.new(flags, options(connection), cmd) end end end diff --git a/lib/mongo/operation/result.rb b/lib/mongo/operation/result.rb index d6989444b8..9508432ccb 100644 --- a/lib/mongo/operation/result.rb +++ b/lib/mongo/operation/result.rb @@ -100,9 +100,13 @@ class Result # @param [ Integer ] connection_global_id # Global id of the connection on which the operation that # this result is for was performed. + # @param [ Operation::Context | nil ] context the context that was active + # when this result was produced. # # @api private - def initialize(replies, connection_description = nil, connection_global_id = nil) + def initialize(replies, connection_description = nil, connection_global_id = nil, context: nil) + @context = context + if replies if replies.is_a?(Array) if replies.length != 1 @@ -138,6 +142,12 @@ def initialize(replies, connection_description = nil, connection_global_id = nil # @api private attr_reader :connection_global_id + # @return [ Operation::Context | nil ] the operation context (if any) + # that was active when this result was produced. + # + # @api private + attr_reader :context + # @api private def_delegators :parser, :not_master?, :node_recovering?, :node_shutting_down? @@ -320,7 +330,7 @@ def ok? # @example Validate the result. # result.validate! # - # @raise [ Error::OperationFailure ] If an error is in the result. + # @raise [ Error::OperationFailure::Family ] If an error is in the result. # # @return [ Result ] The result if verification passed. # @@ -330,16 +340,16 @@ def validate! !successful? ? raise_operation_failure : self end - # The exception instance (of the Error::OperationFailure class) + # The exception instance (of Error::OperationFailure::Family) # that would be raised during processing of this result. # # This method should only be called when result is not successful. # - # @return [ Error::OperationFailure ] The exception. + # @return [ Error::OperationFailure::Family ] The exception. # # @api private def error - @error ||= Error::OperationFailure.new( + @error ||= operation_failure_class.new( parser.message, self, code: parser.code, @@ -453,6 +463,14 @@ def snapshot_timestamp private + def operation_failure_class + if context&.csot? && parser.code == 50 + Error::ServerTimeoutError + else + Error::OperationFailure + end + end + def aggregate_returned_count replies.reduce(0) do |n, reply| n += reply.number_returned diff --git a/lib/mongo/operation/shared/executable.rb b/lib/mongo/operation/shared/executable.rb index 9b61476631..19e98013de 100644 --- a/lib/mongo/operation/shared/executable.rb +++ b/lib/mongo/operation/shared/executable.rb @@ -28,7 +28,18 @@ module Executable include ResponseHandling + # @return [ Operation::Context | nil ] the operation context used to + # execute this operation. + attr_accessor :context + def do_execute(connection, context, options = {}) + # Save the context on the instance, to avoid having to pass it as a + # parameter to every single method. 
There are many legacy methods that + # still accept it as a parameter, which are left as-is for now to + # minimize the impact of this change. Moving forward, it may be + # reasonable to refactor things so this saved reference is used instead. + @context = context + session&.materialize_if_needed unpin_maybe(session, connection) do add_error_labels(connection, context) do @@ -93,7 +104,7 @@ def result_class end def get_result(connection, context, options = {}) - result_class.new(*dispatch_message(connection, context, options)) + result_class.new(*dispatch_message(connection, context, options), context: context) end # Returns a Protocol::Message or nil as reply. diff --git a/lib/mongo/operation/shared/op_msg_executable.rb b/lib/mongo/operation/shared/op_msg_executable.rb index a97b8fd48d..99d890de9b 100644 --- a/lib/mongo/operation/shared/op_msg_executable.rb +++ b/lib/mongo/operation/shared/op_msg_executable.rb @@ -32,7 +32,10 @@ module OpMsgExecutable # # @return [ Mongo::Operation::Result ] The operation result. def execute(server, context:, options: {}) - server.with_connection(connection_global_id: context.connection_global_id) do |connection| + server.with_connection( + connection_global_id: context.connection_global_id, + context: context + ) do |connection| execute_with_connection(connection, context: context, options: options) end end diff --git a/lib/mongo/operation/shared/response_handling.rb b/lib/mongo/operation/shared/response_handling.rb index 799721a8de..36f4f8dafd 100644 --- a/lib/mongo/operation/shared/response_handling.rb +++ b/lib/mongo/operation/shared/response_handling.rb @@ -42,7 +42,7 @@ def validate_result(result, connection, context) # Adds error labels to exceptions raised in the yielded to block, # which should perform MongoDB operations and raise Mongo::Errors on # failure. This method handles network errors (Error::SocketError) - # and server-side errors (Error::OperationFailure); it does not + # and server-side errors (Error::OperationFailure::Family); it does not # handle server selection errors (Error::NoServerAvailable), for which # labels are added in the server selection code. # @@ -65,7 +65,7 @@ def add_error_labels(connection, context) rescue Mongo::Error::SocketTimeoutError => e maybe_add_retryable_write_error_label!(e, connection, context) raise e - rescue Mongo::Error::OperationFailure => e + rescue Mongo::Error::OperationFailure::Family => e if context.committing_transaction? if e.write_retryable? || e.wtimeout? || (e.write_concern_error? && !Session::UNLABELED_WRITE_CONCERN_CODES.include?(e.write_concern_error_code) @@ -104,7 +104,7 @@ def unpin_maybe(session, connection) # raised during execution of operations on servers. def add_server_diagnostics(connection) yield - rescue Error::SocketError, Error::SocketTimeoutError + rescue Error::SocketError, Error::SocketTimeoutError, Error::TimeoutError # Diagnostics should have already been added by the connection code, # do not add them again. 
raise diff --git a/lib/mongo/operation/shared/sessions_supported.rb b/lib/mongo/operation/shared/sessions_supported.rb index aed9bd23e6..0eb9d216b9 100644 --- a/lib/mongo/operation/shared/sessions_supported.rb +++ b/lib/mongo/operation/shared/sessions_supported.rb @@ -114,7 +114,7 @@ def apply_read_pref!(selector) end def apply_txn_opts!(selector) - session.add_txn_opts!(selector, read_command?(selector)) + session.add_txn_opts!(selector, read_command?(selector), context) end def suppress_read_write_concern!(selector) diff --git a/lib/mongo/operation/shared/timed.rb b/lib/mongo/operation/shared/timed.rb new file mode 100644 index 0000000000..a023e0a3a8 --- /dev/null +++ b/lib/mongo/operation/shared/timed.rb @@ -0,0 +1,52 @@ +# frozen_string_literal: true + +module Mongo + module Operation + # Defines the behavior of operations that have the default timeout + # behavior described by the client-side operation timeouts (CSOT) + # spec. + # + # @api private + module Timed + # If a timeout is active (as defined by the current context), and it has + # not yet expired, add :maxTimeMS to the spec. + # + # @param [ Hash ] spec The spec to modify + # @param [ Connection ] connection The connection that will be used to + # execute the operation + # + # @return [ Hash ] the spec + # + # @raises [ Mongo::Error::TimeoutError ] if the current timeout has + # expired. + def apply_relevant_timeouts_to(spec, connection) + with_max_time(connection) do |max_time_sec| + return spec if max_time_sec.nil? + return spec if connection.description.mongocryptd? + + spec.tap { spec[:maxTimeMS] = (max_time_sec * 1_000).to_i } + end + end + + # A helper method that computes the remaining timeout (in seconds) and + # yields it to the associated block. If no timeout is present, yields + # nil. If the timeout has expired, raises Mongo::Error::TimeoutError. + # + # @param [ Connection ] connection The connection that will be used to + # execute the operation + # + # @return [ Hash ] the result of yielding to the block (which must be + # a Hash) + def with_max_time(connection) + if context&.timeout? 
+ max_time_sec = context.remaining_timeout_sec - connection.server.minimum_round_trip_time + raise Mongo::Error::TimeoutError if max_time_sec <= 0 + + yield max_time_sec + else + yield nil + end + end + end + end +end diff --git a/lib/mongo/operation/shared/write.rb b/lib/mongo/operation/shared/write.rb index 68214fac36..2f9f50cd47 100644 --- a/lib/mongo/operation/shared/write.rb +++ b/lib/mongo/operation/shared/write.rb @@ -35,7 +35,10 @@ module Write # # @since 2.5.2 def execute(server, context:) - server.with_connection(connection_global_id: context.connection_global_id) do |connection| + server.with_connection( + connection_global_id: context.connection_global_id, + context: context + ) do |connection| execute_with_connection(connection, context: context) end end diff --git a/lib/mongo/operation/update/op_msg.rb b/lib/mongo/operation/update/op_msg.rb index 23ac1f4b4b..9606cd7d86 100644 --- a/lib/mongo/operation/update/op_msg.rb +++ b/lib/mongo/operation/update/op_msg.rb @@ -45,7 +45,8 @@ def selector(connection) def message(connection) updates = validate_updates(connection, send(IDENTIFIER)) section = Protocol::Msg::Section1.new(IDENTIFIER, updates) - Protocol::Msg.new(flags, {}, command(connection), section) + cmd = apply_relevant_timeouts_to(command(connection), connection) + Protocol::Msg.new(flags, {}, cmd, section) end end end diff --git a/lib/mongo/operation/update_search_index/op_msg.rb b/lib/mongo/operation/update_search_index/op_msg.rb index c6d21aaf0d..2030b30735 100644 --- a/lib/mongo/operation/update_search_index/op_msg.rb +++ b/lib/mongo/operation/update_search_index/op_msg.rb @@ -14,11 +14,11 @@ class OpMsg < OpMsgBase # Returns the command to send to the database, describing the # desired updateSearchIndex operation. # - # @param [ Mongo::Server ] _server the server that will receive the + # @param [ Connection ] _connection the connection that will receive the # command # # @return [ Hash ] the selector - def selector(_server) + def selector(_connection) { updateSearchIndex: coll_name, :$db => db_name, diff --git a/lib/mongo/protocol/message.rb b/lib/mongo/protocol/message.rb index e0baf5ed5a..2ea6330735 100644 --- a/lib/mongo/protocol/message.rb +++ b/lib/mongo/protocol/message.rb @@ -244,10 +244,7 @@ def self.deserialize(io, # timeout option. For compatibility with whoever might call this # method with some other IO-like object, pass options only when they # are not empty. - read_options = {} - if timeout = options[:socket_timeout] - read_options[:timeout] = timeout - end + read_options = options.slice(:timeout, :socket_timeout) if read_options.empty? chunk = io.read(16) diff --git a/lib/mongo/protocol/msg.rb b/lib/mongo/protocol/msg.rb index 73df7a31b6..46aa7092e6 100644 --- a/lib/mongo/protocol/msg.rb +++ b/lib/mongo/protocol/msg.rb @@ -226,7 +226,7 @@ def maybe_encrypt(connection, context) db_name = @main_document[DATABASE_IDENTIFIER] cmd = merge_sections - enc_cmd = context.encrypter.encrypt(db_name, cmd) + enc_cmd = context.encrypt(db_name, cmd) if cmd.key?('$db') && !enc_cmd.key?('$db') enc_cmd['$db'] = cmd['$db'] end @@ -251,7 +251,7 @@ def maybe_encrypt(connection, context) def maybe_decrypt(context) if context.decrypt? 
cmd = merge_sections - enc_cmd = context.encrypter.decrypt(cmd) + enc_cmd = context.decrypt(cmd) Msg.new(@flags, @options, enc_cmd) else self diff --git a/lib/mongo/retryable.rb b/lib/mongo/retryable.rb index 2508e13efd..f93fd22daa 100644 --- a/lib/mongo/retryable.rb +++ b/lib/mongo/retryable.rb @@ -46,8 +46,14 @@ module Retryable # @api private # # @return [ Mongo::Server ] A server matching the server preference. - def select_server(cluster, server_selector, session, failed_server = nil) - server_selector.select_server(cluster, nil, session, deprioritized: [failed_server].compact) + def select_server(cluster, server_selector, session, failed_server = nil, timeout: nil) + server_selector.select_server( + cluster, + nil, + session, + deprioritized: [failed_server].compact, + timeout: timeout + ) end # Returns the read worker for handling retryable reads. diff --git a/lib/mongo/retryable/read_worker.rb b/lib/mongo/retryable/read_worker.rb index a82e67a051..0a1f437901 100644 --- a/lib/mongo/retryable/read_worker.rb +++ b/lib/mongo/retryable/read_worker.rb @@ -60,19 +60,21 @@ class ReadWorker < BaseWorker # @param [ Mongo::ServerSelector::Selectable ] server_selector Server # selector for the operation. # @param [ CollectionView ] view The +CollectionView+ defining the query. + # @param [ Operation::Context | nil ] context the operation context to use + # with the cursor. # @param [ Proc ] block The block to execute. # # @return [ Cursor ] The cursor for the result set. - def read_with_retry_cursor(session, server_selector, view, &block) - read_with_retry(session, server_selector) do |server| + def read_with_retry_cursor(session, server_selector, view, context: nil, &block) + read_with_retry(session, server_selector, context) do |server| result = yield server # RUBY-2367: This will be updated to allow the query cache to # cache cursors with multi-batch results. if QueryCache.enabled? && !view.collection.system_collection? - CachingCursor.new(view, result, server, session: session) + CachingCursor.new(view, result, server, session: session, context: context) else - Cursor.new(view, result, server, session: session) + Cursor.new(view, result, server, session: session, context: context) end end end @@ -107,16 +109,18 @@ def read_with_retry_cursor(session, server_selector, view, &block) # is being run on. # @param [ Mongo::ServerSelector::Selectable | nil ] server_selector # Server selector for the operation. + # @param [ Mongo::Operation::Context | nil ] context Context for the + # read operation. # @param [ Proc ] block The block to execute. # # @return [ Result ] The result of the operation. - def read_with_retry(session = nil, server_selector = nil, &block) + def read_with_retry(session = nil, server_selector = nil, context = nil, &block) if session.nil? && server_selector.nil? deprecated_legacy_read_with_retry(&block) elsif session&.retry_reads? - modern_read_with_retry(session, server_selector, &block) + modern_read_with_retry(session, server_selector, context, &block) elsif client.max_read_retries > 0 - legacy_read_with_retry(session, server_selector, &block) + legacy_read_with_retry(session, server_selector, context, &block) else read_without_retry(session, server_selector, &block) end @@ -186,19 +190,26 @@ def deprecated_legacy_read_with_retry(&block) # being run on. # @param [ Mongo::ServerSelector::Selectable ] server_selector Server # selector for the operation. + # @param [ Mongo::Operation::Context ] context Context for the + # read operation. 
# @param [ Proc ] block The block to execute. # # @return [ Result ] The result of the operation. - def modern_read_with_retry(session, server_selector, &block) - server = select_server(cluster, server_selector, session) + def modern_read_with_retry(session, server_selector, context, &block) + server = select_server( + cluster, + server_selector, + session, + timeout: context&.remaining_timeout_sec + ) yield server - rescue *retryable_exceptions, Error::OperationFailure, Auth::Unauthorized, Error::PoolError => e + rescue *retryable_exceptions, Error::OperationFailure::Family, Auth::Unauthorized, Error::PoolError => e e.add_notes('modern retry', 'attempt 1') raise e if session.in_transaction? raise e if !is_retryable_exception?(e) && !e.write_retryable? - retry_read(e, session, server_selector, failed_server: server, &block) + retry_read(e, session, server_selector, context: context, failed_server: server, &block) end - + # Attempts to do a "legacy" read with retry. The operation will be # attempted multiple times, up to the client's `max_read_retries` # setting. @@ -207,15 +218,18 @@ def modern_read_with_retry(session, server_selector, &block) # being run on. # @param [ Mongo::ServerSelector::Selectable ] server_selector Server # selector for the operation. + # @param [ Mongo::Operation::Context | nil ] context Context for the + # read operation. # @param [ Proc ] block The block to execute. # # @return [ Result ] The result of the operation. - def legacy_read_with_retry(session, server_selector, &block) + def legacy_read_with_retry(session, server_selector, context = nil, &block) + context&.check_timeout! attempt = attempt ? attempt + 1 : 1 yield select_server(cluster, server_selector, session) - rescue *retryable_exceptions, Error::OperationFailure, Error::PoolError => e + rescue *retryable_exceptions, Error::OperationFailure::Family, Error::PoolError => e e.add_notes('legacy retry', "attempt #{attempt}") - + if is_retryable_exception?(e) raise e if attempt > client.max_read_retries || session&.in_transaction? elsif e.retryable? && !session&.in_transaction? @@ -223,7 +237,7 @@ def legacy_read_with_retry(session, server_selector, &block) else raise e end - + log_retry(e, message: 'Legacy read retry') sleep(client.read_retry_interval) unless is_retryable_exception?(e) retry @@ -244,7 +258,7 @@ def read_without_retry(session, server_selector, &block) begin yield server - rescue *retryable_exceptions, Error::PoolError, Error::OperationFailure => e + rescue *retryable_exceptions, Error::PoolError, Error::OperationFailure::Family => e e.add_note('retries disabled') raise e end @@ -258,40 +272,67 @@ def read_without_retry(session, server_selector, &block) # being run on. # @param [ Mongo::ServerSelector::Selectable ] server_selector Server # selector for the operation. - # @param [ Mongo::Server ] failed_server The server on which the original + # @param [ Mongo::Operation::Context | nil ] :context Context for the + # read operation. + # @param [ Mongo::Server | nil ] :failed_server The server on which the original # operation failed. # @param [ Proc ] block The block to execute. - # + # # @return [ Result ] The result of the operation. 
- def retry_read(original_error, session, server_selector, failed_server: nil, &block) - begin - server = select_server(cluster, server_selector, session, failed_server) - rescue Error, Error::AuthError => e - original_error.add_note("later retry failed: #{e.class}: #{e}") - raise original_error - end - + def retry_read(original_error, session, server_selector, context: nil, failed_server: nil, &block) + server = select_server_for_retry( + original_error, session, server_selector, context, failed_server + ) + log_retry(original_error, message: 'Read retry') - + begin + context&.check_timeout! + attempt = attempt ? attempt + 1 : 2 yield server, true + rescue Error::TimeoutError + raise rescue *retryable_exceptions => e - e.add_notes('modern retry', 'attempt 2') - raise e - rescue Error::OperationFailure, Error::PoolError => e + e.add_notes('modern retry', "attempt #{attempt}") + if context&.csot? + failed_server = server + retry + else + raise e + end + rescue Error::OperationFailure::Family, Error::PoolError => e e.add_note('modern retry') - unless e.write_retryable? + if e.write_retryable? + e.add_note("attempt #{attempt}") + if context&.csot? + failed_server = server + retry + else + raise e + end + else original_error.add_note("later retry failed: #{e.class}: #{e}") raise original_error end - e.add_note("attempt 2") - raise e rescue Error, Error::AuthError => e e.add_note('modern retry') original_error.add_note("later retry failed: #{e.class}: #{e}") raise original_error end end + + def select_server_for_retry(original_error, session, server_selector, context, failed_server) + select_server( + cluster, + server_selector, + session, + failed_server, + timeout: context&.remaining_timeout_sec + ) + rescue Error, Error::AuthError => e + original_error.add_note("later retry failed: #{e.class}: #{e}") + raise original_error + end end end end diff --git a/lib/mongo/retryable/write_worker.rb b/lib/mongo/retryable/write_worker.rb index 339a28b2f1..f40d3ae9ca 100644 --- a/lib/mongo/retryable/write_worker.rb +++ b/lib/mongo/retryable/write_worker.rb @@ -74,7 +74,11 @@ def write_with_retry(write_concern, ending_transaction: false, context:, &block) # If we are here, session is not nil. A session being nil would have # failed retry_write_allowed? check. - server = select_server(cluster, ServerSelector.primary, session) + server = select_server( + cluster, ServerSelector.primary, + session, + timeout: context.remaining_timeout_sec + ) unless ending_transaction || server.retry_writes? return legacy_write_with_retry(server, context: context, &block) @@ -104,13 +108,13 @@ def nro_write_with_retry(write_concern, context:, &block) session = context.session server = select_server(cluster, ServerSelector.primary, session) options = session&.client&.options || {} - + if options[:retry_writes] begin server.with_connection(connection_global_id: context.connection_global_id) do |connection| yield connection, nil, context end - rescue *retryable_exceptions, Error::PoolError, Error::OperationFailure => e + rescue *retryable_exceptions, Error::PoolError, Error::OperationFailure::Family => e e.add_note('retries disabled') raise e end @@ -170,6 +174,7 @@ def ensure_valid_state!(ending_transaction, session) # @api private def legacy_write_with_retry(server = nil, context:) session = context.session + context.check_timeout! # This is the pre-session retry logic, and is not subject to # current retryable write specifications. 
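# --------------------------------------------------------------------------
# [Editorial sketch - not part of the patch] The retry shape introduced above,
# reduced to plain Ruby: under CSOT a retryable failure no longer stops after
# a fixed second attempt but keeps retrying (re-selecting a server each time)
# until the remaining budget runs out. `deadline` stands in for
# context.deadline and the block for the yielded operation; both are
# illustrative names, not driver API.
def retry_until_deadline(deadline)
  attempt = 0
  begin
    attempt += 1
    yield attempt
  rescue StandardError => e
    raise e if Process.clock_gettime(Process::CLOCK_MONOTONIC) >= deadline
    retry
  end
end

# Fails twice, then succeeds, all within a 500 ms budget:
retry_until_deadline(Process.clock_gettime(Process::CLOCK_MONOTONIC) + 0.5) do |attempt|
  raise 'transient error' if attempt < 3
  :ok
end
# --------------------------------------------------------------------------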
@@ -177,12 +182,20 @@ def legacy_write_with_retry(server = nil, context:) attempt = 0 begin attempt += 1 - server ||= select_server(cluster, ServerSelector.primary, session) - server.with_connection(connection_global_id: context.connection_global_id) do |connection| + server ||= select_server( + cluster, + ServerSelector.primary, + session, + timeout: context.remaining_timeout_sec + ) + server.with_connection( + connection_global_id: context.connection_global_id, + context: context + ) do |connection| # Legacy retries do not use txn_num yield connection, nil, context.dup end - rescue Error::OperationFailure => e + rescue Error::OperationFailure::Family => e e.add_note('legacy retry') e.add_note("attempt #{attempt}") server = nil @@ -219,8 +232,11 @@ def legacy_write_with_retry(server = nil, context:) def modern_write_with_retry(session, server, context, &block) txn_num = nil connection_succeeded = false - - server.with_connection(connection_global_id: context.connection_global_id) do |connection| + + server.with_connection( + connection_global_id: context.connection_global_id, + context: context + ) do |connection| connection_succeeded = true session.materialize_if_needed @@ -230,10 +246,10 @@ def modern_write_with_retry(session, server, context, &block) # it later for the retry as well. yield connection, txn_num, context.dup end - rescue *retryable_exceptions, Error::PoolError, Auth::Unauthorized, Error::OperationFailure => e + rescue *retryable_exceptions, Error::PoolError, Auth::Unauthorized, Error::OperationFailure::Family => e e.add_notes('modern retry', 'attempt 1') - if e.is_a?(Error::OperationFailure) + if e.is_a?(Error::OperationFailure::Family) ensure_retryable!(e) else ensure_labeled_retryable!(e, connection_succeeded, session) @@ -256,6 +272,8 @@ def modern_write_with_retry(session, server, context, &block) # # @return [ Result ] The result of the operation. def retry_write(original_error, txn_num, context:, failed_server: nil, &block) + context&.check_timeout! + session = context.session # We do not request a scan of the cluster here, because error handling @@ -263,8 +281,14 @@ def retry_write(original_error, txn_num, context:, failed_server: nil, &block) # server description and/or topology as necessary (specifically, # a socket error or a not master error should have marked the respective # server unknown). Here we just need to wait for server selection. - server = select_server(cluster, ServerSelector.primary, session, failed_server) - + server = select_server( + cluster, + ServerSelector.primary, + session, + failed_server, + timeout: context.remaining_timeout_sec + ) + unless server.retry_writes? # Do not need to add "modern retry" here, it should already be on # the first exception. @@ -278,15 +302,22 @@ def retry_write(original_error, txn_num, context:, failed_server: nil, &block) # special marker class to bypass the ordinarily applicable rescues. raise Error::RaiseOriginalError end - + + attempt = attempt ? 
attempt + 1 : 2 log_retry(original_error, message: 'Write retry') server.with_connection(connection_global_id: context.connection_global_id) do |connection| yield(connection, txn_num, context) end rescue *retryable_exceptions, Error::PoolError => e - fail_on_retryable!(e, original_error) - rescue Error::OperationFailure => e - fail_on_operation_failure!(e, original_error) + maybe_fail_on_retryable(e, original_error, context, attempt) + failed_server = server + retry + rescue Error::OperationFailure::Family => e + maybe_fail_on_operation_failure(e, original_error, context, attempt) + failed_server = server + retry + rescue Mongo::Error::TimeoutError + raise rescue Error, Error::AuthError => e fail_on_other_error!(e, original_error) rescue Error::RaiseOriginalError @@ -332,10 +363,10 @@ def ensure_retryable!(e) # Raise either e, or original_error, depending on whether e is # write_retryable. - def fail_on_retryable!(e, original_error) + def maybe_fail_on_retryable(e, original_error, context, attempt) if e.write_retryable? - e.add_notes('modern retry', 'attempt 2') - raise e + e.add_notes('modern retry', "attempt #{attempt}") + raise e unless context&.deadline else original_error.add_note("later retry failed: #{e.class}: #{e}") raise original_error @@ -344,11 +375,11 @@ def fail_on_retryable!(e, original_error) # Raise either e, or original_error, depending on whether e is # appropriately labeled. - def fail_on_operation_failure!(e, original_error) + def maybe_fail_on_operation_failure(e, original_error, context, attempt) e.add_note('modern retry') if e.label?('RetryableWriteError') && !e.label?('NoWritesPerformed') - e.add_note('attempt 2') - raise e + e.add_note("attempt #{attempt}") + raise e unless context&.deadline else original_error.add_note("later retry failed: #{e.class}: #{e}") raise original_error diff --git a/lib/mongo/server.rb b/lib/mongo/server.rb index d87e70a4f5..c00285034e 100644 --- a/lib/mongo/server.rb +++ b/lib/mongo/server.rb @@ -80,7 +80,7 @@ def initialize(address, cluster, monitoring, event_listeners, options = {}) include Id end @scan_semaphore = DistinguishingSemaphore.new - @round_trip_time_averager = RoundTripTimeAverager.new + @round_trip_time_calculator = RoundTripTimeCalculator.new @description = Description.new(address, {}, load_balancer: !!@options[:load_balancer], force_load_balancer: force_load_balancer?, @@ -197,6 +197,7 @@ def compressor :max_message_size, :tags, :average_round_trip_time, + :minimum_round_trip_time, :mongos?, :other?, :primary?, @@ -228,9 +229,9 @@ def compressor # @api private attr_reader :scan_semaphore - # @return [ RoundTripTimeAverager ] Round trip time averager object. + # @return [ RoundTripTimeCalculator ] Round trip time calculator object. # @api private - attr_reader :round_trip_time_averager + attr_reader :round_trip_time_calculator # Is this server equal to another? # @@ -490,8 +491,12 @@ def reconnect! # @return [ Object ] The result of the block execution. # # @since 2.3.0 - def with_connection(connection_global_id: nil, &block) - pool.with_connection(connection_global_id: connection_global_id, &block) + def with_connection(connection_global_id: nil, context: nil, &block) + pool.with_connection( + connection_global_id: connection_global_id, + context: context, + &block + ) end # Handle handshake failure. 
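# --------------------------------------------------------------------------
# [Editorial sketch - not part of the patch] The deadline bookkeeping that
# context.check_timeout! / context.remaining_timeout_sec (used throughout the
# hunks above) are assumed to perform. CsotTimeoutHolder itself is defined
# outside this diff, so the names below are plain-Ruby stand-ins rather than
# the driver's actual implementation.
timeout_ms = 200
deadline   = Process.clock_gettime(Process::CLOCK_MONOTONIC) + timeout_ms / 1_000.0

# remaining budget, re-computed before each network step:
remaining_timeout_sec = deadline - Process.clock_gettime(Process::CLOCK_MONOTONIC)

# check_timeout! is assumed to raise Mongo::Error::TimeoutError once the
# budget is exhausted:
raise 'timeout (would be Mongo::Error::TimeoutError)' if remaining_timeout_sec <= 0
# --------------------------------------------------------------------------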
@@ -697,5 +702,5 @@ def update_last_scan require 'mongo/server/connection_pool' require 'mongo/server/description' require 'mongo/server/monitor' -require 'mongo/server/round_trip_time_averager' +require 'mongo/server/round_trip_time_calculator' require 'mongo/server/push_monitor' diff --git a/lib/mongo/server/connection.rb b/lib/mongo/server/connection.rb index 349948b61d..f9874764cf 100644 --- a/lib/mongo/server/connection.rb +++ b/lib/mongo/server/connection.rb @@ -226,11 +226,11 @@ def unpin # @return [ true ] If the connection succeeded. # # @since 2.0.0 - def connect! + def connect!(context = nil) raise_if_closed! unless @socket - @socket = create_socket + @socket = create_socket(context) @description, @compressor = do_connect if server.load_balancer? @@ -256,10 +256,16 @@ def connect! # # # @return [ Socket ] The created socket. - private def create_socket + private def create_socket(context = nil) add_server_diagnostics do - address.socket(socket_timeout, ssl_options.merge( - connection_address: address, connection_generation: generation, pipe: options[:pipe])) + opts = ssl_options.merge( + connection_address: address, + connection_generation: generation, + pipe: options[:pipe], + connect_timeout: context&.remaining_timeout_sec, + csot: !!context&.csot? + ) + address.socket(socket_timeout, opts) end end diff --git a/lib/mongo/server/connection_base.rb b/lib/mongo/server/connection_base.rb index 709f1e920d..2803cb9c45 100644 --- a/lib/mongo/server/connection_base.rb +++ b/lib/mongo/server/connection_base.rb @@ -169,6 +169,7 @@ def deliver(message, context, options = {}) raise Error::LintError, "Trying to deliver a message over a disconnected connection (to #{address})" end buffer = serialize(message, context) + check_timeout!(context) ensure_connected do |socket| operation_id = Monitoring.next_operation_id started_event = command_started(address, operation_id, message.payload, @@ -181,9 +182,10 @@ def deliver(message, context, options = {}) result = nil begin result = add_server_diagnostics do - socket.write(buffer.to_s) + socket.write(buffer.to_s, timeout: context.remaining_timeout_sec) if message.replyable? - Protocol::Message.deserialize(socket, max_message_size, message.request_id, options) + check_timeout!(context) + Protocol::Message.deserialize(socket, max_message_size, message.request_id, options.merge(timeout: context.remaining_timeout_sec)) else nil end @@ -273,6 +275,24 @@ def serialize(message, context, buffer = BSON::ByteBuffer.new) buffer end + + # If timeoutMS is set for the operation context, checks whether there is + # enough time left to send the corresponding message to the server + # (remaining timeout is bigger than minimum round trip time for + # the server) + # + # @param [ Mongo::Operation::Context ] context Context of the operation. + # + # @raise [ Mongo::Error::TimeoutError ] if timeout expired or there is + # not enough time to send the message to the server. + def check_timeout!(context) + return if [nil, 0].include?(context.deadline) + + time_to_execute = context.remaining_timeout_sec - server.minimum_round_trip_time + if time_to_execute <= 0 + raise Mongo::Error::TimeoutError + end + end end end end diff --git a/lib/mongo/server/connection_pool.rb b/lib/mongo/server/connection_pool.rb index dfbc718522..4cc0c9a7e2 100644 --- a/lib/mongo/server/connection_pool.rb +++ b/lib/mongo/server/connection_pool.rb @@ -205,11 +205,18 @@ def min_size # The time to wait, in seconds, for a connection to become available. 
# + # @param [ Mongo::Operation::Context | nil ] context Context of the operation + # the connection is requested for, if any. + # # @return [ Float ] The queue wait timeout. # # @since 2.9.0 - def wait_timeout - @wait_timeout ||= options[:wait_timeout] || DEFAULT_WAIT_TIMEOUT + def wait_timeout(context = nil) + if context&.remaining_timeout_sec.nil? + options[:wait_timeout] || DEFAULT_WAIT_TIMEOUT + else + context&.remaining_timeout_sec + end end # The maximum seconds a socket can remain idle since it has been @@ -345,6 +352,10 @@ def summary # The returned connection counts toward the pool's max size. When the # caller is finished using the connection, the connection should be # checked back in via the check_in method. + # @param [ Integer | nil ] :connection_global_id The global id for the + # connection to check out. + # @param [ Mongo::Operation::Context | nil ] :context Context of the operation + # the connection is requested for, if any. # # @return [ Mongo::Server::Connection ] The checked out connection. # @raise [ Error::PoolClosedError ] If the pool has been closed. @@ -352,7 +363,7 @@ def summary # and remains so for longer than the wait timeout. # # @since 2.9.0 - def check_out(connection_global_id: nil) + def check_out(connection_global_id: nil, context: nil) check_invariants publish_cmap_event( @@ -362,7 +373,9 @@ def check_out(connection_global_id: nil) raise_if_pool_closed! raise_if_pool_paused_locked! - connection = retrieve_and_connect_connection(connection_global_id) + connection = retrieve_and_connect_connection( + connection_global_id, context + ) publish_cmap_event( Monitoring::Event::Cmap::ConnectionCheckedOut.new(@server.address, connection.id, self), @@ -698,10 +711,13 @@ def inspect # @return [ Object ] The result of the block. # # @since 2.0.0 - def with_connection(connection_global_id: nil) + def with_connection(connection_global_id: nil, context: nil) raise_if_closed! - connection = check_out(connection_global_id: connection_global_id) + connection = check_out( + connection_global_id: connection_global_id, + context: context + ) yield(connection) rescue Error::SocketError, Error::SocketTimeoutError, Error::ConnectionPerished => e maybe_raise_pool_cleared!(connection, e) @@ -975,9 +991,9 @@ def maybe_raise_pool_cleared!(connection, e) # Attempts to connect (handshake and auth) the connection. If an error is # encountered, closes the connection and raises the error. - def connect_connection(connection) + def connect_connection(connection, context = nil) begin - connection.connect! + connection.connect!(context) rescue Exception connection.disconnect!(reason: :error) raise @@ -1242,16 +1258,18 @@ def get_connection(pid, connection_global_id) # Retrieves a connection and connects it. # - # @param [ Integer ] connection_global_id The global id for the + # @param [ Integer | nil ] connection_global_id The global id for the # connection to check out. + # @param [ Mongo::Operation::Context | nil ] context Context of the operation + # the connection is requested for, if any. # # @return [ Mongo::Server::Connection ] The checked out connection. # # @raise [ Error::PoolClosedError ] If the pool has been closed. # @raise [ Timeout::Error ] If the connection pool is at maximum size # and remains so for longer than the wait timeout.
- def retrieve_and_connect_connection(connection_global_id) - deadline = Utils.monotonic_time + wait_timeout + def retrieve_and_connect_connection(connection_global_id, context = nil) + deadline = Utils.monotonic_time + wait_timeout(context) connection = nil @lock.synchronize do @@ -1267,7 +1285,7 @@ connection = wait_for_connection(connection_global_id, deadline) end - connect_or_raise(connection) unless connection.connected? + connect_or_raise(connection, context) unless connection.connected? @lock.synchronize do @checked_out_connections << connection @@ -1327,8 +1345,8 @@ def wait_for_connection(connection_global_id, deadline) # cannot be connected. # This method also publish corresponding event and ensures that counters # and condition variables are updated. - def connect_or_raise(connection) - connect_connection(connection) + def connect_or_raise(connection, context) + connect_connection(connection, context) rescue Exception # Handshake or authentication failed @lock.synchronize do diff --git a/lib/mongo/server/description.rb b/lib/mongo/server/description.rb index 3e21b6a635..20b1448721 100644 --- a/lib/mongo/server/description.rb +++ b/lib/mongo/server/description.rb @@ -209,8 +209,8 @@ class Description # @param [ Hash ] config The result of the hello command. # @param [ Float ] average_round_trip_time The moving average time (sec) the hello # command took to complete. - # @param [ Float ] average_round_trip_time The moving average time (sec) - # the ismaster call took to complete. + # @param [ Float ] minimum_round_trip_time The minimum round trip time + # of the last ten hello commands. # @param [ true | false ] load_balancer Whether the server is treated as # a load balancer. # @param [ true | false ] force_load_balancer Whether the server is # # @api private def initialize(address, config = {}, average_round_trip_time: nil, - load_balancer: false, force_load_balancer: false + minimum_round_trip_time: 0, load_balancer: false, + force_load_balancer: false ) @address = address @config = config @@ -226,6 +227,7 @@ def initialize(address, config = {}, average_round_trip_time: nil, @force_load_balancer = !!force_load_balancer @features = Features.new(wire_versions, me || @address.to_s) @average_round_trip_time = average_round_trip_time + @minimum_round_trip_time = minimum_round_trip_time @last_update_time = Time.now.freeze @last_update_monotime = Utils.monotonic_time @@ -302,6 +304,10 @@ def features # @return [ Float ] The moving average time the hello call took to complete. attr_reader :average_round_trip_time + # @return [ Float ] The minimum time the last ten hello calls took + # to complete. + attr_reader :minimum_round_trip_time + # Returns whether this server is an arbiter, per the SDAM spec. # # @example Is the server an arbiter? @@ -723,8 +729,7 @@ def unknown? # @api private def ok? - config[Operation::Result::OK] && - config[Operation::Result::OK] == 1 || false + config[Operation::Result::OK] == 1 end # Get the range of supported wire versions for the server. @@ -802,6 +807,14 @@ def me_mismatch? !!(address.to_s.downcase != me.downcase if me) end + # Whether this description is from a mongocryptd server. + # + # @return [ true, false ] Whether this description is from a mongocryptd + # server. + def mongocryptd? + ok? && config['iscryptd'] == true + end + # opTime in lastWrite subdocument of the hello response. # # @return [ BSON::Timestamp ] The timestamp.
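# --------------------------------------------------------------------------
# [Editorial sketch - not part of the patch] What the new
# minimum_round_trip_time is used for (see Timed#with_max_time and
# ConnectionBase#check_timeout! earlier in this diff): the maxTimeMS sent to
# the server is the remaining CSOT budget minus the server's minimum RTT.
# The concrete numbers are invented.
remaining_timeout_sec   = 0.250 # budget left on the operation's context
minimum_round_trip_time = 0.040 # minimum of the recent RTT samples

max_time_sec = remaining_timeout_sec - minimum_round_trip_time
raise 'timeout (would be Mongo::Error::TimeoutError)' if max_time_sec <= 0

max_time_ms = (max_time_sec * 1_000).to_i # => 210, sent as maxTimeMS
puts max_time_ms
# --------------------------------------------------------------------------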
diff --git a/lib/mongo/server/monitor.rb b/lib/mongo/server/monitor.rb index c2ae278f1e..9130fe7128 100644 --- a/lib/mongo/server/monitor.rb +++ b/lib/mongo/server/monitor.rb @@ -237,8 +237,11 @@ def run_sdam_flow(result, awaited: false, scan_error: nil) @sdam_mutex.synchronize do old_description = server.description - new_description = Description.new(server.address, result, - average_round_trip_time: server.round_trip_time_averager.average_round_trip_time + new_description = Description.new( + server.address, + result, + average_round_trip_time: server.round_trip_time_calculator.average_round_trip_time, + minimum_round_trip_time: server.round_trip_time_calculator.minimum_round_trip_time ) server.cluster.run_sdam_flow(server.description, new_description, awaited: awaited, scan_error: scan_error) @@ -306,7 +309,7 @@ def check end if @connection - result = server.round_trip_time_averager.measure do + result = server.round_trip_time_calculator.measure do begin doc = @connection.check_document cmd = Protocol::Query.new( @@ -323,7 +326,7 @@ def check else connection = Connection.new(server.address, options) connection.connect! - result = server.round_trip_time_averager.measure do + result = server.round_trip_time_calculator.measure do connection.handshake! end @connection = connection diff --git a/lib/mongo/server/pending_connection.rb b/lib/mongo/server/pending_connection.rb index 1981e32812..5d2c62a6dd 100644 --- a/lib/mongo/server/pending_connection.rb +++ b/lib/mongo/server/pending_connection.rb @@ -120,7 +120,7 @@ def handshake_and_authenticate! # # @return [ Mongo::Protocol::Reply ] Deserialized server response. def get_handshake_response(hello_command) - @server.round_trip_time_averager.measure do + @server.round_trip_time_calculator.measure do add_server_diagnostics do socket.write(hello_command.serialize.to_s) Protocol::Message.deserialize(socket, Protocol::Message::MAX_MESSAGE_SIZE) @@ -168,7 +168,11 @@ def handshake!(speculative_auth_doc: nil) doc['serviceId'] ||= "fake:#{rand(2**32-1)+1}" end - post_handshake(doc, @server.round_trip_time_averager.average_round_trip_time) + post_handshake( + doc, + @server.round_trip_time_calculator.average_round_trip_time, + @server.round_trip_time_calculator.minimum_round_trip_time + ) doc end @@ -218,7 +222,7 @@ def ensure_connected # # @return [ Server::Description ] The server description calculated from # the handshake response for this particular connection. - def post_handshake(response, average_rtt) + def post_handshake(response, average_rtt, minimum_rtt) if response["ok"] == 1 # Auth mechanism is entirely dependent on the contents of # hello response *for this connection*. diff --git a/lib/mongo/server/round_trip_time_averager.rb b/lib/mongo/server/round_trip_time_calculator.rb similarity index 74% rename from lib/mongo/server/round_trip_time_averager.rb rename to lib/mongo/server/round_trip_time_calculator.rb index 1634691d54..99ee7eb60e 100644 --- a/lib/mongo/server/round_trip_time_averager.rb +++ b/lib/mongo/server/round_trip_time_calculator.rb @@ -18,20 +18,29 @@ module Mongo class Server # @api private - class RoundTripTimeAverager + class RoundTripTimeCalculator # The weighting factor (alpha) for calculating the average moving # round trip time. 
RTT_WEIGHT_FACTOR = 0.2.freeze private_constant :RTT_WEIGHT_FACTOR + RTT_SAMPLES_FOR_MINIMUM = 10 + private_constant :RTT_SAMPLES_FOR_MINIMUM + + MIN_SAMPLES = 3 + private_constant :MIN_SAMPLES + def initialize @last_round_trip_time = nil @average_round_trip_time = nil + @minimum_round_trip_time = 0 + @rtts = [] end attr_reader :last_round_trip_time attr_reader :average_round_trip_time + attr_reader :minimum_round_trip_time def measure start = Utils.monotonic_time @@ -44,14 +53,15 @@ def measure rescue Error, Error::AuthError => exc # For other errors, RTT is valid. end - last_round_trip_time = Utils.monotonic_time - start + last_rtt = Utils.monotonic_time - start # If hello fails, we need to return the last round trip time # because it is used in the heartbeat failed SDAM event, # but we must not update the round trip time recorded in the server. unless exc - @last_round_trip_time = last_round_trip_time + @last_round_trip_time = last_rtt update_average_round_trip_time + update_minimum_round_trip_time end if exc @@ -61,9 +71,6 @@ def measure end end - private - - # This method is separate for testing purposes. def update_average_round_trip_time @average_round_trip_time = if average_round_trip_time RTT_WEIGHT_FACTOR * last_round_trip_time + (1 - RTT_WEIGHT_FACTOR) * average_round_trip_time @@ -71,6 +78,14 @@ def update_average_round_trip_time last_round_trip_time end end + + def update_minimum_round_trip_time + @rtts.push(last_round_trip_time) unless last_round_trip_time.nil? + @minimum_round_trip_time = 0 and return if @rtts.size < MIN_SAMPLES + + @rtts.shift if @rtts.size > RTT_SAMPLES_FOR_MINIMUM + @minimum_round_trip_time = @rtts.compact.min + end end end end diff --git a/lib/mongo/server_selector/base.rb b/lib/mongo/server_selector/base.rb index 10eb478449..ae0a495440 100644 --- a/lib/mongo/server_selector/base.rb +++ b/lib/mongo/server_selector/base.rb @@ -33,11 +33,11 @@ class Base # # @option options [ Integer ] :local_threshold The local threshold boundary for # nearest selection in seconds. - # @option options [ Integer ] max_staleness The maximum replication lag, + # @option options [ Integer ] :max_staleness The maximum replication lag, # in seconds, that a secondary can suffer and still be eligible for a read. # A value of -1 is treated identically to nil, which is to not # have a maximum staleness. - # @option options [ Hash | nil ] hedge A Hash specifying whether to enable hedged + # @option options [ Hash | nil ] :hedge A Hash specifying whether to enable hedged # reads on the server. Hedged reads are not enabled by default. When # specifying this option, it must be in the format: { enabled: true }, # where the value of the :enabled key is a boolean value. @@ -168,6 +168,8 @@ def ==(other) # be selected from only if no other servers are available. This is # used to avoid selecting the same server twice in a row when # retrying a command. + # @param [ Float | nil ] :timeout Timeout in seconds for the operation, + # if any. # # @return [ Mongo::Server ] A server matching the server preference. # @@ -178,21 +180,35 @@ def ==(other) # lint mode is enabled. 
# # @since 2.0.0 - def select_server(cluster, ping = nil, session = nil, write_aggregation: false, deprioritized: []) - select_server_impl(cluster, ping, session, write_aggregation, deprioritized).tap do |server| + def select_server( + cluster, + ping = nil, + session = nil, + write_aggregation: false, + deprioritized: [], + timeout: nil + ) + select_server_impl(cluster, ping, session, write_aggregation, deprioritized, timeout).tap do |server| if Lint.enabled? && !server.pool.ready? raise Error::LintError, 'Server selector returning a server with a pool which is not ready' end end end - # Parameters and return values are the same as for select_server. - private def select_server_impl(cluster, ping, session, write_aggregation, deprioritized) + # Parameters and return values are the same as for select_server, only + # the +timeout+ param is renamed to +csot_timeout+. + private def select_server_impl(cluster, ping, session, write_aggregation, deprioritized, csot_timeout) if cluster.topology.is_a?(Cluster::Topology::LoadBalanced) return cluster.servers.first end - server_selection_timeout = cluster.options[:server_selection_timeout] || SERVER_SELECTION_TIMEOUT + timeout = cluster.options[:server_selection_timeout] || SERVER_SELECTION_TIMEOUT + + server_selection_timeout = if csot_timeout && csot_timeout > 0 + [timeout, csot_timeout].min + else + timeout + end # Special handling for zero timeout: if we have to select a server, # and the timeout is zero, fail immediately (since server selection @@ -638,9 +654,9 @@ def validate_max_staleness_value!(cluster) # state resulting from SDAM will immediately wake up this method and # cause it to return. # - # If the cluster des not have a server selection semaphore, waits + # If the cluster does not have a server selection semaphore, waits # the smaller of 0.25 seconds and the specified remaining time. - # This functionality is provided for backwards compatibilty only for + # This functionality is provided for backwards compatibility only for # applications directly invoking the server selection process. # If lint mode is enabled and the cluster does not have a server # selection semaphore, Error::LintError will be raised. diff --git a/lib/mongo/session.rb b/lib/mongo/session.rb index b22519efc1..5fbd3801fa 100644 --- a/lib/mongo/session.rb +++ b/lib/mongo/session.rb @@ -57,6 +57,12 @@ class Session # # @option options [ true|false ] :causal_consistency Whether to enable # causal consistency for this session. + # @option options [ Integer ] :default_timeout_ms The timeoutMS value for + # the following operations executed on the session: + # - commitTransaction + # - abortTransaction + # - withTransaction + # - endSession # @option options [ Hash ] :default_transaction_options Options to pass # to start_transaction by default, can contain any of the options that # start_transaction accepts. @@ -96,6 +102,7 @@ def initialize(server_session, client, options = {}) @options = options.dup.freeze @cluster_time = nil @state = NO_TRANSACTION_STATE + @with_transaction_deadline = nil end # @return [ Hash ] The options for this session. @@ -438,9 +445,21 @@ def end_session # progress or if the write concern is unacknowledged. 
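With an operation-level timeout in play, server selection can no longer always wait the full serverSelectionTimeoutMS: when the operation still has a positive budget, the smaller of the two values is used. Roughly, as a sketch (the helper name and the example values are assumptions; times are in seconds):

    SERVER_SELECTION_TIMEOUT = 30  # driver default

    def effective_selection_timeout(configured_timeout, csot_timeout)
      timeout = configured_timeout || SERVER_SELECTION_TIMEOUT
      if csot_timeout && csot_timeout > 0
        [timeout, csot_timeout].min  # the operation's remaining budget caps the wait
      else
        timeout
      end
    end

    effective_selection_timeout(30, 2.5)   # => 2.5
    effective_selection_timeout(nil, nil)  # => 30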
# # @since 2.7.0 - def with_transaction(options=nil) - # Non-configurable 120 second timeout for the entire operation - deadline = Utils.monotonic_time + 120 + def with_transaction(options = nil) + if timeout_ms = (options || {})[:timeout_ms] + timeout_sec = timeout_ms / 1_000.0 + deadline = Utils.monotonic_time + timeout_sec + @with_transaction_deadline = deadline + elsif default_timeout_ms = @options[:default_timeout_ms] + timeout_sec = default_timeout_ms / 1_000.0 + deadline = Utils.monotonic_time + timeout_sec + @with_transaction_deadline = deadline + elsif @client.timeout_sec + deadline = Utils.monotonic_time + @client.timeout_sec + @with_transaction_deadline = deadline + else + deadline = Utils.monotonic_time + 120 + end transaction_in_progress = false loop do commit_options = {} @@ -454,6 +473,7 @@ def with_transaction(options=nil) rescue Exception => e if within_states?(STARTING_TRANSACTION_STATE, TRANSACTION_IN_PROGRESS_STATE) log_warn("Aborting transaction due to #{e.class}: #{e}") + @with_transaction_deadline = nil abort_transaction transaction_in_progress = false end @@ -481,7 +501,7 @@ def with_transaction(options=nil) rescue Mongo::Error => e if e.label?('UnknownTransactionCommitResult') if Utils.monotonic_time >= deadline || - e.is_a?(Error::OperationFailure) && e.max_time_ms_expired? + e.is_a?(Error::OperationFailure::Family) && e.max_time_ms_expired? then transaction_in_progress = false raise @@ -522,9 +542,10 @@ def with_transaction(options=nil) log_warn('with_transaction callback broke out of with_transaction loop, aborting transaction') begin abort_transaction - rescue Error::OperationFailure, Error::InvalidTransactionOperation + rescue Error::OperationFailure::Family, Error::InvalidTransactionOperation end end + @with_transaction_deadline = nil end # Places subsequent operations in this session into a new transaction. @@ -549,6 +570,8 @@ def with_transaction(options=nil) # items: # - *:mode* -- read preference specified as a symbol; the only valid value is # *:primary*. + # @option options [ Integer ] :timeout_ms The operation timeout in milliseconds + # that is applied to the whole transaction. Must be a positive integer. # # @raise [ Error::InvalidTransactionOperation ] If a transaction is already in # progress or if the write concern is unacknowledged. @@ -611,6 +634,8 @@ def start_transaction(options = nil) # # @option options :write_concern [ nil | WriteConcern::Base ] The write # concern to use for this operation. + # @option options [ Integer ] :timeout_ms The operation timeout in milliseconds. + # Must be a positive integer. # # @raise [ Error::InvalidTransactionOperation ] If there is no active transaction. # @@ -647,7 +672,11 @@ def commit_transaction(options=nil) write_concern = WriteConcern.get(write_concern) end - context = Operation::Context.new(client: @client, session: self) + context = Operation::Context.new( + client: @client, + session: self, + operation_timeouts: operation_timeouts(options) + ) write_with_retry(write_concern, ending_transaction: true, context: context, ) do |connection, txn_num, context| @@ -685,10 +714,13 @@ def commit_transaction(options=nil) # @example Abort the transaction. # session.abort_transaction # + # @option options [ Integer ] :timeout_ms The operation timeout in milliseconds. + # Must be a positive integer. + # # @raise [ Error::InvalidTransactionOperation ] If there is no active transaction. # # @since 2.6.0 - def abort_transaction + def abort_transaction(options = nil) QueryCache.clear check_if_ended!
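The overall with_transaction deadline now comes from the first available of: the call's :timeout_ms, the session's :default_timeout_ms, the client-level timeout, and finally the legacy fixed 120 seconds. A condensed model of that precedence (the helper and argument names are illustrative; millisecond values are converted to seconds as in the hunk above):

    def with_transaction_deadline(now, call_opts, session_opts, client_timeout_sec)
      timeout_sec =
        if (ms = call_opts[:timeout_ms])
          ms / 1_000.0             # per-call timeoutMS wins
        elsif (ms = session_opts[:default_timeout_ms])
          ms / 1_000.0             # then the session's default
        elsif client_timeout_sec
          client_timeout_sec       # then the client-level timeoutMS
        else
          120                      # legacy, non-configurable limit
        end
      now + timeout_sec
    end

    with_transaction_deadline(0.0, { timeout_ms: 5_000 }, {}, 30)  # => 5.0
    with_transaction_deadline(0.0, {}, {}, nil)                    # => 120.0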
@@ -705,10 +737,16 @@ def abort_transaction Mongo::Error::InvalidTransactionOperation.cannot_call_twice_msg(:abortTransaction)) end + options ||= {} + begin unless starting_transaction? @aborting_transaction = true - context = Operation::Context.new(client: @client, session: self) + context = Operation::Context.new( + client: @client, + session: self, + operation_timeouts: operation_timeouts(options) + ) write_with_retry(txn_options[:write_concern], ending_transaction: true, context: context, ) do |connection, txn_num, context| @@ -898,7 +936,7 @@ def add_txn_num!(command) # # @since 2.6.0 # @api private - def add_txn_opts!(command, read) + def add_txn_opts!(command, read, context) command.tap do |c| # The read concern should be added to any command that starts a transaction. if starting_transaction? @@ -952,6 +990,14 @@ def add_txn_opts!(command, read) if c[:writeConcern] && c[:writeConcern][:w] && c[:writeConcern][:w].is_a?(Symbol) c[:writeConcern][:w] = c[:writeConcern][:w].to_s end + + # Ignore wtimeout if csot + if context&.csot? + c[:writeConcern]&.delete(:wtimeout) + end + + # We must not send an empty (server default) write concern. + c.delete(:writeConcern) if c[:writeConcern]&.empty? end end @@ -1138,6 +1184,8 @@ def txn_num # @api private attr_accessor :snapshot_timestamp + attr_reader :with_transaction_deadline + private # Get the read concern the session will use when starting a transaction. @@ -1217,5 +1265,19 @@ def check_transactions_supported! end end end + + def operation_timeouts(opts) + { + inherited_timeout_ms: @client.timeout_ms + }.tap do |result| + if @with_transaction_deadline.nil? + if timeout_ms = opts[:timeout_ms] + result[:operation_timeout_ms] = timeout_ms + elsif default_timeout_ms = options[:default_timeout_ms] + result[:operation_timeout_ms] = default_timeout_ms + end + end + end + end end end diff --git a/lib/mongo/socket.rb b/lib/mongo/socket.rb index 62e3ef464e..3134f2aa87 100644 --- a/lib/mongo/socket.rb +++ b/lib/mongo/socket.rb @@ -192,27 +192,24 @@ def gets(*args) # socket.read(4096) # # @param [ Integer ] length The number of bytes to read. - # @param [ Numeric ] timeout The timeout to use for each chunk read. + # @param [ Numeric ] socket_timeout The timeout to use for each chunk read, + # mutually exclusive to +timeout+. + # @param [ Numeric ] timeout The total timeout to the whole read operation, + # mutually exclusive to +socket_timeout+. # # @raise [ Mongo::SocketError ] If not all data is returned. # # @return [ Object ] The data from the socket. # # @since 2.0.0 - def read(length, timeout: nil) - map_exceptions do - data = read_from_socket(length, timeout: timeout) - unless (data.length > 0 || length == 0) - raise IOError, "Expected to read > 0 bytes but read 0 bytes" - end - while data.length < length - chunk = read_from_socket(length - data.length, timeout: timeout) - unless (chunk.length > 0 || length == 0) - raise IOError, "Expected to read > 0 bytes but read 0 bytes" - end - data << chunk - end - data + def read(length, socket_timeout: nil, timeout: nil) + if !socket_timeout.nil? && !timeout.nil? + raise ArgumentError, 'Both timeout and socket_timeout cannot be set' + end + if !socket_timeout.nil? || timeout.nil? + read_without_timeout(length, socket_timeout) + else + read_with_timeout(length, timeout) end end @@ -233,15 +230,16 @@ def readbyte # Writes data to the socket instance. # # @param [ Array ] args The data to be written. + # @param [ Numeric ] timeout The total timeout to the whole write operation. 
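commit_transaction and abort_transaction now build their Operation::Context with explicit timeouts: the client's timeout is always carried as the inherited value, while the per-operation timeout comes from the call, then from the session default, and is suppressed inside with_transaction because the whole transaction already runs under a single deadline. A minimal model of that resolution (the method and argument names below are assumptions; the behaviour mirrors the operation_timeouts helper above):

    def resolve_operation_timeouts(call_opts, session_opts, client_timeout_ms, inside_with_transaction)
      result = { inherited_timeout_ms: client_timeout_ms }
      unless inside_with_transaction
        if (ms = call_opts[:timeout_ms])
          result[:operation_timeout_ms] = ms
        elsif (ms = session_opts[:default_timeout_ms])
          result[:operation_timeout_ms] = ms
        end
      end
      result
    end

    resolve_operation_timeouts({ timeout_ms: 2_000 }, {}, 10_000, false)
    # => { inherited_timeout_ms: 10_000, operation_timeout_ms: 2_000 }
    resolve_operation_timeouts({ timeout_ms: 2_000 }, {}, 10_000, true)
    # => { inherited_timeout_ms: 10_000 }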
# # @return [ Integer ] The length of bytes written to the socket. # # @raise [ Error::SocketError | Error::SocketTimeoutError ] When there is a network error during the write. # # @since 2.0.0 - def write(*args) + def write(*args, timeout: nil) map_exceptions do - do_write(*args) + do_write(*args, timeout: timeout) end end @@ -254,7 +252,7 @@ def eof? true end - # For backwards compatibilty only, do not use. + # For backwards compatibility only, do not use. # # @return [ true ] Always true. # @@ -265,18 +263,76 @@ def connectable? private - def read_from_socket(length, timeout: nil) + # Reads the +length+ bytes from the socket, the read operation duration is + # limited to +timeout+ second. + # + # @param [ Integer ] length The number of bytes to read. + # @param [ Numeric ] timeout The total timeout to the whole read operation. + # + # @return [ Object ] The data from the socket. + def read_with_timeout(length, timeout) + deadline = Utils.monotonic_time + timeout + map_exceptions do + String.new.tap do |data| + while data.length < length + socket_timeout = deadline - Utils.monotonic_time + if socket_timeout <= 0 + raise Mongo::Error::TimeoutError + end + chunk = read_from_socket(length - data.length, socket_timeout: socket_timeout, csot: true) + unless chunk.length > 0 + raise IOError, "Expected to read > 0 bytes but read 0 bytes" + end + data << chunk + end + end + end + end + + # Reads the +length+ bytes from the socket. The read operation may involve + # multiple socket reads, each read is limited to +timeout+ second, + # if the parameter is provided. + # + # @param [ Integer ] length The number of bytes to read. + # @param [ Numeric ] socket_timeout The timeout to use for each chunk read. + # + # @return [ Object ] The data from the socket. + def read_without_timeout(length, socket_timeout = nil) + map_exceptions do + String.new.tap do |data| + while data.length < length + chunk = read_from_socket(length - data.length, socket_timeout: socket_timeout) + unless chunk.length > 0 + raise IOError, "Expected to read > 0 bytes but read 0 bytes" + end + data << chunk + end + end + end + end + + + # Reads the +length+ bytes from the socket. The read operation may involve + # multiple socket reads, each read is limited to +timeout+ second, + # if the parameter is provided. + # + # @param [ Integer ] length The number of bytes to read. + # @param [ Numeric ] :socket_timeout The timeout to use for each chunk read. + # @param [ true | false ] :csot Whether the CSOT timeout is set for the operation. + # + # @return [ Object ] The data from the socket. 
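Socket#read now distinguishes a per-chunk socket_timeout from a whole-operation timeout; the latter is a deadline loop in which each chunk read gets only the remaining budget, and an exhausted budget raises the CSOT timeout error. A simplified, self-contained model of that loop, reading from an in-memory IO instead of a socket (all names here are illustrative):

    require 'stringio'

    class DeadlineExceeded < StandardError; end

    def read_with_deadline(io, length, timeout)
      deadline = Process.clock_gettime(Process::CLOCK_MONOTONIC) + timeout
      data = +''
      while data.length < length
        remaining = deadline - Process.clock_gettime(Process::CLOCK_MONOTONIC)
        raise DeadlineExceeded if remaining <= 0  # whole-operation budget exhausted
        chunk = io.read(length - data.length)     # the driver passes `remaining` down as the per-read timeout
        raise IOError, 'expected to read > 0 bytes but read 0 bytes' if chunk.nil? || chunk.empty?
        data << chunk
      end
      data
    end

    read_with_deadline(StringIO.new('abcdef'), 4, 1.0)  # => "abcd"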
+ def read_from_socket(length, socket_timeout: nil, csot: false) # Just in case if length == 0 return ''.force_encoding('BINARY') end - _timeout = timeout || self.timeout + _timeout = socket_timeout || self.timeout if _timeout if _timeout > 0 deadline = Utils.monotonic_time + _timeout elsif _timeout < 0 - raise Errno::ETIMEDOUT, "Negative timeout #{_timeout} given to socket" + raise_timeout_error!("Negative timeout #{_timeout} given to socket", csot) end end @@ -331,7 +387,7 @@ def read_from_socket(length, timeout: nil) if deadline select_timeout = deadline - Utils.monotonic_time if select_timeout <= 0 - raise Errno::ETIMEDOUT, "Took more than #{_timeout} seconds to receive data" + raise_timeout_error!("Took more than #{_timeout} seconds to receive data", csot) end end pipe = options[:pipe] @@ -373,11 +429,11 @@ def read_from_socket(length, timeout: nil) if deadline select_timeout = deadline - Utils.monotonic_time if select_timeout <= 0 - raise Errno::ETIMEDOUT, "Took more than #{_timeout} seconds to receive data" + raise_timeout_error!("Took more than #{_timeout} seconds to receive data", csot) end end elsif rv.nil? - raise Errno::ETIMEDOUT, "Took more than #{_timeout} seconds to receive data (select call timed out)" + raise_timeout_error!("Took more than #{_timeout} seconds to receive data (select call timed out)", csot) end retry end @@ -402,9 +458,23 @@ def read_buffer_size # sholud map exceptions. # # @param [ Array ] args The data to be written. + # @param [ Numeric ] :timeout The total timeout to the whole write operation. + # + # @return [ Integer ] The length of bytes written to the socket. + def do_write(*args, timeout: nil) + if timeout.nil? + write_without_timeout(*args) + else + write_with_timeout(*args, timeout: timeout) + end + end + + # Writes data to to the socket. + # + # @param [ Array ] args The data to be written. # # @return [ Integer ] The length of bytes written to the socket. - def do_write(*args) + def write_without_timeout(*args) # This method used to forward arguments to @socket.write in a # single call like so: # @@ -428,6 +498,57 @@ def do_write(*args) end end + # Writes data to to the socket, the write duration is limited to +timeout+. + # + # @param [ Array ] args The data to be written. + # @param [ Numeric ] :timeout The total timeout to the whole write operation. + # + # @return [ Integer ] The length of bytes written to the socket. + def write_with_timeout(*args, timeout:) + raise ArgumentError, 'timeout cannot be nil' if timeout.nil? + raise_timeout_error!("Negative timeout #{timeout} given to socket", true) if timeout < 0 + + written = 0 + args.each do |buf| + buf = buf.to_s + i = 0 + while i < buf.length + chunk = buf[i...(i + WRITE_CHUNK_SIZE)] + written += write_chunk(chunk, timeout) + i += WRITE_CHUNK_SIZE + end + end + written + end + + def write_chunk(chunk, timeout) + deadline = Utils.monotonic_time + timeout + written = 0 + begin + written += @socket.write_nonblock(chunk[written..-1]) + rescue IO::WaitWritable, Errno::EINTR + select_timeout = deadline - Utils.monotonic_time + rv = Kernel.select(nil, [@socket], nil, select_timeout) + if BSON::Environment.jruby? + # Ignore the return value of Kernel.select. + # On JRuby, select appears to return nil prior to timeout expiration + # (apparently due to a EAGAIN) which then causes us to fail the read + # even though we could have retried it. + # Check the deadline ourselves. 
+ if deadline + select_timeout = deadline - Utils.monotonic_time + if select_timeout <= 0 + raise_timeout_error!("Took more than #{timeout} seconds to receive data", true) + end + end + elsif rv.nil? + raise_timeout_error!("Took more than #{timeout} seconds to receive data (select call timed out)", true) + end + retry + end + written + end + def unix_socket?(sock) defined?(UNIXSocket) && sock.is_a?(UNIXSocket) end @@ -482,5 +603,13 @@ def map_exceptions def human_address raise NotImplementedError end + + def raise_timeout_error!(message = nil, csot = false) + if csot + raise Mongo::Error::TimeoutError + else + raise Errno::ETIMEDOUT, message + end + end end end diff --git a/lib/mongo/socket/ssl.rb b/lib/mongo/socket/ssl.rb index 10bb7b4d6c..d9e5d7cb52 100644 --- a/lib/mongo/socket/ssl.rb +++ b/lib/mongo/socket/ssl.rb @@ -142,26 +142,37 @@ def initialize(host, port, host_name, timeout, family, options = {}) # # @since 2.0.0 def connect! - Timeout.timeout(options[:connect_timeout], Error::SocketTimeoutError, "The socket took over #{options[:connect_timeout]} seconds to connect") do - map_exceptions do - @tcp_socket.connect(::Socket.pack_sockaddr_in(port, host)) - end - @socket = OpenSSL::SSL::SSLSocket.new(@tcp_socket, context) - begin - @socket.hostname = @host_name - @socket.sync_close = true - map_exceptions do - @socket.connect + sockaddr = ::Socket.pack_sockaddr_in(port, host) + connect_timeout = options[:connect_timeout] + map_exceptions do + if connect_timeout && connect_timeout != 0 + deadline = Utils.monotonic_time + connect_timeout + if BSON::Environment.jruby? + # We encounter some strange problems with connect_nonblock for + # ssl sockets on JRuby. Therefore, we use the old +Timeout.timeout+ + # solution, even though it is known to be not very reliable. + raise Error::SocketTimeoutError, 'connect_timeout expired' if connect_timeout < 0 + + Timeout.timeout(connect_timeout, Error::SocketTimeoutError, "The socket took over #{options[:connect_timeout]} seconds to connect") do + connect_without_timeout(sockaddr) + end + else + connect_with_timeout(sockaddr, connect_timeout) end + remaining_timeout = deadline - Utils.monotonic_time + verify_certificate!(@socket) + verify_ocsp_endpoint!(@socket, remaining_timeout) + else + connect_without_timeout(sockaddr) verify_certificate!(@socket) verify_ocsp_endpoint!(@socket) - rescue - @socket.close - @socket = nil - raise end - self end + self + rescue + @socket&.close + @socket = nil + raise end private :connect! @@ -182,6 +193,87 @@ def readbyte private + # Connects the socket without a timeout provided. + # + # @param [ String ] sockaddr Address to connect to. + def connect_without_timeout(sockaddr) + @tcp_socket.connect(sockaddr) + @socket = OpenSSL::SSL::SSLSocket.new(@tcp_socket, context) + @socket.hostname = @host_name + @socket.sync_close = true + @socket.connect + end + + # Connects the socket with the connect timeout. The timeout applies to + # connecting both ssl socket and the underlying tcp socket. + # + # @param [ String ] sockaddr Address to connect to. 
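Whether a timeout surfaces as the legacy Errno::ETIMEDOUT or as the driver's CSOT timeout error depends on the csot flag threaded through the read and write paths above. A minimal illustration of that split (the Mongo::Error::TimeoutError definition below is only a stand-in so the snippet runs on its own):

    module Mongo
      class Error < StandardError
        class TimeoutError < Error; end  # stand-in for the driver's class of the same name
      end
    end

    def raise_timeout_error!(message = nil, csot = false)
      if csot
        raise Mongo::Error::TimeoutError  # counted against the operation's timeoutMS budget
      else
        raise Errno::ETIMEDOUT, message   # pre-CSOT behaviour for plain socket timeouts
      end
    end

    begin
      raise_timeout_error!('took too long', false)
    rescue SystemCallError => e
      e.class  # => Errno::ETIMEDOUT
    end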
+ def connect_with_timeout(sockaddr, connect_timeout) + if connect_timeout <= 0 + raise Error::SocketTimeoutError, "The socket took over #{connect_timeout} seconds to connect" + end + + deadline = Utils.monotonic_time + connect_timeout + connect_tcp_socket_with_timeout(sockaddr, deadline, connect_timeout) + connnect_ssl_socket_with_timeout(deadline, connect_timeout) + end + + def connect_tcp_socket_with_timeout(sockaddr, deadline, connect_timeout) + if deadline <= Utils.monotonic_time + raise Error::SocketTimeoutError, "The socket took over #{connect_timeout} seconds to connect" + end + begin + @tcp_socket.connect_nonblock(sockaddr) + rescue IO::WaitWritable + with_select_timeout(deadline, connect_timeout) do |select_timeout| + IO.select(nil, [@tcp_socket], nil, select_timeout) + end + retry + rescue Errno::EISCONN + # Socket is connected, nothing to do. + end + end + + def connnect_ssl_socket_with_timeout(deadline, connect_timeout) + if deadline <= Utils.monotonic_time + raise Error::SocketTimeoutError, "The socket took over #{connect_timeout} seconds to connect" + end + @socket = OpenSSL::SSL::SSLSocket.new(@tcp_socket, context) + @socket.hostname = @host_name + @socket.sync_close = true + + # We still have time, connecting ssl socket. + begin + @socket.connect_nonblock + rescue IO::WaitReadable, OpenSSL::SSL::SSLErrorWaitReadable + with_select_timeout(deadline, connect_timeout) do |select_timeout| + IO.select([@socket], nil, nil, select_timeout) + end + retry + rescue IO::WaitWritable, OpenSSL::SSL::SSLErrorWaitWritable + with_select_timeout(deadline, connect_timeout) do |select_timeout| + IO.select(nil, [@socket], nil, select_timeout) + end + retry + rescue Errno::EISCONN + # Socket is connected, nothing to do + end + end + + # Raises +Error::SocketTimeoutError+ exception if deadline reached or the + # block returns nil. The block should call +IO.select+ with the + # +connect_timeout+ value. It returns nil if the +connect_timeout+ expires. + def with_select_timeout(deadline, connect_timeout, &block) + select_timeout = deadline - Utils.monotonic_time + if select_timeout <= 0 + raise Error::SocketTimeoutError, "The socket took over #{connect_timeout} seconds to connect" + end + rv = block.call(select_timeout) + if rv.nil? + raise Error::SocketTimeoutError, "The socket took over #{connect_timeout} seconds to connect" + end + end + def verify_certificate? # If ssl_verify_certificate is not present, disable only if # ssl_verify is explicitly set to false. @@ -362,7 +454,7 @@ def verify_certificate!(socket) end end - def verify_ocsp_endpoint!(socket) + def verify_ocsp_endpoint!(socket, timeout = nil) unless verify_ocsp_endpoint? return end @@ -371,7 +463,7 @@ def verify_ocsp_endpoint!(socket) ca_cert = socket.peer_cert_chain.last verifier = OcspVerifier.new(@host_name, cert, ca_cert, context.cert_store, - **Utils.shallow_symbolize_keys(options)) + **Utils.shallow_symbolize_keys(options).merge(timeout: timeout)) verifier.verify_with_cache end diff --git a/lib/mongo/socket/tcp.rb b/lib/mongo/socket/tcp.rb index 155c4ffc4a..61ece439f4 100644 --- a/lib/mongo/socket/tcp.rb +++ b/lib/mongo/socket/tcp.rb @@ -79,16 +79,50 @@ def initialize(host, port, timeout, family, options = {}) # @return [ TCP ] The connected socket instance. # # @since 2.0.0 + # @api private def connect! 
- Timeout.timeout(options[:connect_timeout], Error::SocketTimeoutError, "The socket took over #{options[:connect_timeout]} seconds to connect") do - socket.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1) - map_exceptions do - socket.connect(::Socket.pack_sockaddr_in(port, host)) + socket.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1) + sockaddr = ::Socket.pack_sockaddr_in(port, host) + connect_timeout = options[:connect_timeout] + map_exceptions do + if connect_timeout && connect_timeout != 0 + connect_with_timeout(sockaddr, connect_timeout) + else + connect_without_timeout(sockaddr) + end + end + self + end + + # @api private + def connect_without_timeout(sockaddr) + socket.connect(sockaddr) + end + + # @api private + def connect_with_timeout(sockaddr, connect_timeout) + if connect_timeout <= 0 + raise Error::SocketTimeoutError, "The socket took over #{connect_timeout} seconds to connect" + end + + deadline = Utils.monotonic_time + connect_timeout + begin + socket.connect_nonblock(sockaddr) + rescue IO::WaitWritable + select_timeout = deadline - Utils.monotonic_time + if select_timeout <= 0 + raise Error::SocketTimeoutError, "The socket took over #{connect_timeout} seconds to connect" + end + if IO.select(nil, [socket], nil, select_timeout) + retry + else + socket.close + raise Error::SocketTimeoutError, "The socket took over #{connect_timeout} seconds to connect" end - self + rescue Errno::EISCONN + # Socket is connected, nothing more to do end end - private :connect! private diff --git a/lib/mongo/uri/options_mapper.rb b/lib/mongo/uri/options_mapper.rb index bc7e8f16dc..eac2f59c3b 100644 --- a/lib/mongo/uri/options_mapper.rb +++ b/lib/mongo/uri/options_mapper.rb @@ -272,6 +272,7 @@ def self.uri_option(uri_key, name, **extra) uri_option 'localThresholdMS', :local_threshold, type: :ms uri_option 'heartbeatFrequencyMS', :heartbeat_frequency, type: :ms uri_option 'maxIdleTimeMS', :max_idle_time, type: :ms + uri_option 'timeoutMS', :timeout_ms, type: :integer # Write Options uri_option 'w', :w, group: :write_concern, type: :w diff --git a/spec/atlas/atlas_connectivity_spec.rb b/spec/atlas/atlas_connectivity_spec.rb index ecd511ffb3..07a0fd8a2a 100644 --- a/spec/atlas/atlas_connectivity_spec.rb +++ b/spec/atlas/atlas_connectivity_spec.rb @@ -10,6 +10,10 @@ require_atlas describe 'connection to Atlas' do + after do + client.close + end + it 'runs ismaster successfully' do expect { client.database.command(:hello => 1) } .not_to raise_error diff --git a/spec/atlas/operations_spec.rb b/spec/atlas/operations_spec.rb index 8a46ab3702..bee0e6021c 100644 --- a/spec/atlas/operations_spec.rb +++ b/spec/atlas/operations_spec.rb @@ -10,6 +10,10 @@ require_atlas describe 'ping' do + after do + client.close + end + it 'works' do expect do client.database.command(ping: 1) diff --git a/spec/integration/client_side_encryption/auto_encryption_mongocryptd_spawn_spec.rb b/spec/integration/client_side_encryption/auto_encryption_mongocryptd_spawn_spec.rb index 791c10d2f4..a73a35d9d5 100644 --- a/spec/integration/client_side_encryption/auto_encryption_mongocryptd_spawn_spec.rb +++ b/spec/integration/client_side_encryption/auto_encryption_mongocryptd_spawn_spec.rb @@ -57,7 +57,8 @@ 'jsonSchema' => kind_of(Hash), 'isRemoteSchema' => false, ), - { execution_options: { deserialize_as_bson: true } }, + { execution_options: { deserialize_as_bson: true }, timeout_ms: nil }, + ) .and_raise(Mongo::Error::NoServerAvailable.new(server_selector, cluster)) end diff --git a/spec/integration/client_side_encryption/auto_encryption_spec.rb 
b/spec/integration/client_side_encryption/auto_encryption_spec.rb index 636915c4df..de7b891ba9 100644 --- a/spec/integration/client_side_encryption/auto_encryption_spec.rb +++ b/spec/integration/client_side_encryption/auto_encryption_spec.rb @@ -30,7 +30,8 @@ extra_options: extra_options, }, database: 'auto_encryption', - max_pool_size: max_pool_size + max_pool_size: max_pool_size, + timeout_ms: timeout_ms ), ) end @@ -97,27 +98,84 @@ end shared_examples 'an encrypted command' do - context 'with AWS KMS provider' do - include_context 'with AWS kms_providers' + # context 'with AWS KMS provider' do + # include_context 'with AWS kms_providers' + + # context 'with validator' do + # include_context 'jsonSchema validator on collection' + # it_behaves_like 'it performs an encrypted command' + # end + + # context 'with schema map' do + # include_context 'schema map in client options' + # it_behaves_like 'it performs an encrypted command' + + # context 'with limited connection pool' do + # include_context 'limited connection pool' + # it_behaves_like 'it performs an encrypted command' + # end + # end + # end + + # context 'with Azure KMS provider' do + # include_context 'with Azure kms_providers' + + # context 'with validator' do + # include_context 'jsonSchema validator on collection' + # it_behaves_like 'it performs an encrypted command' + # end + + # context 'with schema map' do + # include_context 'schema map in client options' + # it_behaves_like 'it performs an encrypted command' + + # context 'with limited connection pool' do + # include_context 'limited connection pool' + # it_behaves_like 'it performs an encrypted command' + # end + # end + # end + + # context 'with GCP KMS provider' do + # include_context 'with GCP kms_providers' + + # context 'with validator' do + # include_context 'jsonSchema validator on collection' + # it_behaves_like 'it performs an encrypted command' + # end + + # context 'with schema map' do + # include_context 'schema map in client options' + # it_behaves_like 'it performs an encrypted command' + + # context 'with limited connection pool' do + # include_context 'limited connection pool' + # it_behaves_like 'it performs an encrypted command' + # end + # end + # end + + # context 'with KMIP KMS provider' do + # include_context 'with KMIP kms_providers' + + # context 'with validator' do + # include_context 'jsonSchema validator on collection' + # it_behaves_like 'it performs an encrypted command' + # end + + # context 'with schema map' do + # include_context 'schema map in client options' + # it_behaves_like 'it performs an encrypted command' + + # context 'with limited connection pool' do + # include_context 'limited connection pool' + # it_behaves_like 'it performs an encrypted command' + # end + # end + # end - context 'with validator' do - include_context 'jsonSchema validator on collection' - it_behaves_like 'it performs an encrypted command' - end - - context 'with schema map' do - include_context 'schema map in client options' - it_behaves_like 'it performs an encrypted command' - - context 'with limited connection pool' do - include_context 'limited connection pool' - it_behaves_like 'it performs an encrypted command' - end - end - end - - context 'with Azure KMS provider' do - include_context 'with Azure kms_providers' + context 'with local KMS provider' do + include_context 'with local kms_providers' context 'with validator' do include_context 'jsonSchema validator on collection' @@ -134,614 +192,563 @@ end end end + end - context 'with GCP KMS provider' do 
- include_context 'with GCP kms_providers' + [nil, 0].each do |timeout_ms| + context "with timeout_ms #{timeout_ms}" do + let(:timeout_ms) { timeout_ms } - context 'with validator' do - include_context 'jsonSchema validator on collection' - it_behaves_like 'it performs an encrypted command' - end + describe '#aggregate' do + shared_examples 'it performs an encrypted command' do + include_context 'encrypted document in collection' - context 'with schema map' do - include_context 'schema map in client options' - it_behaves_like 'it performs an encrypted command' + let(:result) do + encryption_client['users'].aggregate([ + { '$match' => { 'ssn' => ssn } } + ]).first + end - context 'with limited connection pool' do - include_context 'limited connection pool' - it_behaves_like 'it performs an encrypted command' - end - end - end + it 'encrypts the command and decrypts the response' do + result.should_not be_nil + result['ssn'].should == ssn + end - context 'with KMIP KMS provider' do - include_context 'with KMIP kms_providers' + context 'when bypass_auto_encryption=true' do + include_context 'bypass auto encryption' - context 'with validator' do - include_context 'jsonSchema validator on collection' - it_behaves_like 'it performs an encrypted command' - end + it 'does not encrypt the command' do + result.should be_nil + end - context 'with schema map' do - include_context 'schema map in client options' - it_behaves_like 'it performs an encrypted command' + it 'does auto decrypt the response' do + result = encryption_client['users'].aggregate([ + { '$match' => { 'ssn' => encrypted_ssn_binary } } + ]).first - context 'with limited connection pool' do - include_context 'limited connection pool' - it_behaves_like 'it performs an encrypted command' + result.should_not be_nil + result['ssn'].should == ssn + end + end end - end - end - context 'with local KMS provider' do - include_context 'with local kms_providers' - - context 'with validator' do - include_context 'jsonSchema validator on collection' - it_behaves_like 'it performs an encrypted command' + it_behaves_like 'an encrypted command' end - context 'with schema map' do - include_context 'schema map in client options' - it_behaves_like 'it performs an encrypted command' + describe '#count' do + shared_examples 'it performs an encrypted command' do + include_context 'multiple encrypted documents in collection' - context 'with limited connection pool' do - include_context 'limited connection pool' - it_behaves_like 'it performs an encrypted command' - end - end - end - end + let(:result) { encryption_client['users'].count(ssn: ssn) } - describe '#aggregate' do - shared_examples 'it performs an encrypted command' do - include_context 'encrypted document in collection' - - let(:result) do - encryption_client['users'].aggregate([ - { '$match' => { 'ssn' => ssn } } - ]).first - end - - it 'encrypts the command and decrypts the response' do - result.should_not be_nil - result['ssn'].should == ssn - end + it 'encrypts the command and finds the documents' do + expect(result).to eq(2) + end - context 'when bypass_auto_encryption=true' do - include_context 'bypass auto encryption' + context 'with bypass_auto_encryption=true' do + include_context 'bypass auto encryption' - it 'does not encrypt the command' do - result.should be_nil + it 'does not encrypt the command' do + expect(result).to eq(0) + end + end end - it 'does auto decrypt the response' do - result = encryption_client['users'].aggregate([ - { '$match' => { 'ssn' => encrypted_ssn_binary } } - 
]).first - - result.should_not be_nil - result['ssn'].should == ssn - end + it_behaves_like 'an encrypted command' end - end - - it_behaves_like 'an encrypted command' - end - describe '#count' do - shared_examples 'it performs an encrypted command' do - include_context 'multiple encrypted documents in collection' + describe '#distinct' do + shared_examples 'it performs an encrypted command' do + include_context 'encrypted document in collection' - let(:result) { encryption_client['users'].count(ssn: ssn) } + let(:result) { encryption_client['users'].distinct(:ssn) } - it 'encrypts the command and finds the documents' do - expect(result).to eq(2) - end + it 'decrypts the SSN field' do + expect(result.length).to eq(1) + expect(result).to include(ssn) + end - context 'with bypass_auto_encryption=true' do - include_context 'bypass auto encryption' + context 'with bypass_auto_encryption=true' do + include_context 'bypass auto encryption' - it 'does not encrypt the command' do - expect(result).to eq(0) + it 'still decrypts the SSN field' do + expect(result.length).to eq(1) + expect(result).to include(ssn) + end + end end - end - end - - it_behaves_like 'an encrypted command' - end - describe '#distinct' do - shared_examples 'it performs an encrypted command' do - include_context 'encrypted document in collection' - - let(:result) { encryption_client['users'].distinct(:ssn) } - - it 'decrypts the SSN field' do - expect(result.length).to eq(1) - expect(result).to include(ssn) + it_behaves_like 'an encrypted command' end - context 'with bypass_auto_encryption=true' do - include_context 'bypass auto encryption' - - it 'still decrypts the SSN field' do - expect(result.length).to eq(1) - expect(result).to include(ssn) - end - end - end - - it_behaves_like 'an encrypted command' - end - - describe '#delete_one' do - shared_examples 'it performs an encrypted command' do - include_context 'encrypted document in collection' + describe '#delete_one' do + shared_examples 'it performs an encrypted command' do + include_context 'encrypted document in collection' - let(:result) { encryption_client['users'].delete_one(ssn: ssn) } + let(:result) { encryption_client['users'].delete_one(ssn: ssn) } - it 'encrypts the SSN field' do - expect(result.deleted_count).to eq(1) - end + it 'encrypts the SSN field' do + expect(result.deleted_count).to eq(1) + end - context 'with bypass_auto_encryption=true' do - include_context 'bypass auto encryption' + context 'with bypass_auto_encryption=true' do + include_context 'bypass auto encryption' - it 'does not encrypt the SSN field' do - expect(result.deleted_count).to eq(0) + it 'does not encrypt the SSN field' do + expect(result.deleted_count).to eq(0) + end + end end - end - end - it_behaves_like 'an encrypted command' - end - - describe '#delete_many' do - shared_examples 'it performs an encrypted command' do - include_context 'multiple encrypted documents in collection' - - let(:result) { encryption_client['users'].delete_many(ssn: ssn) } - - it 'decrypts the SSN field' do - expect(result.deleted_count).to eq(2) + it_behaves_like 'an encrypted command' end - context 'with bypass_auto_encryption=true' do - include_context 'bypass auto encryption' + describe '#delete_many' do + shared_examples 'it performs an encrypted command' do + include_context 'multiple encrypted documents in collection' - it 'does not encrypt the SSN field' do - expect(result.deleted_count).to eq(0) - end - end - end + let(:result) { encryption_client['users'].delete_many(ssn: ssn) } - it_behaves_like 
'an encrypted command' - end - - describe '#find' do - shared_examples 'it performs an encrypted command' do - include_context 'encrypted document in collection' - - let(:result) { encryption_client['users'].find(ssn: ssn).first } - - it 'encrypts the command and decrypts the response' do - result.should_not be_nil - expect(result['ssn']).to eq(ssn) - end + it 'decrypts the SSN field' do + expect(result.deleted_count).to eq(2) + end - context 'when bypass_auto_encryption=true' do - include_context 'bypass auto encryption' + context 'with bypass_auto_encryption=true' do + include_context 'bypass auto encryption' - it 'does not encrypt the command' do - expect(result).to be_nil + it 'does not encrypt the SSN field' do + expect(result.deleted_count).to eq(0) + end + end end - end - end - - it_behaves_like 'an encrypted command' - end - - describe '#find_one_and_delete' do - shared_examples 'it performs an encrypted command' do - include_context 'encrypted document in collection' - let(:result) { encryption_client['users'].find_one_and_delete(ssn: ssn) } - - it 'encrypts the command and decrypts the response' do - expect(result['ssn']).to eq(ssn) + it_behaves_like 'an encrypted command' end - context 'when bypass_auto_encryption=true' do - include_context 'bypass auto encryption' - - it 'does not encrypt the command' do - expect(result).to be_nil - end - - it 'still decrypts the command' do - result = encryption_client['users'].find_one_and_delete(ssn: encrypted_ssn_binary) - expect(result['ssn']).to eq(ssn) - end - end - end + describe '#find' do + shared_examples 'it performs an encrypted command' do + include_context 'encrypted document in collection' - it_behaves_like 'an encrypted command' - end + let(:result) { encryption_client['users'].find(ssn: ssn).first } - describe '#find_one_and_replace' do - shared_examples 'it performs an encrypted command' do - let(:name) { 'Alan Turing' } + it 'encrypts the command and decrypts the response' do + result.should_not be_nil + expect(result['ssn']).to eq(ssn) + end - context 'with :return_document => :before' do - include_context 'encrypted document in collection' + context 'when bypass_auto_encryption=true' do + include_context 'bypass auto encryption' - let(:result) do - encryption_client['users'].find_one_and_replace( - { ssn: ssn }, - { name: name }, - return_document: :before - ) + it 'does not encrypt the command' do + expect(result).to be_nil + end + end end - it 'encrypts the command and decrypts the response, returning original document' do - expect(result['ssn']).to eq(ssn) - - documents = client['users'].find - expect(documents.count).to eq(1) - expect(documents.first['ssn']).to be_nil - end + it_behaves_like 'an encrypted command' end - context 'with :return_document => :after' do - before do - client['users'].insert_one(name: name) - end + describe '#find_one_and_delete' do + shared_examples 'it performs an encrypted command' do + include_context 'encrypted document in collection' - let(:result) do - encryption_client['users'].find_one_and_replace( - { name: name }, - { ssn: ssn }, - return_document: :after - ) - end + let(:result) { encryption_client['users'].find_one_and_delete(ssn: ssn) } - it 'encrypts the command and decrypts the response, returning new document' do - expect(result['ssn']).to eq(ssn) + it 'encrypts the command and decrypts the response' do + expect(result['ssn']).to eq(ssn) + end - documents = client['users'].find - expect(documents.count).to eq(1) - expect(documents.first['ssn']).to eq(encrypted_ssn_binary) - 
end - end + context 'when bypass_auto_encryption=true' do + include_context 'bypass auto encryption' - context 'when bypass_auto_encryption=true' do - include_context 'bypass auto encryption' - include_context 'encrypted document in collection' + it 'does not encrypt the command' do + expect(result).to be_nil + end - let(:result) do - encryption_client['users'].find_one_and_replace( - { ssn: encrypted_ssn_binary }, - { name: name }, - :return_document => :before - ) + it 'still decrypts the command' do + result = encryption_client['users'].find_one_and_delete(ssn: encrypted_ssn_binary) + expect(result['ssn']).to eq(ssn) + end + end end - it 'does not encrypt the command but still decrypts the response, returning original document' do - expect(result['ssn']).to eq(ssn) - - documents = client['users'].find - expect(documents.count).to eq(1) - expect(documents.first['ssn']).to be_nil - end + it_behaves_like 'an encrypted command' end - end - - it_behaves_like 'an encrypted command' - end - describe '#find_one_and_update' do - shared_examples 'it performs an encrypted command' do - include_context 'encrypted document in collection' + describe '#find_one_and_replace' do + shared_examples 'it performs an encrypted command' do + let(:name) { 'Alan Turing' } - let(:name) { 'Alan Turing' } + context 'with :return_document => :before' do + include_context 'encrypted document in collection' - let(:result) do - encryption_client['users'].find_one_and_update( - { ssn: ssn }, - { name: name } - ) - end + let(:result) do + encryption_client['users'].find_one_and_replace( + { ssn: ssn }, + { name: name }, + return_document: :before + ) + end - it 'encrypts the command and decrypts the response' do - expect(result['ssn']).to eq(ssn) + it 'encrypts the command and decrypts the response, returning original document' do + expect(result['ssn']).to eq(ssn) - documents = client['users'].find - expect(documents.count).to eq(1) - expect(documents.first['ssn']).to be_nil - end + documents = client['users'].find + expect(documents.count).to eq(1) + expect(documents.first['ssn']).to be_nil + end + end - context 'with bypass_auto_encryption=true' do - include_context 'bypass auto encryption' + context 'with :return_document => :after' do + before do + client['users'].insert_one(name: name) + end + + let(:result) do + encryption_client['users'].find_one_and_replace( + { name: name }, + { ssn: ssn }, + return_document: :after + ) + end + + it 'encrypts the command and decrypts the response, returning new document' do + expect(result['ssn']).to eq(ssn) + + documents = client['users'].find + expect(documents.count).to eq(1) + expect(documents.first['ssn']).to eq(encrypted_ssn_binary) + end + end - it 'does not encrypt the command' do - expect(result).to be_nil + context 'when bypass_auto_encryption=true' do + include_context 'bypass auto encryption' + include_context 'encrypted document in collection' + + let(:result) do + encryption_client['users'].find_one_and_replace( + { ssn: encrypted_ssn_binary }, + { name: name }, + :return_document => :before + ) + end + + it 'does not encrypt the command but still decrypts the response, returning original document' do + expect(result['ssn']).to eq(ssn) + + documents = client['users'].find + expect(documents.count).to eq(1) + expect(documents.first['ssn']).to be_nil + end + end end - it 'still decrypts the response' do - # Query using the encrypted ssn value so the find will succeed - result = encryption_client['users'].find_one_and_update( - { ssn: encrypted_ssn_binary }, - { 
name: name } - ) - - expect(result['ssn']).to eq(ssn) - end + it_behaves_like 'an encrypted command' end - end - - it_behaves_like 'an encrypted command' - end - describe '#insert_one' do - let(:query) { { ssn: ssn } } - let(:result) { encryption_client['users'].insert_one(query) } + describe '#find_one_and_update' do + shared_examples 'it performs an encrypted command' do + include_context 'encrypted document in collection' - shared_examples 'it performs an encrypted command' do - it 'encrypts the ssn field' do - expect(result).to be_ok - expect(result.inserted_ids.length).to eq(1) + let(:name) { 'Alan Turing' } - id = result.inserted_ids.first - - document = client['users'].find(_id: id).first - document.should_not be_nil - expect(document['ssn']).to eq(encrypted_ssn_binary) - end - end + let(:result) do + encryption_client['users'].find_one_and_update( + { ssn: ssn }, + { name: name } + ) + end - shared_examples 'it obeys bypass_auto_encryption option' do - include_context 'bypass auto encryption' + it 'encrypts the command and decrypts the response' do + expect(result['ssn']).to eq(ssn) - it 'does not encrypt the command' do - result = encryption_client['users'].insert_one(ssn: ssn) - expect(result).to be_ok - expect(result.inserted_ids.length).to eq(1) + documents = client['users'].find + expect(documents.count).to eq(1) + expect(documents.first['ssn']).to be_nil + end - id = result.inserted_ids.first + context 'with bypass_auto_encryption=true' do + include_context 'bypass auto encryption' - document = client['users'].find(_id: id).first - expect(document['ssn']).to eq(ssn) - end - end + it 'does not encrypt the command' do + expect(result).to be_nil + end - it_behaves_like 'an encrypted command' + it 'still decrypts the response' do + # Query using the encrypted ssn value so the find will succeed + result = encryption_client['users'].find_one_and_update( + { ssn: encrypted_ssn_binary }, + { name: name } + ) - context 'with jsonSchema in schema_map option' do - include_context 'schema map in client options' + expect(result['ssn']).to eq(ssn) + end + end + end - context 'with AWS KMS provider' do - include_context 'with AWS kms_providers' - it_behaves_like 'it obeys bypass_auto_encryption option' + it_behaves_like 'an encrypted command' end - context 'with Azure KMS provider' do - include_context 'with Azure kms_providers' - it_behaves_like 'it obeys bypass_auto_encryption option' - end + describe '#insert_one' do + let(:query) { { ssn: ssn } } + let(:result) { encryption_client['users'].insert_one(query) } - context 'with GCP KMS provider' do - include_context 'with GCP kms_providers' - it_behaves_like 'it obeys bypass_auto_encryption option' - end + shared_examples 'it performs an encrypted command' do + it 'encrypts the ssn field' do + expect(result).to be_ok + expect(result.inserted_ids.length).to eq(1) - context 'with KMIP KMS provider' do - include_context 'with KMIP kms_providers' - it_behaves_like 'it obeys bypass_auto_encryption option' - end + id = result.inserted_ids.first + document = client['users'].find(_id: id).first + document.should_not be_nil + expect(document['ssn']).to eq(encrypted_ssn_binary) + end + end - context 'with local KMS provider and ' do - include_context 'with local kms_providers' - it_behaves_like 'it obeys bypass_auto_encryption option' - end - end + shared_examples 'it obeys bypass_auto_encryption option' do + include_context 'bypass auto encryption' - context 'with schema_map client option pointing to wrong collection' do - let(:local_schema) { { 
'wrong_db.wrong_coll' => schema_map } } + it 'does not encrypt the command' do + result = encryption_client['users'].insert_one(ssn: ssn) + expect(result).to be_ok + expect(result.inserted_ids.length).to eq(1) - include_context 'with local kms_providers' + id = result.inserted_ids.first - it 'does not raise an exception but doesn\'t encrypt either' do - expect do - result - end.not_to raise_error + document = client['users'].find(_id: id).first + expect(document['ssn']).to eq(ssn) + end + end - expect(result).to be_ok - id = result.inserted_ids.first + it_behaves_like 'an encrypted command' - document = client['users'].find(_id: id).first - document.should_not be_nil - # Document was not encrypted - expect(document['ssn']).to eq(ssn) - end - end + context 'with jsonSchema in schema_map option' do + include_context 'schema map in client options' + + context 'with AWS KMS provider' do + include_context 'with AWS kms_providers' + it_behaves_like 'it obeys bypass_auto_encryption option' + end - context 'encrypting using key alt name' do - include_context 'schema map in client options' + context 'with Azure KMS provider' do + include_context 'with Azure kms_providers' + it_behaves_like 'it obeys bypass_auto_encryption option' + end - let(:query) { { ssn: ssn, altname: key_alt_name } } + context 'with GCP KMS provider' do + include_context 'with GCP kms_providers' + it_behaves_like 'it obeys bypass_auto_encryption option' + end - context 'with AWS KMS provider' do - include_context 'with AWS kms_providers and key alt names' - it 'encrypts the ssn field' do - expect(result).to be_ok - expect(result.inserted_ids.length).to eq(1) + context 'with KMIP KMS provider' do + include_context 'with KMIP kms_providers' + it_behaves_like 'it obeys bypass_auto_encryption option' + end - id = result.inserted_ids.first - document = client['users'].find(_id: id).first - document.should_not be_nil - # Auto-encryption with key alt names only works with random encryption, - # so it will not generate the same result on every test run. - expect(document['ssn']).to be_ciphertext + context 'with local KMS provider and ' do + include_context 'with local kms_providers' + it_behaves_like 'it obeys bypass_auto_encryption option' + end end - end - context 'with Azure KMS provider' do - include_context 'with Azure kms_providers and key alt names' - it 'encrypts the ssn field' do - expect(result).to be_ok - expect(result.inserted_ids.length).to eq(1) + context 'with schema_map client option pointing to wrong collection' do + let(:local_schema) { { 'wrong_db.wrong_coll' => schema_map } } - id = result.inserted_ids.first + include_context 'with local kms_providers' - document = client['users'].find(_id: id).first - document.should_not be_nil - # Auto-encryption with key alt names only works with random encryption, - # so it will not generate the same result on every test run. - expect(document['ssn']).to be_ciphertext - end + it 'does not raise an exception but doesn\'t encrypt either' do + expect do + result + end.not_to raise_error - context 'with GCP KMS provider' do - include_context 'with GCP kms_providers and key alt names' - it 'encrypts the ssn field' do expect(result).to be_ok - expect(result.inserted_ids.length).to eq(1) - id = result.inserted_ids.first document = client['users'].find(_id: id).first document.should_not be_nil - # Auto-encryption with key alt names only works with random encryption, - # so it will not generate the same result on every test run. 
- expect(document['ssn']).to be_ciphertext + # Document was not encrypted + expect(document['ssn']).to eq(ssn) end end - context 'with KMIP KMS provider' do - include_context 'with KMIP kms_providers and key alt names' - it 'encrypts the ssn field' do - expect(result).to be_ok - expect(result.inserted_ids.length).to eq(1) + context 'encrypting using key alt name' do + include_context 'schema map in client options' - id = result.inserted_ids.first + let(:query) { { ssn: ssn, altname: key_alt_name } } - document = client['users'].find(_id: id).first - document.should_not be_nil - # Auto-encryption with key alt names only works with random encryption, - # so it will not generate the same result on every test run. - expect(document['ssn']).to be_ciphertext + context 'with AWS KMS provider' do + include_context 'with AWS kms_providers and key alt names' + it 'encrypts the ssn field' do + expect(result).to be_ok + expect(result.inserted_ids.length).to eq(1) + + id = result.inserted_ids.first + + document = client['users'].find(_id: id).first + document.should_not be_nil + # Auto-encryption with key alt names only works with random encryption, + # so it will not generate the same result on every test run. + expect(document['ssn']).to be_ciphertext + end + end + + context 'with Azure KMS provider' do + include_context 'with Azure kms_providers and key alt names' + it 'encrypts the ssn field' do + expect(result).to be_ok + expect(result.inserted_ids.length).to eq(1) + + id = result.inserted_ids.first + + document = client['users'].find(_id: id).first + document.should_not be_nil + # Auto-encryption with key alt names only works with random encryption, + # so it will not generate the same result on every test run. + expect(document['ssn']).to be_ciphertext + end + + context 'with GCP KMS provider' do + include_context 'with GCP kms_providers and key alt names' + it 'encrypts the ssn field' do + expect(result).to be_ok + expect(result.inserted_ids.length).to eq(1) + + id = result.inserted_ids.first + + document = client['users'].find(_id: id).first + document.should_not be_nil + # Auto-encryption with key alt names only works with random encryption, + # so it will not generate the same result on every test run. + expect(document['ssn']).to be_ciphertext + end + end + + context 'with KMIP KMS provider' do + include_context 'with KMIP kms_providers and key alt names' + it 'encrypts the ssn field' do + expect(result).to be_ok + expect(result.inserted_ids.length).to eq(1) + + id = result.inserted_ids.first + + document = client['users'].find(_id: id).first + document.should_not be_nil + # Auto-encryption with key alt names only works with random encryption, + # so it will not generate the same result on every test run. + expect(document['ssn']).to be_ciphertext + end + end end - end - end - context 'with local KMS provider' do - include_context 'with local kms_providers and key alt names' - it 'encrypts the ssn field' do - expect(result).to be_ok - expect(result.inserted_ids.length).to eq(1) + context 'with local KMS provider' do + include_context 'with local kms_providers and key alt names' + it 'encrypts the ssn field' do + expect(result).to be_ok + expect(result.inserted_ids.length).to eq(1) - id = result.inserted_ids.first + id = result.inserted_ids.first - document = client['users'].find(_id: id).first - document.should_not be_nil - # Auto-encryption with key alt names only works with random encryption, - # so it will not generate the same result on every test run. 
- expect(document['ssn']).to be_a_kind_of(BSON::Binary) + document = client['users'].find(_id: id).first + document.should_not be_nil + # Auto-encryption with key alt names only works with random encryption, + # so it will not generate the same result on every test run. + expect(document['ssn']).to be_a_kind_of(BSON::Binary) + end + end end end - end - end - describe '#replace_one' do - shared_examples 'it performs an encrypted command' do - include_context 'encrypted document in collection' + describe '#replace_one' do + shared_examples 'it performs an encrypted command' do + include_context 'encrypted document in collection' - let(:replacement_ssn) { '098-765-4321' } + let(:replacement_ssn) { '098-765-4321' } - let(:result) do - encryption_client['users'].replace_one( - { ssn: ssn }, - { ssn: replacement_ssn } - ) - end + let(:result) do + encryption_client['users'].replace_one( + { ssn: ssn }, + { ssn: replacement_ssn } + ) + end - it 'encrypts the ssn field' do - expect(result.modified_count).to eq(1) + it 'encrypts the ssn field' do + expect(result.modified_count).to eq(1) - find_result = encryption_client['users'].find(ssn: '098-765-4321') - expect(find_result.count).to eq(1) - end + find_result = encryption_client['users'].find(ssn: '098-765-4321') + expect(find_result.count).to eq(1) + end - context 'with bypass_auto_encryption=true' do - include_context 'bypass auto encryption' + context 'with bypass_auto_encryption=true' do + include_context 'bypass auto encryption' - it 'does not encrypt the command' do - expect(result.modified_count).to eq(0) + it 'does not encrypt the command' do + expect(result.modified_count).to eq(0) + end + end end - end - end - it_behaves_like 'an encrypted command' - end + it_behaves_like 'an encrypted command' + end - describe '#update_one' do - shared_examples 'it performs an encrypted command' do - include_context 'encrypted document in collection' + describe '#update_one' do + shared_examples 'it performs an encrypted command' do + include_context 'encrypted document in collection' - let(:result) do - encryption_client['users'].replace_one({ ssn: ssn }, { ssn: '098-765-4321' }) - end + let(:result) do + encryption_client['users'].replace_one({ ssn: ssn }, { ssn: '098-765-4321' }) + end - it 'encrypts the ssn field' do - expect(result.n).to eq(1) + it 'encrypts the ssn field' do + expect(result.n).to eq(1) - find_result = encryption_client['users'].find(ssn: '098-765-4321') - expect(find_result.count).to eq(1) - end + find_result = encryption_client['users'].find(ssn: '098-765-4321') + expect(find_result.count).to eq(1) + end - context 'with bypass_auto_encryption=true' do - include_context 'bypass auto encryption' + context 'with bypass_auto_encryption=true' do + include_context 'bypass auto encryption' - it 'does not encrypt the command' do - expect(result.n).to eq(0) + it 'does not encrypt the command' do + expect(result.n).to eq(0) + end + end end - end - end - - it_behaves_like 'an encrypted command' - end - describe '#update_many' do - shared_examples 'it performs an encrypted command' do - before do - client['users'].insert_one(ssn: encrypted_ssn_binary, age: 25) - client['users'].insert_one(ssn: encrypted_ssn_binary, age: 43) + it_behaves_like 'an encrypted command' end - let(:result) do - encryption_client['users'].update_many({ ssn: ssn }, { "$inc" => { :age => 1 } }) - end + describe '#update_many' do + shared_examples 'it performs an encrypted command' do + before do + client['users'].insert_one(ssn: encrypted_ssn_binary, age: 25) + 
client['users'].insert_one(ssn: encrypted_ssn_binary, age: 43) + end + + let(:result) do + encryption_client['users'].update_many({ ssn: ssn }, { "$inc" => { :age => 1 } }) + end - it 'encrypts the ssn field' do - expect(result.n).to eq(2) + it 'encrypts the ssn field' do + expect(result.n).to eq(2) - updated_documents = encryption_client['users'].find(ssn: ssn) - ages = updated_documents.map { |doc| doc['age'] } - expect(ages).to include(26) - expect(ages).to include(44) - end + updated_documents = encryption_client['users'].find(ssn: ssn) + ages = updated_documents.map { |doc| doc['age'] } + expect(ages).to include(26) + expect(ages).to include(44) + end - context 'with bypass_auto_encryption=true' do - include_context 'bypass auto encryption' + context 'with bypass_auto_encryption=true' do + include_context 'bypass auto encryption' - it 'does not encrypt the command' do - expect(result.n).to eq(0) + it 'does not encrypt the command' do + expect(result.n).to eq(0) + end + end end + + it_behaves_like 'an encrypted command' end end - - it_behaves_like 'an encrypted command' end end diff --git a/spec/integration/client_side_encryption/on_demand_aws_credentials_spec.rb b/spec/integration/client_side_encryption/on_demand_aws_credentials_spec.rb index 0aa2bd0b2a..93a400d6a8 100644 --- a/spec/integration/client_side_encryption/on_demand_aws_credentials_spec.rb +++ b/spec/integration/client_side_encryption/on_demand_aws_credentials_spec.rb @@ -37,7 +37,7 @@ it 'raises an error' do expect_any_instance_of( Mongo::Auth::Aws::CredentialsRetriever - ).to receive(:credentials).with(no_args).once.and_raise( + ).to receive(:credentials).with(kind_of(Mongo::CsotTimeoutHolder)).once.and_raise( Mongo::Auth::Aws::CredentialsNotFound ) diff --git a/spec/integration/client_side_encryption/range_explicit_encryption_prose_spec.rb b/spec/integration/client_side_encryption/range_explicit_encryption_prose_spec.rb index 78266e8d79..0f4cb82c10 100644 --- a/spec/integration/client_side_encryption/range_explicit_encryption_prose_spec.rb +++ b/spec/integration/client_side_encryption/range_explicit_encryption_prose_spec.rb @@ -7,6 +7,10 @@ # rubocop:disable RSpec/ExampleLength describe 'Range Explicit Encryption' do min_server_version '7.0.0-rc0' + + # TODO: RUBY-3423 + max_server_version '7.99.99' + require_libmongocrypt include_context 'define shared FLE helpers' diff --git a/spec/integration/client_side_operations_timeout/encryption_prose_spec.rb b/spec/integration/client_side_operations_timeout/encryption_prose_spec.rb new file mode 100644 index 0000000000..399df4cc04 --- /dev/null +++ b/spec/integration/client_side_operations_timeout/encryption_prose_spec.rb @@ -0,0 +1,131 @@ +# frozen_string_literal: true + +require 'spec_helper' + +describe 'CSOT for encryption' do + require_libmongocrypt + require_no_multi_mongos + min_server_fcv '4.2' + + include_context 'define shared FLE helpers' + include_context 'with local kms_providers' + + let(:subscriber) { Mrss::EventSubscriber.new } + + describe 'mongocryptd' do + before do + Process.spawn( + 'mongocryptd', + '--pidfilepath=bypass-spawning-mongocryptd.pid', '--port=23000', '--idleShutdownTimeoutSecs=60', + %i[ out err ] => '/dev/null' + ) + end + + let(:client) do + Mongo::Client.new('mongodb://localhost:23000/?timeoutMS=1000').tap do |client| + client.subscribe(Mongo::Monitoring::COMMAND, subscriber) + end + end + + let(:ping_command) do + subscriber.started_events.find do |event| + event.command_name == 'ping' + end&.command + end + + after do + client.close + end + 
+ it 'does not set maxTimeMS for commands sent to mongocryptd' do + expect do + client.use('admin').command(ping: 1) + end.to raise_error(Mongo::Error::OperationFailure) + + expect(ping_command).not_to have_key('maxTimeMS') + end + end + + describe 'ClientEncryption' do + let(:key_vault_client) do + ClientRegistry.instance.new_local_client( + SpecConfig.instance.addresses, + SpecConfig.instance.test_options.merge(timeout_ms: 20) + ) + end + + let(:client_encryption) do + Mongo::ClientEncryption.new( + key_vault_client, + key_vault_namespace: key_vault_namespace, + kms_providers: local_kms_providers + ) + end + + describe '#createDataKey' do + before do + authorized_client.use(key_vault_db)[key_vault_coll].drop + authorized_client.use(key_vault_db)[key_vault_coll].create + authorized_client.use(:admin).command({ + configureFailPoint: 'failCommand', + mode: { + times: 1 + }, + data: { + failCommands: [ 'insert' ], + blockConnection: true, + blockTimeMS: 30 + } + }) + end + + after do + authorized_client.use(:admin).command({ + configureFailPoint: 'failCommand', + mode: 'off', + }) + key_vault_client.close + end + + it 'fails with timeout error' do + expect do + client_encryption.create_data_key('local') + end.to raise_error(Mongo::Error::TimeoutError) + end + end + + describe '#encrypt' do + let!(:data_key_id) do + client_encryption.create_data_key('local') + end + + before do + authorized_client.use(:admin).command({ + configureFailPoint: 'failCommand', + mode: { + times: 1 + }, + data: { + failCommands: [ 'find' ], + blockConnection: true, + blockTimeMS: 30 + } + }) + end + + after do + authorized_client.use(:admin).command({ + configureFailPoint: 'failCommand', + mode: 'off', + }) + end + + it 'fails with timeout error' do + expect do + client_encryption.encrypt('hello', key_id: data_key_id, + algorithm: 'AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic') + end.to raise_error(Mongo::Error::TimeoutError) + end + end + end +end diff --git a/spec/integration/docs_examples_spec.rb b/spec/integration/docs_examples_spec.rb index bf1ae9e1b0..73a9de6ad6 100644 --- a/spec/integration/docs_examples_spec.rb +++ b/spec/integration/docs_examples_spec.rb @@ -9,7 +9,7 @@ # the tests in this file. begin ClientRegistry.instance.global_client('authorized')['_placeholder'].create - rescue Mongo::Error::OperationFailure => e + rescue Mongo::Error::OperationFailure::Family => e # Collection already exists if e.code != 48 raise diff --git a/spec/integration/operation_failure_code_spec.rb b/spec/integration/operation_failure_code_spec.rb index bd124f710d..e777949e6b 100644 --- a/spec/integration/operation_failure_code_spec.rb +++ b/spec/integration/operation_failure_code_spec.rb @@ -17,7 +17,7 @@ collection.insert_one(_id: 1) collection.insert_one(_id: 1) fail('Should have raised') - rescue Mongo::Error::OperationFailure => e + rescue Mongo::Error::OperationFailure::Family => e expect(e.code).to eq(11000) # 4.0 and 4.2 sharded clusters set code name. 
# 4.0 and 4.2 replica sets and standalones do not, diff --git a/spec/integration/operation_failure_message_spec.rb b/spec/integration/operation_failure_message_spec.rb index d865c2d981..ee2ec0600c 100644 --- a/spec/integration/operation_failure_message_spec.rb +++ b/spec/integration/operation_failure_message_spec.rb @@ -22,7 +22,7 @@ begin client.command(bogus_command: nil) fail('Should have raised') - rescue Mongo::Error::OperationFailure => e + rescue Mongo::Error::OperationFailure::Family => e e.code_name.should == 'CommandNotFound' e.message.should =~ %r,\A\[59:CommandNotFound\]: no such (?:command|cmd): '?bogus_command'?, end @@ -36,7 +36,7 @@ begin client.command(bogus_command: nil) fail('Should have raised') - rescue Mongo::Error::OperationFailure => e + rescue Mongo::Error::OperationFailure::Family => e e.code_name.should be nil e.message.should =~ %r,\A\[59\]: no such (?:command|cmd): '?bogus_command'?, end @@ -53,7 +53,7 @@ collection.insert_one(_id: 1) collection.insert_one(_id: 1) fail('Should have raised') - rescue Mongo::Error::OperationFailure => e + rescue Mongo::Error::OperationFailure::Family => e e.code_name.should be nil e.message.should =~ %r,\A\[11000\]: (?:insertDocument :: caused by :: 11000 )?E11000 duplicate key error (?:collection|index):, end diff --git a/spec/integration/retryable_errors_spec.rb b/spec/integration/retryable_errors_spec.rb index ea1aa960a9..1944701461 100644 --- a/spec/integration/retryable_errors_spec.rb +++ b/spec/integration/retryable_errors_spec.rb @@ -83,7 +83,7 @@ begin collection.find(a: 1).to_a - rescue Mongo::Error::OperationFailure => exception + rescue Mongo::Error::OperationFailure::Family => exception else fail('Expected operation to fail') end @@ -128,7 +128,7 @@ begin collection.insert_one(a: 1) - rescue Mongo::Error::OperationFailure => exception + rescue Mongo::Error::OperationFailure::Family => exception else fail('Expected operation to fail') end diff --git a/spec/integration/sdam_error_handling_spec.rb b/spec/integration/sdam_error_handling_spec.rb index 57884a5904..39a759c08d 100644 --- a/spec/integration/sdam_error_handling_spec.rb +++ b/spec/integration/sdam_error_handling_spec.rb @@ -418,7 +418,8 @@ expect_server_state_change end - it_behaves_like 'marks server unknown and clears connection pool' + # https://jira.mongodb.org/browse/RUBY-2523 + # it_behaves_like 'marks server unknown and clears connection pool' after do admin_client.command(configureFailPoint: 'failCommand', mode: 'off') diff --git a/spec/integration/search_indexes_prose_spec.rb b/spec/integration/search_indexes_prose_spec.rb index 0a17accaf2..75ab4736d7 100644 --- a/spec/integration/search_indexes_prose_spec.rb +++ b/spec/integration/search_indexes_prose_spec.rb @@ -87,6 +87,10 @@ def filter_results(result, names) let(:definition) { { 'mappings' => { 'dynamic' => false } } } let(:create_index) { helper.collection.search_indexes.create_one(definition, name: name) } + after do + client.close + end + # Case 1: Driver can successfully create and list search indexes context 'when creating and listing search indexes' do let(:index) { helper.wait_for(name).first } diff --git a/spec/integration/server_spec.rb b/spec/integration/server_spec.rb index 44be905173..eed7f437b1 100644 --- a/spec/integration/server_spec.rb +++ b/spec/integration/server_spec.rb @@ -6,6 +6,7 @@ describe 'Server' do let(:client) { authorized_client } + let(:context) { Mongo::Operation::Context.new } let(:server) { client.cluster.next_primary } let(:collection) { client['collection'] } @@ 
-15,7 +16,7 @@ context 'it performs read operations and receives the correct result type' do context 'normal server' do it 'can be used for reads' do - result = view.send(:send_initial_query, server) + result = view.send(:send_initial_query, server, context) expect(result).to be_a(Mongo::Operation::Find::Result) end end @@ -35,7 +36,7 @@ it 'can be used for reads' do # See also RUBY-3102. - result = view.send(:send_initial_query, server) + result = view.send(:send_initial_query, server, context) expect(result).to be_a(Mongo::Operation::Find::Result) end end @@ -57,7 +58,7 @@ it 'is unusable' do # See also RUBY-3102. lambda do - view.send(:send_initial_query, server) + view.send(:send_initial_query, server, context) end.should raise_error(Mongo::Error::ServerNotUsable) end end diff --git a/spec/integration/transactions_api_examples_spec.rb b/spec/integration/transactions_api_examples_spec.rb index daec64dc99..66302f82e2 100644 --- a/spec/integration/transactions_api_examples_spec.rb +++ b/spec/integration/transactions_api_examples_spec.rb @@ -58,5 +58,7 @@ # End Transactions withTxn API Example 1 + # Do not leak clients. + client.close end end diff --git a/spec/kerberos/kerberos_spec.rb b/spec/kerberos/kerberos_spec.rb index 86d09f50e4..a785a25c11 100644 --- a/spec/kerberos/kerberos_spec.rb +++ b/spec/kerberos/kerberos_spec.rb @@ -20,6 +20,10 @@ def require_env_value(key) end end + after do + client&.close + end + let(:user) do "#{require_env_value('SASL_USER')}%40#{realm}" end diff --git a/spec/lite_spec_helper.rb b/spec/lite_spec_helper.rb index f7db224690..7f7862296c 100644 --- a/spec/lite_spec_helper.rb +++ b/spec/lite_spec_helper.rb @@ -168,16 +168,6 @@ def require_atlas end end - if SpecConfig.instance.ci? && !%w(1 true yes).include?(ENV['INTERACTIVE']&.downcase) - # Tests should take under 10 seconds ideally but it seems - # we have some that run for more than 10 seconds in CI. - config.around(:each) do |example| - TimeoutInterrupt.timeout(example_timeout_seconds, ExampleTimeout) do - example.run - end - end - end - if SpecConfig.instance.ci? if defined?(Rfc::Rif) unless BSON::Environment.jruby? 
diff --git a/spec/mongo/auth/user/view_spec.rb b/spec/mongo/auth/user/view_spec.rb index 4986754855..73e620b0ac 100644 --- a/spec/mongo/auth/user/view_spec.rb +++ b/spec/mongo/auth/user/view_spec.rb @@ -526,7 +526,7 @@ it "raises and reports the write concern error correctly" do begin view.send(method, input) - rescue Mongo::Error::OperationFailure => e + rescue Mongo::Error::OperationFailure::Family => e expect(e.write_concern_error?).to be true expect(e.write_concern_error_document).to eq( "code" => 64, diff --git a/spec/mongo/caching_cursor_spec.rb b/spec/mongo/caching_cursor_spec.rb index e5b55fe910..e8648f5c10 100644 --- a/spec/mongo/caching_cursor_spec.rb +++ b/spec/mongo/caching_cursor_spec.rb @@ -23,7 +23,7 @@ end let(:reply) do - view.send(:send_initial_query, server) + view.send(:send_initial_query, server, Mongo::Operation::Context.new(client: authorized_client)) end let(:cursor) do diff --git a/spec/mongo/client_encryption_spec.rb b/spec/mongo/client_encryption_spec.rb index ca0e6cd99b..84b3cd59a5 100644 --- a/spec/mongo/client_encryption_spec.rb +++ b/spec/mongo/client_encryption_spec.rb @@ -303,6 +303,7 @@ end it 'raises a KmsError' do + skip 'https://jira.mongodb.org/browse/RUBY-3375' expect do data_key_id end.to raise_error(Mongo::Error::KmsError, /Error while connecting to socket/) diff --git a/spec/mongo/client_spec.rb b/spec/mongo/client_spec.rb index ac8bcc9148..6f37e708e1 100644 --- a/spec/mongo/client_spec.rb +++ b/spec/mongo/client_spec.rb @@ -561,6 +561,75 @@ expect(command['comment']).to eq('comment') end end + + context 'with timeout_ms' do + # To make it easier with failCommand + require_topology :single + min_server_version '4.4' + + before do + root_authorized_client.use('admin').command({ + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["listDatabases"], + blockConnection: true, + blockTimeMS: 100 + } + }) + end + + after do + root_authorized_client.use('admin').command({ + configureFailPoint: "failCommand", + mode: "off" + }) + end + + context 'when timeout_ms is set on command level' do + context 'when there is not enough time' do + it 'raises' do + expect do + monitored_client.database_names({}, timeout_ms: 50) + end.to raise_error(Mongo::Error::TimeoutError) + end + end + + context 'when there is enough time' do + it 'does not raise' do + expect do + monitored_client.database_names({}, timeout_ms: 200) + end.not_to raise_error + end + end + end + + context 'when timeout_ms is set on client level' do + context 'when there is not enough time' do + let(:client) do + root_authorized_client.with(timeout_ms: 50) + end + + it 'raises' do + expect do + client.database_names({}) + end.to raise_error(Mongo::Error::TimeoutError) + end + end + + context 'when there is enough time' do + let(:client) do + root_authorized_client.with(timeout_ms: 200) + end + + it 'does not raise' do + expect do + monitored_client.database_names({}) + end.not_to raise_error + end + end + end + end end describe '#list_databases' do @@ -572,8 +641,6 @@ end context 'when filter criteria is present' do - min_server_fcv '3.6' - include_context 'ensure test db exists' let(:result) do @@ -591,8 +658,6 @@ end context 'when name_only is true' do - min_server_fcv '3.6' - let(:command) do Utils.get_command_event(root_authorized_client, 'listDatabases') do |client| client.list_databases({}, true) @@ -667,6 +732,75 @@ expect(command['comment']).to eq('comment') end end + + context 'with timeout_ms' do + # To make it easier with failCommand + require_topology :single 
+ min_server_version '4.4' + + before do + root_authorized_client.use('admin').command({ + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["listDatabases"], + blockConnection: true, + blockTimeMS: 100 + } + }) + end + + after do + root_authorized_client.use('admin').command({ + configureFailPoint: "failCommand", + mode: "off" + }) + end + + context 'when timeout_ms is set on command level' do + context 'when there is not enough time' do + it 'raises' do + expect do + monitored_client.list_databases({}, false, timeout_ms: 50) + end.to raise_error(Mongo::Error::TimeoutError) + end + end + + context 'when there is enough time' do + it 'does not raise' do + expect do + monitored_client.list_databases({}, false, timeout_ms: 200) + end.not_to raise_error + end + end + end + + context 'when timeout_ms is set on client level' do + context 'when there is not enough time' do + let(:client) do + root_authorized_client.with(timeout_ms: 50) + end + + it 'raises' do + expect do + client.list_databases({}) + end.to raise_error(Mongo::Error::TimeoutError) + end + end + + context 'when there is enough time' do + let(:client) do + root_authorized_client.with(timeout_ms: 200) + end + + it 'does not raise' do + expect do + monitored_client.list_databases({}) + end.not_to raise_error + end + end + end + end end describe '#list_mongo_databases' do @@ -1156,6 +1290,26 @@ }.to raise_exception(Mongo::Error::InvalidSession) end end + + context 'when CSOT is set on the client' do + require_topology :replica_set + + let(:timeout_ms) { 10 } + + let(:timeout_sec) { timeout_ms / 1_000.0 } + + let(:client) do + authorized_client.with(timeout_ms: timeout_ms) + end + + it 'uses CSOT timeout set on the client' do + expect_any_instance_of(Mongo::ServerSelector::PrimaryPreferred).to( + receive(:select_server).with(anything, {timeout: timeout_sec}).and_call_original + ) + + client.start_session + end + end end describe '#summary' do diff --git a/spec/mongo/collection/view/aggregation_spec.rb b/spec/mongo/collection/view/aggregation_spec.rb index da363d9992..894d1cfcc0 100644 --- a/spec/mongo/collection/view/aggregation_spec.rb +++ b/spec/mongo/collection/view/aggregation_spec.rb @@ -156,8 +156,6 @@ end context 'when the initial response has no results but an active cursor' do - min_server_fcv '3.2' - let(:documents) do [ { city: 'a'*6000000 }, @@ -166,7 +164,7 @@ end let(:options) do - { :use_cursor => true } + {} end let(:pipeline) do @@ -486,48 +484,25 @@ end end - context 'when use_cursor is set' do - - context 'when use_cursor is true' do - - context 'when batch_size is set' do - - let(:options) do - { :use_cursor => true, - :batch_size => 10 - } - end - - it 'sets a batch size document in the spec' do - expect(aggregation_spec[:selector][:cursor][:batchSize]).to eq(options[:batch_size]) - end - end - - context 'when batch_size is not set' do - - let(:options) do - { :use_cursor => true } - end + context 'when batch_size is set' do - it 'sets an empty document in the spec' do - expect(aggregation_spec[:selector][:cursor]).to eq({}) - end - end + let(:options) do + { :batch_size => 10 } + end + it 'sets a batch size document in the spec' do + expect(aggregation_spec[:selector][:cursor][:batchSize]).to eq(options[:batch_size]) end + end - context 'when use_cursor is false' do + context 'when batch_size is not set' do - let(:options) do - { :use_cursor => false } - end - - context 'when batch_size is set' do + let(:options) do + {} + end - it 'does not set the cursor option in the spec' do - 
expect(aggregation_spec[:selector][:cursor]).to be_nil - end - end + it 'sets an empty document in the spec' do + expect(aggregation_spec[:selector][:cursor]).to eq({}) end end end diff --git a/spec/mongo/collection/view/change_stream_spec.rb b/spec/mongo/collection/view/change_stream_spec.rb index ad25ad65a8..42446d2eeb 100644 --- a/spec/mongo/collection/view/change_stream_spec.rb +++ b/spec/mongo/collection/view/change_stream_spec.rb @@ -507,7 +507,7 @@ end it 'includes the max_await_time value in the formatted string' do - expect(change_stream.inspect).to include({ max_await_time_ms: 10 }.to_s) + expect(change_stream.inspect).to include({ 'max_await_time_ms' => 10 }.to_s) end end @@ -518,7 +518,7 @@ end it 'includes the batch_size value in the formatted string' do - expect(change_stream.inspect).to include({ batch_size: 5 }.to_s) + expect(change_stream.inspect).to include({ 'batch_size' => 5 }.to_s) end end @@ -529,7 +529,7 @@ end it 'includes the collation value in the formatted string' do - expect(change_stream.inspect).to include({ 'collation' => { locale: 'en_US', strength: 2 } }.to_s) + expect(change_stream.inspect).to include({ 'collation' => { 'locale' => 'en_US', 'strength' => 2 } }.to_s) end end diff --git a/spec/mongo/collection_spec.rb b/spec/mongo/collection_spec.rb index ef58e44c84..a6d37224d1 100644 --- a/spec/mongo/collection_spec.rb +++ b/spec/mongo/collection_spec.rb @@ -828,13 +828,12 @@ let(:enum) { change_stream.to_enum } + let(:get_more) { subscriber.started_events.detect { |e| e.command['getMore'] }.command } + it 'sets the option correctly' do - expect(change_stream.instance_variable_get(:@cursor)).to receive(:get_more_operation).once.and_wrap_original do |m, *args, &block| - m.call(*args).tap do |op| - expect(op.max_time_ms).to eq(3000) - end - end - enum.next + enum.try_next + expect(get_more).not_to be_nil + expect(get_more['maxTimeMS']).to be == 3000 end it "waits the appropriate amount of time" do diff --git a/spec/mongo/crypt/auto_encrypter_spec.rb b/spec/mongo/crypt/auto_encrypter_spec.rb index 0c9aeb7922..5ed3246577 100644 --- a/spec/mongo/crypt/auto_encrypter_spec.rb +++ b/spec/mongo/crypt/auto_encrypter_spec.rb @@ -58,6 +58,8 @@ ) end + let(:operation_context) { Mongo::Operation::Context.new } + shared_context 'with jsonSchema validator' do before do users_collection = client.use(db_name)[collection_name] @@ -81,14 +83,14 @@ shared_examples 'a functioning auto encrypter' do describe '#encrypt' do it 'replaces the ssn field with a BSON::Binary' do - result = auto_encrypter.encrypt(db_name, command) + result = auto_encrypter.encrypt(db_name, command, operation_context) expect(result).to eq(encrypted_command) end end describe '#decrypt' do it 'returns the unencrypted document' do - result = auto_encrypter.decrypt(encrypted_command) + result = auto_encrypter.decrypt(encrypted_command, operation_context) expect(result).to eq(command) end end @@ -329,14 +331,14 @@ describe '#encrypt' do it 'does not perform encryption' do - result = auto_encrypter.encrypt(db_name, command) + result = auto_encrypter.encrypt(db_name, command, operation_context) expect(result).to eq(command) end end describe '#decrypt' do it 'still performs decryption' do - result = auto_encrypter.decrypt(encrypted_command) + result = auto_encrypter.decrypt(encrypted_command, operation_context) expect(result).to eq(command) end end @@ -347,14 +349,14 @@ describe '#encrypt' do it 'does not perform encryption' do - result = auto_encrypter.encrypt(db_name, command) + result = 
auto_encrypter.encrypt(db_name, command, operation_context) expect(result).to eq(command) end end describe '#decrypt' do it 'still performs decryption' do - result = auto_encrypter.decrypt(encrypted_command) + result = auto_encrypter.decrypt(encrypted_command, operation_context) expect(result).to eq(command) end end @@ -365,14 +367,14 @@ describe '#encrypt' do it 'does not perform encryption' do - result = auto_encrypter.encrypt(db_name, command) + result = auto_encrypter.encrypt(db_name, command, operation_context) expect(result).to eq(command) end end describe '#decrypt' do it 'still performs decryption' do - result = auto_encrypter.decrypt(encrypted_command) + result = auto_encrypter.decrypt(encrypted_command, operation_context) expect(result).to eq(command) end end @@ -383,14 +385,14 @@ describe '#encrypt' do it 'does not perform encryption' do - result = auto_encrypter.encrypt(db_name, command) + result = auto_encrypter.encrypt(db_name, command, operation_context) expect(result).to eq(command) end end describe '#decrypt' do it 'still performs decryption' do - result = auto_encrypter.decrypt(encrypted_command) + result = auto_encrypter.decrypt(encrypted_command, operation_context) expect(result).to eq(command) end end @@ -401,14 +403,14 @@ describe '#encrypt' do it 'does not perform encryption' do - result = auto_encrypter.encrypt(db_name, command) + result = auto_encrypter.encrypt(db_name, command, operation_context) expect(result).to eq(command) end end describe '#decrypt' do it 'still performs decryption' do - result = auto_encrypter.decrypt(encrypted_command) + result = auto_encrypter.decrypt(encrypted_command, operation_context) expect(result).to eq(command) end end diff --git a/spec/mongo/crypt/data_key_context_spec.rb b/spec/mongo/crypt/data_key_context_spec.rb index babcfac43c..b1b261e07b 100644 --- a/spec/mongo/crypt/data_key_context_spec.rb +++ b/spec/mongo/crypt/data_key_context_spec.rb @@ -136,8 +136,10 @@ ) end + let(:operation_context) { Mongo::Operation::Context.new } + it 'creates a data key' do - expect(context.run_state_machine).to be_a_kind_of(Hash) + expect(context.run_state_machine(operation_context)).to be_a_kind_of(Hash) end end end diff --git a/spec/mongo/cursor_spec.rb b/spec/mongo/cursor_spec.rb index bab8c632f6..0b4b3a8cb8 100644 --- a/spec/mongo/cursor_spec.rb +++ b/spec/mongo/cursor_spec.rb @@ -8,6 +8,10 @@ authorized_client['cursor_spec_collection'] end + let(:context) do + Mongo::Operation::Context.new(client: authorized_client) + end + before do authorized_collection.drop end @@ -18,7 +22,7 @@ end let(:reply) do - view.send(:send_initial_query, server) + view.send(:send_initial_query, server, context) end let(:cursor) do @@ -118,7 +122,7 @@ end let(:reply) do - view.send(:send_initial_query, server) + view.send(:send_initial_query, server, context) end let(:cursor) do @@ -645,7 +649,7 @@ end let(:reply) do - view.send(:send_initial_query, authorized_primary) + view.send(:send_initial_query, authorized_primary, context) end let(:cursor) do @@ -721,7 +725,7 @@ end let(:reply) do - view.send(:send_initial_query, server) + view.send(:send_initial_query, server, context) end let(:cursor) do diff --git a/spec/mongo/error/operation_failure_heavy_spec.rb b/spec/mongo/error/operation_failure_heavy_spec.rb index e18373992e..b0b41f9126 100644 --- a/spec/mongo/error/operation_failure_heavy_spec.rb +++ b/spec/mongo/error/operation_failure_heavy_spec.rb @@ -39,7 +39,7 @@ begin authorized_client['foo'].insert_one(test: 1) - rescue Mongo::Error::OperationFailure => exc 
+ rescue Mongo::Error::OperationFailure::Family => exc expect(exc.details).to eq(exc.document['writeConcernError']['errInfo']) expect(exc.server_message).to eq(exc.document['writeConcernError']['errmsg']) expect(exc.code).to eq(exc.document['writeConcernError']['code']) @@ -90,7 +90,7 @@ it 'succeeds and prints the error' do begin collection.insert_one({x: 1}) - rescue Mongo::Error::OperationFailure => e + rescue Mongo::Error::OperationFailure::Family => e insert_events = subscriber.succeeded_events.select { |e| e.command_name == "insert" } expect(insert_events.length).to eq 1 expect(e.message).to match(/\[#{e.code}(:.*)?\].+ -- .+/) diff --git a/spec/mongo/operation/context_spec.rb b/spec/mongo/operation/context_spec.rb new file mode 100644 index 0000000000..3f2bb1daa9 --- /dev/null +++ b/spec/mongo/operation/context_spec.rb @@ -0,0 +1,79 @@ +# frozen_string_literal: true + +require 'lite_spec_helper' + +describe Mongo::Operation::Context do + describe '#initialize' do + context 'when timeout_ms is negative' do + it 'raises an error' do + expect do + described_class.new(operation_timeouts: { operation_timeout_ms: -1 }) + end.to raise_error ArgumentError, /must be a non-negative integer/ + end + end + end + + describe '#deadline' do + let(:context) { described_class.new(operation_timeouts: { operation_timeout_ms: timeout_ms }) } + + context 'when timeout_ms is nil' do + let(:timeout_ms) { nil } + + it 'returns nil' do + expect(context.deadline).to be_nil + end + end + + context 'when timeout_ms is zero' do + let(:timeout_ms) { 0 } + + it 'returns nil' do + expect(context.deadline).to eq(0) + end + end + + context 'when timeout_ms is positive' do + before do + allow(Mongo::Utils).to receive(:monotonic_time).and_return(100.0) + end + + let(:timeout_ms) { 10_000 } + + it 'calculates the deadline' do + expect(context.deadline).to eq(110) + end + end + end + + describe '#remaining_timeout_ms' do + let(:context) { described_class.new(operation_timeouts: { operation_timeout_ms: timeout_ms }) } + + context 'when timeout_ms is nil' do + let(:timeout_ms) { nil } + + it 'returns nil' do + expect(context.remaining_timeout_ms).to be_nil + end + end + + context 'when timeout_ms is zero' do + let(:timeout_ms) { 0 } + + it 'returns nil' do + expect(context.remaining_timeout_ms).to be_nil + end + end + + context 'when timeout_ms is positive' do + before do + allow(Mongo::Utils).to receive(:monotonic_time).and_return(100.0, 105.0) + end + + let(:timeout_ms) { 10_000 } + + it 'calculates the remaining time' do + expect(context.remaining_timeout_ms).to eq(5_000) + end + end + end +end diff --git a/spec/mongo/operation/create/op_msg_spec.rb b/spec/mongo/operation/create/op_msg_spec.rb index f4033611b8..0ecafc8d52 100644 --- a/spec/mongo/operation/create/op_msg_spec.rb +++ b/spec/mongo/operation/create/op_msg_spec.rb @@ -2,8 +2,12 @@ # rubocop:todo all require 'spec_helper' +require_relative '../shared/csot/examples' describe Mongo::Operation::Create::OpMsg do + include CSOT::Examples + + let(:context) { Mongo::Operation::Context.new } let(:write_concern) do Mongo::WriteConcern.get(w: :majority) @@ -73,8 +77,6 @@ end describe '#selector' do - min_server_fcv '3.6' - it 'does not mutate user input' do user_input = IceNine.deep_freeze(spec.dup) expect do @@ -87,158 +89,152 @@ # https://jira.mongodb.org/browse/RUBY-2224 require_no_linting - context 'when the server supports OP_MSG' do + let(:global_args) do + { + create: TEST_COLL, + writeConcern: write_concern.options, + '$db' => SpecConfig.instance.test_db, + 
lsid: session.session_id + } + end + + let(:session) do + authorized_client.start_session + end - let(:global_args) do - { - create: TEST_COLL, - writeConcern: write_concern.options, - '$db' => SpecConfig.instance.test_db, - lsid: session.session_id - } + context 'when the topology is replica set or sharded' do + require_topology :replica_set, :sharded + + let(:expected_global_args) do + global_args.merge(Mongo::Operation::CLUSTER_TIME => authorized_client.cluster.cluster_time) end - let(:session) do - authorized_client.start_session + it 'creates the correct OP_MSG message' do + authorized_client.command(ping:1) + expect(Mongo::Protocol::Msg).to receive(:new).with([], {}, expected_global_args) + op.send(:message, connection) end + end - context 'when the topology is replica set or sharded' do - min_server_fcv '3.6' - require_topology :replica_set, :sharded + context 'when the topology is standalone' do + require_topology :single - let(:expected_global_args) do - global_args.merge(Mongo::Operation::CLUSTER_TIME => authorized_client.cluster.cluster_time) - end + let(:expected_global_args) do + global_args + end - it 'creates the correct OP_MSG message' do - authorized_client.command(ping:1) - expect(Mongo::Protocol::Msg).to receive(:new).with([], {}, expected_global_args) - op.send(:message, connection) - end + it 'creates the correct OP_MSG message' do + authorized_client.command(ping:1) + expect(Mongo::Protocol::Msg).to receive(:new).with([], {}, expected_global_args) + op.send(:message, connection) end - context 'when the topology is standalone' do - min_server_fcv '3.6' - require_topology :single + context 'when an implicit session is created and the topology is then updated and the server does not support sessions' do + # Mocks on features are incompatible with linting + require_no_linting let(:expected_global_args) do - global_args - end - - it 'creates the correct OP_MSG message' do - authorized_client.command(ping:1) - expect(Mongo::Protocol::Msg).to receive(:new).with([], {}, expected_global_args) - op.send(:message, connection) - end - - context 'when an implicit session is created and the topology is then updated and the server does not support sessions' do - # Mocks on features are incompatible with linting - require_no_linting - - let(:expected_global_args) do - global_args.dup.tap do |args| - args.delete(:lsid) - end + global_args.dup.tap do |args| + args.delete(:lsid) end + end - let(:session) do - Mongo::Session.new(nil, authorized_client, implicit: true).tap do |session| - allow(session).to receive(:session_id).and_return(42) - session.should be_implicit - end + let(:session) do + Mongo::Session.new(nil, authorized_client, implicit: true).tap do |session| + allow(session).to receive(:session_id).and_return(42) + session.should be_implicit end + end - it 'creates the correct OP_MSG message' do - RSpec::Mocks.with_temporary_scope do - expect(connection.features).to receive(:sessions_enabled?).and_return(false) + it 'creates the correct OP_MSG message' do + RSpec::Mocks.with_temporary_scope do + expect(connection.features).to receive(:sessions_enabled?).and_return(false) - expect(expected_global_args[:session]).to be nil - expect(Mongo::Protocol::Msg).to receive(:new).with([], {}, expected_global_args) - op.send(:message, connection) - end + expect(expected_global_args[:session]).to be nil + expect(Mongo::Protocol::Msg).to receive(:new).with([], {}, expected_global_args) + op.send(:message, connection) end end end + end - context 'when the write concern is 0' do + context 
'when the write concern is 0' do - let(:write_concern) do - Mongo::WriteConcern.get(w: 0) - end + let(:write_concern) do + Mongo::WriteConcern.get(w: 0) + end - context 'when the session is implicit' do + context 'when the session is implicit' do - let(:session) do - Mongo::Session.new(nil, authorized_client, implicit: true).tap do |session| - allow(session).to receive(:session_id).and_return(42) - session.should be_implicit - end + let(:session) do + Mongo::Session.new(nil, authorized_client, implicit: true).tap do |session| + allow(session).to receive(:session_id).and_return(42) + session.should be_implicit end + end - context 'when the topology is replica set or sharded' do - min_server_fcv '3.6' - require_topology :replica_set, :sharded + context 'when the topology is replica set or sharded' do + require_topology :replica_set, :sharded - let(:expected_global_args) do - global_args.dup.tap do |args| - args.delete(:lsid) - args.merge!(Mongo::Operation::CLUSTER_TIME => authorized_client.cluster.cluster_time) - end + let(:expected_global_args) do + global_args.dup.tap do |args| + args.delete(:lsid) + args.merge!(Mongo::Operation::CLUSTER_TIME => authorized_client.cluster.cluster_time) end + end - it 'does not send a session id in the command' do - authorized_client.command(ping:1) - expect(Mongo::Protocol::Msg).to receive(:new).with([:more_to_come], {}, expected_global_args) - op.send(:message, connection) - end + it 'does not send a session id in the command' do + authorized_client.command(ping:1) + expect(Mongo::Protocol::Msg).to receive(:new).with([:more_to_come], {}, expected_global_args) + op.send(:message, connection) end + end - context 'when the topology is standalone' do - min_server_fcv '3.6' - require_topology :single + context 'when the topology is standalone' do + require_topology :single - let(:expected_global_args) do - global_args.dup.tap do |args| - args.delete(:lsid) - end + let(:expected_global_args) do + global_args.dup.tap do |args| + args.delete(:lsid) end + end - it 'creates the correct OP_MSG message' do - authorized_client.command(ping:1) - expect(Mongo::Protocol::Msg).to receive(:new).with([:more_to_come], {}, expected_global_args) - op.send(:message, connection) - end + it 'creates the correct OP_MSG message' do + authorized_client.command(ping:1) + expect(Mongo::Protocol::Msg).to receive(:new).with([:more_to_come], {}, expected_global_args) + op.send(:message, connection) end end + end - context 'when the session is explicit' do - min_server_fcv '3.6' - require_topology :replica_set, :sharded + context 'when the session is explicit' do + require_topology :replica_set, :sharded - let(:session) do - authorized_client.start_session - end + let(:session) do + authorized_client.start_session + end - before do - session.should_not be_implicit - end + before do + session.should_not be_implicit + end - let(:expected_global_args) do - global_args.dup.tap do |args| - args.delete(:lsid) - args.merge!(Mongo::Operation::CLUSTER_TIME => authorized_client.cluster.cluster_time) - end + let(:expected_global_args) do + global_args.dup.tap do |args| + args.delete(:lsid) + args.merge!(Mongo::Operation::CLUSTER_TIME => authorized_client.cluster.cluster_time) end + end - it 'does not send a session id in the command' do - authorized_client.command(ping:1) - RSpec::Mocks.with_temporary_scope do - expect(Mongo::Protocol::Msg).to receive(:new).with([:more_to_come], {}, expected_global_args) - op.send(:message, connection) - end + it 'does not send a session id in the command' do + 
authorized_client.command(ping:1) + RSpec::Mocks.with_temporary_scope do + expect(Mongo::Protocol::Msg).to receive(:new).with([:more_to_come], {}, expected_global_args) + op.send(:message, connection) end end end end end + + it_behaves_like 'a CSOT-compliant OpMsg subclass' end diff --git a/spec/mongo/operation/delete/op_msg_spec.rb b/spec/mongo/operation/delete/op_msg_spec.rb index ab163e3840..2e477df33d 100644 --- a/spec/mongo/operation/delete/op_msg_spec.rb +++ b/spec/mongo/operation/delete/op_msg_spec.rb @@ -2,8 +2,12 @@ # rubocop:todo all require 'spec_helper' +require_relative '../shared/csot/examples' describe Mongo::Operation::Delete::OpMsg do + include CSOT::Examples + + let(:context) { Mongo::Operation::Context.new } let(:write_concern) do Mongo::WriteConcern.get(w: :majority) @@ -125,7 +129,6 @@ end context 'when the topology is replica set or sharded' do - min_server_fcv '3.6' require_topology :replica_set, :sharded let(:expected_global_args) do @@ -140,7 +143,6 @@ end context 'when the topology is standalone' do - min_server_fcv '3.6' require_topology :single let(:expected_global_args) do @@ -198,7 +200,6 @@ end context 'when the topology is replica set or sharded' do - min_server_fcv '3.6' require_topology :replica_set, :sharded let(:expected_global_args) do @@ -216,7 +217,6 @@ end context 'when the topology is standalone' do - min_server_fcv '3.6' require_topology :single let(:expected_global_args) do @@ -234,7 +234,6 @@ end context 'when the session is explicit' do - min_server_fcv '3.6' require_topology :replica_set, :sharded let(:session) do @@ -263,4 +262,6 @@ end end end + + it_behaves_like 'a CSOT-compliant OpMsg subclass' end diff --git a/spec/mongo/operation/find/op_msg_spec.rb b/spec/mongo/operation/find/op_msg_spec.rb new file mode 100644 index 0000000000..5a5868c475 --- /dev/null +++ b/spec/mongo/operation/find/op_msg_spec.rb @@ -0,0 +1,66 @@ +# frozen_string_literal: true + +require 'spec_helper' +require_relative '../shared/csot/examples' + +describe Mongo::Operation::Find::OpMsg do + include CSOT::Examples + + let(:spec) do + { coll_name: 'coll_name', + filter: {}, + db_name: 'db_name' } + end + + let(:op) { described_class.new(spec) } + + context 'when it is a CSOT-compliant OpMsg' do + include_examples 'mock CSOT environment' + + context 'when no timeout_ms set' do + it 'does not set maxTimeMS' do + expect(body.key?(:maxTimeMS)).to be false + end + end + + context 'when timeout_ms is set' do + let(:remaining_timeout_sec) { 3 } + + context 'when cursor is non-tailable' do + let(:cursor_type) { nil } + + context 'when timeout_mode is cursor_lifetime' do + let(:timeout_mode) { :cursor_lifetime } + + it 'sets maxTimeMS' do + expect(body[:maxTimeMS]).to be == 3_000 + end + end + + context 'when timeout_mode is iteration' do + let(:timeout_mode) { :iteration } + + it 'omits maxTimeMS' do + expect(body[:maxTimeMS]).to be_nil + end + end + end + + context 'when cursor is tailable' do + let(:cursor_type) { :tailable } + + it 'omits maxTimeMS' do + expect(body[:maxTimeMS]).to be_nil + end + end + + context 'when cursor is tailable_await' do + let(:cursor_type) { :tailable_await } + + it 'sets maxTimeMS' do + expect(body[:maxTimeMS]).to be == 3_000 + end + end + end + end +end diff --git a/spec/mongo/operation/get_more/op_msg_spec.rb b/spec/mongo/operation/get_more/op_msg_spec.rb new file mode 100644 index 0000000000..408ac6cac6 --- /dev/null +++ b/spec/mongo/operation/get_more/op_msg_spec.rb @@ -0,0 +1,65 @@ +# frozen_string_literal: true + +require 'spec_helper' 
+require_relative '../shared/csot/examples' + +describe Mongo::Operation::GetMore::OpMsg do + include CSOT::Examples + + let(:spec) do + { + options: {}, + db_name: 'db_name', + coll_name: 'coll_name', + cursor_id: 1_234_567_890, + } + end + + let(:op) { described_class.new(spec) } + + context 'when it is a CSOT-compliant OpMsg' do + include_examples 'mock CSOT environment' + + context 'when no timeout_ms set' do + it 'does not set maxTimeMS' do + expect(body.key?(:maxTimeMS)).to be false + end + end + + context 'when timeout_ms is set' do + let(:remaining_timeout_sec) { 3 } + + context 'when cursor is non-tailable' do + it 'omits maxTimeMS' do + expect(body[:maxTimeMS]).to be_nil + end + end + + context 'when cursor is tailable' do + let(:cursor_type) { :tailable } + + it 'omits maxTimeMS' do + expect(body[:maxTimeMS]).to be_nil + end + end + + context 'when cursor is tailable_await' do + let(:cursor_type) { :tailable_await } + + context 'when max_await_time_ms is omitted' do + it 'omits maxTimeMS' do + expect(body[:maxTimeMS]).to be_nil + end + end + + context 'when max_await_time_ms is given' do + let(:max_await_time_ms) { 1_234 } + + it 'sets maxTimeMS' do + expect(body[:maxTimeMS]).to be == 1_234 + end + end + end + end + end +end diff --git a/spec/mongo/operation/insert/op_msg_spec.rb b/spec/mongo/operation/insert/op_msg_spec.rb index 9da07f2ab6..9b9e28cc17 100644 --- a/spec/mongo/operation/insert/op_msg_spec.rb +++ b/spec/mongo/operation/insert/op_msg_spec.rb @@ -2,8 +2,12 @@ # rubocop:todo all require 'spec_helper' +require_relative '../shared/csot/examples' describe Mongo::Operation::Insert::OpMsg do + include CSOT::Examples + + let(:context) { Mongo::Operation::Context.new } let(:documents) { [{ :_id => 1, :foo => 1 }] } let(:session) { nil } @@ -104,193 +108,186 @@ # https://jira.mongodb.org/browse/RUBY-2224 require_no_linting - context 'when the server supports OP_MSG' do - min_server_fcv '3.6' + let(:documents) do + [ { foo: 1 }, { bar: 2 }] + end + + let(:global_args) do + { + insert: TEST_COLL, + ordered: true, + writeConcern: write_concern.options, + '$db' => SpecConfig.instance.test_db, + lsid: session.session_id + } + end + + let!(:expected_payload_1) do + Mongo::Protocol::Msg::Section1.new('documents', op.documents) + end - let(:documents) do - [ { foo: 1 }, { bar: 2 }] + let(:session) do + Mongo::Session.new(nil, authorized_client, implicit: true).tap do |session| + allow(session).to receive(:session_id).and_return(42) end + end - let(:global_args) do - { - insert: TEST_COLL, - ordered: true, - writeConcern: write_concern.options, - '$db' => SpecConfig.instance.test_db, - lsid: session.session_id - } + context 'when the topology is replica set or sharded' do + require_topology :replica_set, :sharded + + let(:expected_global_args) do + global_args.merge(Mongo::Operation::CLUSTER_TIME => authorized_client.cluster.cluster_time) + end + + it 'creates the correct OP_MSG message' do + authorized_client.command(ping:1) + RSpec::Mocks.with_temporary_scope do + expect(Mongo::Protocol::Msg).to receive(:new).with([], + {}, + expected_global_args, + expected_payload_1) + op.send(:message, connection) + end end + end + + context 'when the topology is standalone' do + require_topology :single - let!(:expected_payload_1) do - Mongo::Protocol::Msg::Section1.new('documents', op.documents) + let(:expected_global_args) do + global_args end - let(:session) do - Mongo::Session.new(nil, authorized_client, implicit: true).tap do |session| - allow(session).to 
receive(:session_id).and_return(42) + it 'creates the correct OP_MSG message' do + RSpec::Mocks.with_temporary_scope do + authorized_client.command(ping:1) + expect(Mongo::Protocol::Msg).to receive(:new).with([], + {}, + expected_global_args, + expected_payload_1) + op.send(:message, connection) end end - context 'when the topology is replica set or sharded' do - min_server_fcv '3.6' - require_topology :replica_set, :sharded + context 'when an implicit session is created and the topology is then updated and the server does not support sessions' do + # Mocks on features are incompatible with linting + require_no_linting let(:expected_global_args) do - global_args.merge(Mongo::Operation::CLUSTER_TIME => authorized_client.cluster.cluster_time) + global_args.dup.tap do |args| + args.delete(:lsid) + end + end + + before do + session.implicit?.should be true end it 'creates the correct OP_MSG message' do - authorized_client.command(ping:1) RSpec::Mocks.with_temporary_scope do + expect(connection.features).to receive(:sessions_enabled?).and_return(false) + + expect(expected_global_args).not_to have_key(:lsid) expect(Mongo::Protocol::Msg).to receive(:new).with([], - {}, - expected_global_args, - expected_payload_1) + {}, + expected_global_args, + expected_payload_1) op.send(:message, connection) end end end + end - context 'when the topology is standalone' do - min_server_fcv '3.6' - require_topology :single + context 'when the write concern is 0' do - let(:expected_global_args) do - global_args - end + let(:write_concern) do + Mongo::WriteConcern.get(w: 0) + end - it 'creates the correct OP_MSG message' do - RSpec::Mocks.with_temporary_scope do - authorized_client.command(ping:1) - expect(Mongo::Protocol::Msg).to receive(:new).with([], - {}, - expected_global_args, - expected_payload_1) - op.send(:message, connection) + context 'when the session is implicit' do + + let(:session) do + Mongo::Session.new(nil, authorized_client, implicit: true).tap do |session| + allow(session).to receive(:session_id).and_return(42) + session.should be_implicit end end - context 'when an implicit session is created and the topology is then updated and the server does not support sessions' do - # Mocks on features are incompatible with linting - require_no_linting + context 'when the topology is replica set or sharded' do + require_topology :replica_set, :sharded let(:expected_global_args) do global_args.dup.tap do |args| args.delete(:lsid) + args.merge!(Mongo::Operation::CLUSTER_TIME => authorized_client.cluster.cluster_time) end end - before do - session.implicit?.should be true - end - - it 'creates the correct OP_MSG message' do + it 'does not send a session id in the command' do + authorized_client.command(ping:1) RSpec::Mocks.with_temporary_scope do - expect(connection.features).to receive(:sessions_enabled?).and_return(false) - - expect(expected_global_args).not_to have_key(:lsid) - expect(Mongo::Protocol::Msg).to receive(:new).with([], - {}, - expected_global_args, - expected_payload_1) + expect(Mongo::Protocol::Msg).to receive(:new).with([:more_to_come], + {}, + expected_global_args, + expected_payload_1) op.send(:message, connection) end end end - end - context 'when the write concern is 0' do - - let(:write_concern) do - Mongo::WriteConcern.get(w: 0) - end + context 'when the topology is standalone' do + require_topology :single - context 'when the session is implicit' do - - let(:session) do - Mongo::Session.new(nil, authorized_client, implicit: true).tap do |session| - allow(session).to 
receive(:session_id).and_return(42) - session.should be_implicit - end - end - - context 'when the topology is replica set or sharded' do - min_server_fcv '3.6' - require_topology :replica_set, :sharded - - let(:expected_global_args) do - global_args.dup.tap do |args| - args.delete(:lsid) - args.merge!(Mongo::Operation::CLUSTER_TIME => authorized_client.cluster.cluster_time) - end - end - - it 'does not send a session id in the command' do - authorized_client.command(ping:1) - RSpec::Mocks.with_temporary_scope do - expect(Mongo::Protocol::Msg).to receive(:new).with([:more_to_come], - {}, - expected_global_args, - expected_payload_1) - op.send(:message, connection) - end + let(:expected_global_args) do + global_args.dup.tap do |args| + args.delete(:lsid) end end - context 'when the topology is standalone' do - min_server_fcv '3.6' - require_topology :single - - let(:expected_global_args) do - global_args.dup.tap do |args| - args.delete(:lsid) - end - end - - it 'creates the correct OP_MSG message' do - authorized_client.command(ping:1) - RSpec::Mocks.with_temporary_scope do - expect(Mongo::Protocol::Msg).to receive(:new).with([:more_to_come], - {}, - expected_global_args, - expected_payload_1) - op.send(:message, connection) - end + it 'creates the correct OP_MSG message' do + authorized_client.command(ping:1) + RSpec::Mocks.with_temporary_scope do + expect(Mongo::Protocol::Msg).to receive(:new).with([:more_to_come], + {}, + expected_global_args, + expected_payload_1) + op.send(:message, connection) end end end + end - context 'when the session is explicit' do - min_server_fcv '3.6' - require_topology :replica_set, :sharded + context 'when the session is explicit' do + require_topology :replica_set, :sharded - let(:session) do - authorized_client.start_session - end + let(:session) do + authorized_client.start_session + end - before do - session.should_not be_implicit - end + before do + session.should_not be_implicit + end - let(:expected_global_args) do - global_args.dup.tap do |args| - args.delete(:lsid) - args.merge!(Mongo::Operation::CLUSTER_TIME => authorized_client.cluster.cluster_time) - end + let(:expected_global_args) do + global_args.dup.tap do |args| + args.delete(:lsid) + args.merge!(Mongo::Operation::CLUSTER_TIME => authorized_client.cluster.cluster_time) end + end - it 'does not send a session id in the command' do - authorized_client.command(ping:1) - RSpec::Mocks.with_temporary_scope do - expect(Mongo::Protocol::Msg).to receive(:new).with([:more_to_come], - {}, - expected_global_args, - expected_payload_1) - op.send(:message, connection) - end + it 'does not send a session id in the command' do + authorized_client.command(ping:1) + RSpec::Mocks.with_temporary_scope do + expect(Mongo::Protocol::Msg).to receive(:new).with([:more_to_come], + {}, + expected_global_args, + expected_payload_1) + op.send(:message, connection) end end end end end + + it_behaves_like 'a CSOT-compliant OpMsg subclass' end diff --git a/spec/mongo/operation/shared/csot/examples.rb b/spec/mongo/operation/shared/csot/examples.rb new file mode 100644 index 0000000000..24c43a43d1 --- /dev/null +++ b/spec/mongo/operation/shared/csot/examples.rb @@ -0,0 +1,113 @@ +# frozen_string_literal: true +# rubocop:todo all + +module CSOT + module Examples + # expects the following values to be available: + # `op` -- an instance of an OpMsgBase subclass + def self.included(example_context) + example_context.shared_examples 'mock CSOT environment' do + # Linting freaks out because of the doubles used in these specs. 
+ require_no_linting + + let(:message) { op.send(:message, connection) } + + let(:body) { message.documents.first } + + let(:cursor_type) { nil } + let(:timeout_mode) { nil } + let(:remaining_timeout_sec) { nil } + let(:minimum_round_trip_time) { 0 } + let(:view_options) { {} } + let(:max_await_time_ms) { nil } + + let(:view) do + instance_double(Mongo::Collection::View).tap do |view| + allow(view).to receive(:cursor_type).and_return(cursor_type) + allow(view).to receive(:timeout_mode).and_return(timeout_mode) + allow(view).to receive(:options).and_return(view_options) + allow(view).to receive(:max_await_time_ms).and_return(max_await_time_ms) + end + end + + let(:context) do + Mongo::Operation::Context.new(view: view).tap do |context| + allow(context).to receive(:remaining_timeout_sec).and_return(remaining_timeout_sec) + allow(context).to receive(:timeout?).and_return(!remaining_timeout_sec.nil?) + end + end + + let(:server) do + instance_double(Mongo::Server).tap do |server| + allow(server).to receive(:minimum_round_trip_time).and_return(minimum_round_trip_time) + end + end + + let(:address) { Mongo::Address.new('127.0.0.1') } + + let(:description) do + Mongo::Server::Description.new( + address, { Mongo::Operation::Result::OK => 1 } + ) + end + + let(:features) do + Mongo::Server::Description::Features.new( + Mongo::Server::Description::Features::DRIVER_WIRE_VERSIONS, + address + ) + end + + let(:connection) do + instance_double(Mongo::Server::Connection).tap do |conn| + allow(conn).to receive(:server).and_return(server) + allow(conn).to receive(:description).and_return(description) + allow(conn).to receive(:features).and_return(features) + end + end + + before do + # context is normally set when calling `execute` on the operation, + # but since we're not doing that, we have to tell the operation + # what the context is. 
+ op.context = context + end + end + + example_context.shared_examples 'a CSOT-compliant OpMsg subclass' do + include_examples 'mock CSOT environment' + + context 'when no timeout_ms set' do + it 'does not set maxTimeMS' do + expect(body.key?(:maxTimeMS)).to be false + end + end + + context 'when there is enough time to send the message' do + # Ten seconds remaining + let(:remaining_timeout_sec) { 10 } + + # One second RTT + let(:minimum_round_trip_time) { 1 } + + it 'sets the maxTimeMS' do + # Nine seconds + expect(body[:maxTimeMS]).to eq(9_000) + end + end + + context 'when there is not enough time to send the message' do + # Ten seconds remaining + let(:remaining_timeout_sec) { 0.1 } + + # One second RTT + let(:minimum_round_trip_time) { 1 } + + it 'fails with an exception' do + expect { message }.to raise_error(Mongo::Error::TimeoutError) + end + end + end + end + end +end diff --git a/spec/mongo/query_cache_spec.rb b/spec/mongo/query_cache_spec.rb index a644facf14..a39e259676 100644 --- a/spec/mongo/query_cache_spec.rb +++ b/spec/mongo/query_cache_spec.rb @@ -138,12 +138,19 @@ end describe '#get' do - let(:view) { double("Mongo::Collection::View") } + let(:view) do + double("Mongo::Collection::View").tap do |view| + allow(view).to receive(:client).and_return(client) + allow(view).to receive(:operation_timeouts).and_return({}) + end + end + let(:result) do double("Mongo::Operation::Result").tap do |result| allow(result).to receive(:is_a?).with(Mongo::Operation::Result).and_return(true) end end + let(:server) { double("Mongo::Server") } let(:caching_cursor) { Mongo::CachingCursor.new(view, result, server) } diff --git a/spec/mongo/retryable_spec.rb b/spec/mongo/retryable_spec.rb index 2c9947816f..d7f6b9697c 100644 --- a/spec/mongo/retryable_spec.rb +++ b/spec/mongo/retryable_spec.rb @@ -76,6 +76,7 @@ def session allow(session).to receive(:pinned_connection_global_id) allow(session).to receive(:starting_transaction?).and_return(false) allow(session).to receive(:materialize) + allow(session).to receive(:with_transaction_deadline).and_return(nil) end end diff --git a/spec/mongo/server/round_trip_time_averager_spec.rb b/spec/mongo/server/round_trip_time_averager_spec.rb deleted file mode 100644 index 9e885d2246..0000000000 --- a/spec/mongo/server/round_trip_time_averager_spec.rb +++ /dev/null @@ -1,48 +0,0 @@ -# frozen_string_literal: true -# rubocop:todo all - -require 'spec_helper' - -describe Mongo::Server::RoundTripTimeAverager do - let(:averager) { Mongo::Server::RoundTripTimeAverager.new } - - describe '#update_average_round_trip_time' do - context 'no existing average rtt' do - it 'updates average rtt' do - averager.instance_variable_set('@last_round_trip_time', 5) - averager.send(:update_average_round_trip_time) - expect(averager.average_round_trip_time).to eq(5) - end - end - - context 'with existing average rtt' do - it 'averages with existing average rtt' do - averager.instance_variable_set('@last_round_trip_time', 5) - averager.instance_variable_set('@average_round_trip_time', 10) - averager.send(:update_average_round_trip_time) - expect(averager.average_round_trip_time).to eq(9) - end - end - end - - describe '#measure' do - context 'block does not raise' do - it 'updates average rtt' do - expect(averager).to receive(:update_average_round_trip_time) - averager.measure do - end - end - end - - context 'block raises' do - it 'does not update average rtt' do - expect(averager).not_to receive(:update_average_round_trip_time) - lambda do - averager.measure do - raise "Problem" - end 
- end.should raise_error(/Problem/) - end - end - end -end diff --git a/spec/mongo/server/round_trip_time_calculator_spec.rb b/spec/mongo/server/round_trip_time_calculator_spec.rb new file mode 100644 index 0000000000..ce4dbd39ad --- /dev/null +++ b/spec/mongo/server/round_trip_time_calculator_spec.rb @@ -0,0 +1,120 @@ +# frozen_string_literal: true +# rubocop:todo all + +require 'spec_helper' + +describe Mongo::Server::RoundTripTimeCalculator do + let(:calculator) { Mongo::Server::RoundTripTimeCalculator.new } + + describe '#update_average_round_trip_time' do + context 'no existing average rtt' do + it 'updates average rtt' do + calculator.instance_variable_set('@last_round_trip_time', 5) + calculator.update_average_round_trip_time + expect(calculator.average_round_trip_time).to eq(5) + end + end + + context 'with existing average rtt' do + it 'averages with existing average rtt' do + calculator.instance_variable_set('@last_round_trip_time', 5) + calculator.instance_variable_set('@average_round_trip_time', 10) + calculator.update_average_round_trip_time + expect(calculator.average_round_trip_time).to eq(9) + end + end + end + + describe '#update_minimum_round_trip_time' do + context 'with no samples' do + it 'sets minimum_round_trip_time to zero' do + calculator.update_minimum_round_trip_time + expect(calculator.minimum_round_trip_time).to eq(0) + end + end + + context 'with one sample' do + before do + calculator.instance_variable_set('@last_round_trip_time', 5) + end + + it 'sets minimum_round_trip_time to zero' do + calculator.update_minimum_round_trip_time + expect(calculator.minimum_round_trip_time).to eq(0) + end + end + + context 'with two samples' do + before do + calculator.instance_variable_set('@last_round_trip_time', 10) + calculator.instance_variable_set('@rtts', [5]) + end + + it 'sets minimum_round_trip_time to zero' do + calculator.update_minimum_round_trip_time + expect(calculator.minimum_round_trip_time).to eq(0) + end + end + + context 'with samples less than maximum' do + before do + calculator.instance_variable_set('@last_round_trip_time', 10) + calculator.instance_variable_set('@rtts', [5, 4, 120]) + end + + it 'properly sets minimum_round_trip_time' do + calculator.update_minimum_round_trip_time + expect(calculator.minimum_round_trip_time).to eq(4) + end + end + + context 'with more than maximum samples' do + before do + calculator.instance_variable_set('@last_round_trip_time', 2) + calculator.instance_variable_set('@rtts', [1, 20, 15, 4, 5, 6, 7, 39, 8, 4]) + end + + it 'properly sets minimum_round_trip_time' do + calculator.update_minimum_round_trip_time + expect(calculator.minimum_round_trip_time).to eq(2) + end + end + + end + + describe '#measure' do + context 'block does not raise' do + it 'updates average rtt' do + expect(calculator).to receive(:update_average_round_trip_time) + calculator.measure do + end + end + + it 'updates minimum rtt' do + expect(calculator).to receive(:update_minimum_round_trip_time) + calculator.measure do + end + end + end + + context 'block raises' do + it 'does not update average rtt' do + expect(calculator).not_to receive(:update_average_round_trip_time) + expect do + calculator.measure do + raise "Problem" + end + end.to raise_error(/Problem/) + end + + it 'does not update minimum rtt' do + expect(calculator).not_to receive(:update_minimum_round_trip_time) + expect do + calculator.measure do + raise "Problem" + end + end.to raise_error(/Problem/) + end + end + end +end diff --git a/spec/mongo/socket/ssl_spec.rb 
b/spec/mongo/socket/ssl_spec.rb index ecdb74e06a..28be3ce78f 100644 --- a/spec/mongo/socket/ssl_spec.rb +++ b/spec/mongo/socket/ssl_spec.rb @@ -103,16 +103,6 @@ expect(socket).to be_alive end end - - context 'when connecting the tcp socket raises an exception' do - - it 'raises an exception' do - expect_any_instance_of(::Socket).to receive(:connect).and_raise(Mongo::Error::SocketTimeoutError) - expect do - socket - end.to raise_error(Mongo::Error::SocketTimeoutError) - end - end end context 'when a certificate and key are provided as strings' do diff --git a/spec/runners/change_streams/test.rb b/spec/runners/change_streams/test.rb index e7b2799b6a..756e7e8fda 100644 --- a/spec/runners/change_streams/test.rb +++ b/spec/runners/change_streams/test.rb @@ -111,7 +111,7 @@ def teardown_test def run change_stream = begin @target.watch(@pipeline, ::Utils.snakeize_hash(@options)) - rescue Mongo::Error::OperationFailure => e + rescue Mongo::Error::OperationFailure::Family => e return { result: { error: { @@ -146,7 +146,7 @@ def run begin change = enum.next changes << change - rescue Mongo::Error::OperationFailure => e + rescue Mongo::Error::OperationFailure::Family => e return { result: { error: { diff --git a/spec/runners/crud/operation.rb b/spec/runners/crud/operation.rb index 8e93bccd2d..569a169a01 100644 --- a/spec/runners/crud/operation.rb +++ b/spec/runners/crud/operation.rb @@ -355,7 +355,7 @@ def assert_index_not_exists(client, context) if coll.indexes.map { |doc| doc['name'] }.include?(ixn = arguments.fetch('index')) raise "Index #{ixn} exists in collection #{cn} in database #{dn}, but must not" end - rescue Mongo::Error::OperationFailure => e + rescue Mongo::Error::OperationFailure::Family => e if e.to_s =~ /ns does not exist/ # Success. else diff --git a/spec/runners/transactions/operation.rb b/spec/runners/transactions/operation.rb index 3f74c65305..afd4282ad0 100644 --- a/spec/runners/transactions/operation.rb +++ b/spec/runners/transactions/operation.rb @@ -43,12 +43,10 @@ def execute(target, context) end result - rescue Mongo::Error::OperationFailure => e - result = e.instance_variable_get(:@result) - if result.nil? - raise "OperationFailure had nil result: #{e}" - end - err_doc = result.send(:first_document) + rescue Mongo::Error::OperationFailure::Family => e + raise "OperationFailure had nil result: #{e}" if e.result.nil? + + err_doc = e.result.send(:first_document) error_code_name = err_doc['codeName'] || err_doc['writeConcernError'] && err_doc['writeConcernError']['codeName'] if error_code_name.nil? # Sometimes the server does not return the error code name, diff --git a/spec/runners/unified.rb b/spec/runners/unified.rb index 7ab3b1904c..042b1c3947 100644 --- a/spec/runners/unified.rb +++ b/spec/runners/unified.rb @@ -72,7 +72,7 @@ def define_unified_spec_tests(base_path, paths, expect_failure: false) test.assert_events # HACK: other errors are possible and likely will need to # be added here later as the tests evolve. 
-        rescue Mongo::Error::OperationFailure, Unified::Error::UnsupportedOperation, UsingHash::UsingHashKeyError, Unified::Error::EntityMissing
+        rescue Mongo::Error::OperationFailure::Family, Unified::Error::UnsupportedOperation, UsingHash::UsingHashKeyError, Unified::Error::EntityMissing
         rescue => e
           fail "Expected to raise Mongo::Error::OperationFailure or Unified::Error::UnsupportedOperation or UsingHash::UsingHashKeyError or Unified::Error::EntityMissing, got #{e.class}: #{e}"
         else
diff --git a/spec/runners/unified/ambiguous_operations.rb b/spec/runners/unified/ambiguous_operations.rb
new file mode 100644
index 0000000000..c83364aa0f
--- /dev/null
+++ b/spec/runners/unified/ambiguous_operations.rb
@@ -0,0 +1,13 @@
+# frozen_string_literal: true
+
+module Unified
+  module AmbiguousOperations
+    def find(op)
+      entities.get(:collection, op['object'])
+      crud_find(op)
+    rescue Unified::Error::EntityMissing
+      entities.get(:bucket, op['object'])
+      gridfs_find(op)
+    end
+  end
+end
diff --git a/spec/runners/unified/assertions.rb b/spec/runners/unified/assertions.rb
index 908fe80fbb..69cb2282ad 100644
--- a/spec/runners/unified/assertions.rb
+++ b/spec/runners/unified/assertions.rb
@@ -379,6 +379,10 @@ def assert_value_matches(actual, expected, msg)
       unless actual == result
         raise Error::ResultMismatch, "Actual value #{actual} does not match entity #{expected_v} with value #{result}"
       end
+    when '$$lte'
+      if actual.nil? || actual > expected_v
+        raise Error::ResultMismatch, "Actual value #{actual} should be less than or equal to #{expected_v}"
+      end
     else
       raise NotImplementedError, "Unknown operator #{operator}"
     end
diff --git a/spec/runners/unified/change_stream_operations.rb b/spec/runners/unified/change_stream_operations.rb
index 343486db04..ade607f40c 100644
--- a/spec/runners/unified/change_stream_operations.rb
+++ b/spec/runners/unified/change_stream_operations.rb
@@ -10,22 +10,9 @@ def create_change_stream(op)
       object = entities.get_any(object_id)
       use_arguments(op) do |args|
         pipeline = args.use!('pipeline')
-        opts = {}
-        if batch_size = args.use('batchSize')
-          opts[:batch_size] = batch_size
-        end
-        if comment = args.use('comment')
-          opts[:comment] = comment
-        end
-        if full_document = args.use('fullDocument')
-          opts[:full_document] = full_document
-        end
-        if full_document_before_change = args.use('fullDocumentBeforeChange')
-          opts[:full_document_before_change] = full_document_before_change
-        end
-        if args.key?('showExpandedEvents')
-          opts[:show_expanded_events] = args.use!('showExpandedEvents')
-        end
+        opts = extract_options(args, 'batchSize', 'comment', 'fullDocument',
+          'fullDocumentBeforeChange', 'showExpandedEvents', 'timeoutMS',
+          'maxAwaitTimeMS')
         cs = object.watch(pipeline, **opts)
         if name = op.use('saveResultAsEntity')
           entities.set(:change_stream, name, cs)
@@ -35,18 +22,21 @@ def create_change_stream(op)
     def iterate_until_document_or_error(op)
       object_id = op.use!('object')
-      object = entities.get(:change_stream, object_id)
-      object.to_enum.next
+      object = entities.get_any(object_id)
+      object.try_next
+    end
+
+    def iterate_once(op)
+      stream_id = op.use!('object')
+      stream = entities.get_any(stream_id)
+      stream.try_next
     end
 
     def close(op)
       object_id = op.use!('object')
-      # The Ruby driver unified spec runner does not currently implement
-      # find cursors as created by createFindCursor. This will be done
-      # as part of CSOT implementation. When this is done, the line(s) below
-      # should be changed to retrieve such cursor instances and close them.
- object = entities.get(:csot_cursor, object_id) - object.close + opts = op.key?('arguments') ? extract_options(op.use!('arguments'), 'timeoutMS') : {} + object = entities.get_any(object_id) + object.close(opts) end end end diff --git a/spec/runners/unified/crud_operations.rb b/spec/runners/unified/crud_operations.rb index 69e35513a8..fb2387ce6e 100644 --- a/spec/runners/unified/crud_operations.rb +++ b/spec/runners/unified/crud_operations.rb @@ -5,7 +5,7 @@ module Unified module CrudOperations - def find(op) + def crud_find(op) get_find_view(op).to_a end @@ -16,35 +16,21 @@ def find_one(op) def get_find_view(op) collection = entities.get(:collection, op.use!('object')) use_arguments(op) do |args| - opts = { - let: args.use('let'), - comment: args.use('comment'), - allow_disk_use: args.use('allowDiskUse'), - show_disk_loc: args.use('showRecordId'), - return_key: args.use('returnKey'), - projection: args.use('projection'), - skip: args.use('skip'), - hint: args.use('hint'), - max_value: args.use('max'), - max_time_ms: args.use('maxTimeMS'), - min_value: args.use('min'), - } - if session = args.use('session') - opts[:session] = entities.get(:session, session) - end - if collation = args.use('collation') - opts[:collation] = collation - end - if args.key?('noCursorTimeout') - opts[:no_cursor_timeout] = args.use('noCursorTimeout') - end - if args.key?('oplogReplay') - opts[:oplog_replay] = args.use('oplogReplay') - end - if args.key?('allowPartialResults') - opts[:allow_partial_results] = args.use('allowPartialResults') - end - req = collection.find(args.use!('filter'), **opts) + filter = args.use!('filter') + session = args.use('session') + + opts = extract_options(args, 'let', 'comment', + 'allowDiskUse', 'returnKey', 'projection', + 'skip', 'hint', 'maxTimeMS', 'timeoutMS', + 'collation', 'noCursorTimeout', 'oplogReplay', 'allowPartialResults', + 'timeoutMode', 'maxAwaitTimeMS', 'cursorType', 'timeoutMode', + { 'showRecordId' => :show_disk_loc, 'max' => :max_value, 'min' => :min_value }, + allow_extra: true) + symbolize_options!(opts, :timeout_mode, :cursor_type) + + opts[:session] = entities.get(:session, session) if session + + req = collection.find(filter, **opts) if batch_size = args.use('batchSize') req = req.batch_size(batch_size) end @@ -61,15 +47,23 @@ def get_find_view(op) end end - def count_documents(op) + def count(op) collection = entities.get(:collection, op.use!('object')) use_arguments(op) do |args| - opts = {} + opts = extract_options(args, 'comment', 'timeoutMS', 'maxTimeMS', allow_extra: true) if session = args.use('session') opts[:session] = entities.get(:session, session) end - if comment = args.use('comment') - opts[:comment] = comment + collection.count(args.use!('filter'), **opts) + end + end + + def count_documents(op) + collection = entities.get(:collection, op.use!('object')) + use_arguments(op) do |args| + opts = extract_options(args, 'comment', 'timeoutMS', 'maxTimeMS', allow_extra: true) + if session = args.use('session') + opts[:session] = entities.get(:session, session) end collection.find(args.use!('filter')).count_documents(**opts) end @@ -78,12 +72,9 @@ def count_documents(op) def estimated_document_count(op) collection = entities.get(:collection, op.use!('object')) use_arguments(op) do |args| - opts = {} - if max_time_ms = args.use('maxTimeMS') - opts[:max_time_ms] = max_time_ms - end - if comment = args.use('comment') - opts[:comment] = comment + opts = extract_options(args, 'comment', 'timeoutMS', 'maxTimeMS', allow_extra: true) + if session = 
args.use('session') + opts[:session] = entities.get(:session, session) end collection.estimated_document_count(**opts) end @@ -92,13 +83,10 @@ def estimated_document_count(op) def distinct(op) collection = entities.get(:collection, op.use!('object')) use_arguments(op) do |args| - opts = {} + opts = extract_options(args, 'comment', 'timeoutMS', 'maxTimeMS', allow_extra: true) if session = args.use('session') opts[:session] = entities.get(:session, session) end - if comment = args.use('comment') - opts[:comment] = comment - end req = collection.find(args.use!('filter'), **opts).distinct(args.use!('fieldName'), **opts) result = req.to_a end @@ -114,6 +102,8 @@ def find_one_and_update(op) comment: args.use('comment'), hint: args.use('hint'), upsert: args.use('upsert'), + timeout_ms: args.use('timeoutMS'), + max_time_ms: args.use('maxTimeMS') } if return_document = args.use('returnDocument') opts[:return_document] = return_document.downcase.to_sym @@ -134,6 +124,8 @@ def find_one_and_replace(op) let: args.use('let'), comment: args.use('comment'), hint: args.use('hint'), + timeout_ms: args.use('timeoutMS'), + max_time_ms: args.use('maxTimeMS') } if session = args.use('session') opts[:session] = entities.get(:session, session) @@ -150,6 +142,8 @@ def find_one_and_delete(op) let: args.use('let'), comment: args.use('comment'), hint: args.use('hint'), + timeout_ms: args.use('timeoutMS'), + max_time_ms: args.use('maxTimeMS') } if session = args.use('session') opts[:session] = entities.get(:session, session) @@ -162,7 +156,9 @@ def insert_one(op) collection = entities.get(:collection, op.use!('object')) use_arguments(op) do |args| opts = { - comment: args.use('comment') + comment: args.use('comment'), + timeout_ms: args.use('timeoutMS'), + max_time_ms: args.use('maxTimeMS') } if session = args.use('session') opts[:session] = entities.get(:session, session) @@ -175,7 +171,9 @@ def insert_many(op) collection = entities.get(:collection, op.use!('object')) use_arguments(op) do |args| opts = { - comment: args.use('comment') + comment: args.use('comment'), + timeout_ms: args.use('timeoutMS'), + max_time_ms: args.use('maxTimeMS') } unless (ordered = args.use('ordered')).nil? 
opts[:ordered] = ordered @@ -195,6 +193,8 @@ def update_one(op) comment: args.use('comment'), hint: args.use('hint'), upsert: args.use('upsert'), + timeout_ms: args.use('timeoutMS'), + max_time_ms: args.use('maxTimeMS') } if session = args.use('session') opts[:session] = entities.get(:session, session) @@ -210,6 +210,8 @@ def update_many(op) let: args.use('let'), comment: args.use('comment'), hint: args.use('hint'), + timeout_ms: args.use('timeoutMS'), + max_time_ms: args.use('maxTimeMS') } collection.update_many(args.use!('filter'), args.use!('update'), **opts) end @@ -224,7 +226,9 @@ def replace_one(op) comment: args.use('comment'), upsert: args.use('upsert'), let: args.use('let'), - hint: args.use('hint') + hint: args.use('hint'), + timeout_ms: args.use('timeoutMS'), + max_time_ms: args.use('maxTimeMS') ) end end @@ -236,6 +240,8 @@ def delete_one(op) let: args.use('let'), comment: args.use('comment'), hint: args.use('hint'), + timeout_ms: args.use('timeoutMS'), + max_time_ms: args.use('maxTimeMS') } if session = args.use('session') opts[:session] = entities.get(:session, session) @@ -251,6 +257,8 @@ def delete_many(op) let: args.use('let'), comment: args.use('comment'), hint: args.use('hint'), + timeout_ms: args.use('timeoutMS'), + max_time_ms: args.use('maxTimeMS') } collection.delete_many(args.use!('filter'), **opts) end @@ -272,6 +280,12 @@ def bulk_write(op) if let = args.use('let') opts[:let] = let end + if timeout_ms = args.use('timeoutMS') + opts[:timeout_ms] = timeout_ms + end + if max_time_ms = args.use('maxTimeMS') + opts[:max_time_ms] = max_time_ms + end collection.bulk_write(requests, **opts) end end @@ -280,27 +294,36 @@ def aggregate(op) obj = entities.get_any(op.use!('object')) args = op.use!('arguments') pipeline = args.use!('pipeline') - opts = { - let: args.use('let'), - } + + opts = extract_options(args, 'let', 'comment', 'batchSize', 'maxTimeMS', + 'allowDiskUse', 'timeoutMode', 'timeoutMS', 'maxTimeMS', allow_extra: true) + symbolize_options!(opts, :timeout_mode) + if session = args.use('session') opts[:session] = entities.get(:session, session) end - if comment = args.use('comment') - opts[:comment] = comment - end - if batch_size = args.use('batchSize') - opts[:batch_size] = batch_size - end - if args.key?('allowDiskUse') - opts[:allow_disk_use] = args.use('allowDiskUse') - end + unless args.empty? 
raise NotImplementedError, "Unhandled spec keys: #{args} in #{test_spec}" end + obj.aggregate(pipeline, **opts).to_a end + def create_find_cursor(op) + obj = entities.get_any(op.use!('object')) + args = op.use!('arguments') + + filter = args.use('filter') + opts = extract_options(args, 'batchSize', 'timeoutMS', 'cursorType', 'maxAwaitTimeMS') + symbolize_options!(opts, :cursor_type) + + view = obj.find(filter, opts) + view.each # to initialize the cursor + + view.cursor + end + private def convert_bulk_write_spec(spec) diff --git a/spec/runners/unified/ddl_operations.rb b/spec/runners/unified/ddl_operations.rb index 185a251b7a..bdfa916a1f 100644 --- a/spec/runners/unified/ddl_operations.rb +++ b/spec/runners/unified/ddl_operations.rb @@ -20,6 +20,9 @@ def list_dbs(op, name_only: false) if session = args.use('session') opts[:session] = entities.get(:session, session) end + if timeout_ms = args.use('timeoutMS') + opts[:timeout_ms] = timeout_ms + end client.list_databases(args.use('filter') || {}, name_only, **opts) end end @@ -50,6 +53,15 @@ def create_collection(op) if pipeline = args.use('pipeline') collection_opts[:pipeline] = pipeline end + if capped = args.use('capped') + collection_opts[:capped] = capped + end + if size = args.use('size') + collection_opts[:size] = size + end + if max = args.use('max') + collection_opts[:max] = max + end database[args.use!('collection'), collection_opts].create(**opts) end end @@ -65,13 +77,16 @@ def list_collection_names(op) def list_colls(op, name_only: false) database = entities.get(:database, op.use!('object')) use_arguments(op) do |args| - opts = {} + opts = extract_options(args, 'filter', 'timeoutMode', allow_extra: true) + symbolize_options!(opts, :timeout_mode) + if session = args.use('session') opts[:session] = entities.get(:session, session) end - if filter = args.use('filter') - opts[:filter] = filter + if timeout_ms = args.use('timeoutMS') + opts[:timeout_ms] = timeout_ms end + database.list_collections(**opts.merge(name_only: name_only)) end end @@ -126,14 +141,25 @@ def assert_collection_not_exists(op) def list_indexes(op) collection = entities.get(:collection, op.use!('object')) use_arguments(op) do |args| - opts = {} + opts = extract_options(args, 'timeoutMode', allow_extra: true) if session = args.use('session') opts[:session] = entities.get(:session, session) end + if timeout_ms = args.use('timeoutMS') + opts[:timeout_ms] = timeout_ms + end collection.indexes(**opts).to_a end end + def drop_indexes(op) + collection = entities.get(:collection, op.use!('object')) + use_arguments(op) do |args| + opts = extract_options(args, 'maxTimeMS', 'timeoutMS', allow_extra: true) + collection.indexes.drop_all(**opts) + end + end + def create_index(op) collection = entities.get(:collection, op.use!('object')) use_arguments(op) do |args| @@ -144,7 +170,12 @@ def create_index(op) if args.key?('unique') opts[:unique] = args.use('unique') end - + if timeout_ms = args.use('timeoutMS') + opts[:timeout_ms] = timeout_ms + end + if max_time_ms = args.use('maxTimeMS') + opts[:max_time_ms] = max_time_ms + end collection.indexes.create_one( args.use!('keys'), name: args.use('name'), @@ -156,7 +187,7 @@ def create_index(op) def drop_index(op) collection = entities.get(:collection, op.use!('object')) use_arguments(op) do |args| - opts = {} + opts = extract_options(args, 'maxTimeMS', 'timeoutMS', allow_extra: true) if session = args.use('session') opts[:session] = entities.get(:session, session) end @@ -188,7 +219,7 @@ def assert_index_not_exists(op) begin index = 
collection.indexes.get(args.use!('indexName')) raise Error::ResultMismatch, "Index found" - rescue Mongo::Error::OperationFailure => e + rescue Mongo::Error::OperationFailure::Family => e if e.code == 26 # OK else diff --git a/spec/runners/unified/grid_fs_operations.rb b/spec/runners/unified/grid_fs_operations.rb index c5cad546bf..2121eb0fa4 100644 --- a/spec/runners/unified/grid_fs_operations.rb +++ b/spec/runners/unified/grid_fs_operations.rb @@ -5,17 +5,38 @@ module Unified module GridFsOperations + def gridfs_find(op) + bucket = entities.get(:bucket, op.use!('object')) + use_arguments(op) do |args| + filter = args.use!('filter') + + opts = extract_options(args, 'allowDiskUse', + 'skip', 'hint','timeoutMS', + 'noCursorTimeout', 'sort', 'limit') + + bucket.find(filter,opts).to_a + end + end + def delete(op) bucket = entities.get(:bucket, op.use!('object')) use_arguments(op) do |args| - bucket.delete(args.use!('id')) + opts = {} + if timeout_ms = args.use('timeoutMS') + opts[:timeout_ms] = timeout_ms + end + bucket.delete(args.use!('id'), opts) end end def download(op) bucket = entities.get(:bucket, op.use!('object')) use_arguments(op) do |args| - stream = bucket.open_download_stream(args.use!('id')) + opts = {} + if timeout_ms = args.use('timeoutMS') + opts[:timeout_ms] = timeout_ms + end + stream = bucket.open_download_stream(args.use!('id'), opts) stream.read end end @@ -48,6 +69,9 @@ def upload(op) if disable_md5 = args.use('disableMD5') opts[:disable_md5] = disable_md5 end + if timeout_ms = args.use('timeoutMS') + opts[:timeout_ms] = timeout_ms + end contents = transform_contents(args.use!('source')) file_id = nil bucket.open_upload_stream(args.use!('filename'), **opts) do |stream| @@ -58,6 +82,17 @@ def upload(op) end end + def drop(op) + bucket = entities.get(:bucket, op.use!('object')) + use_arguments(op) do |args| + opts = {} + if timeout_ms = args.use('timeoutMS') + opts[:timeout_ms] = timeout_ms + end + bucket.drop(opts) + end + end + private def transform_contents(contents) diff --git a/spec/runners/unified/support_operations.rb b/spec/runners/unified/support_operations.rb index a99d310958..45f3aadc10 100644 --- a/spec/runners/unified/support_operations.rb +++ b/spec/runners/unified/support_operations.rb @@ -20,6 +20,9 @@ def run_command(op) if read_preference = args.use('readPreference') opts[:read] = ::Utils.snakeize_hash(read_preference) end + if timeout_ms = args.use('timeoutMS') + opts[:timeout_ms] = timeout_ms + end database.command(cmd, **opts) end @@ -129,14 +132,20 @@ def assert_session_transaction_state(op) def commit_transaction(op) session = entities.get(:session, op.use!('object')) - assert_no_arguments(op) - session.commit_transaction + opts = {} + use_arguments(op) do |args| + opts[:timeout_ms] = args.use('timeoutMS') + end + session.commit_transaction(opts.compact) end def abort_transaction(op) session = entities.get(:session, op.use!('object')) - assert_no_arguments(op) - session.abort_transaction + opts = {} + use_arguments(op) do |args| + opts[:timeout_ms] = args.use('timeoutMS') + end + session.abort_transaction(opts.compact) end def with_transaction(op) @@ -311,6 +320,36 @@ def assert_number_connections_checked_out(op) private + # @param [ UsingHash ] args the arguments to extract options from + # @param [ Array ] keys an array of strings and Hashes, + # where Hashes represent a mapping from the MDB key to the correspoding + # Ruby key. For Strings, the Ruby key is assumed to be a simple conversion + # of the MDB key, from camel-case to snake-case. 
+ # @param [ true | false ] allow_extra whether or not extra keys are allowed + # to exist in the args hash, beyond those listed. + def extract_options(args, *keys, allow_extra: false) + {}.tap do |opts| + keys.each do |key| + Array(key).each do |mdb_key, ruby_key| + value = args.use(mdb_key) + opts[ruby_key || mdb_name_to_ruby(mdb_key)] = value unless value.nil? + end + end + + raise NotImplementedError, "unhandled keys: #{args}" if !allow_extra && !args.empty? + end + end + + def symbolize_options!(opts, *keys) + keys.each do |key| + opts[key] = mdb_name_to_ruby(opts[key]) if opts[key] + end + end + + def mdb_name_to_ruby(name) + name.to_s.gsub(/([a-z])([A-Z])/) { "#{$1}_#{$2}" }.downcase.to_sym + end + def assert_no_arguments(op) if op.key?('arguments') raise NotimplementedError, "Arguments are not allowed" diff --git a/spec/runners/unified/test.rb b/spec/runners/unified/test.rb index 114088f693..32b0ba0a82 100644 --- a/spec/runners/unified/test.rb +++ b/spec/runners/unified/test.rb @@ -2,6 +2,7 @@ # rubocop:todo all require 'runners/crud/requirement' +require 'runners/unified/ambiguous_operations' require 'runners/unified/client_side_encryption_operations' require 'runners/unified/crud_operations' require 'runners/unified/grid_fs_operations' @@ -17,6 +18,7 @@ module Unified class Test + include AmbiguousOperations include ClientSideEncryptionOperations include CrudOperations include GridFsOperations @@ -42,12 +44,14 @@ def initialize(spec, **opts) if req = @spec['group_runOnRequirements'] @group_reqs = req.map { |r| Mongo::CRUD::Requirement.new(r) } end - mongoses = @spec['createEntities'].select do |spec| - spec['client'] - end.map do |spec| - spec['client']['useMultipleMongoses'] - end.compact.uniq - @multiple_mongoses = mongoses.any? { |v| v } + if @spec['createEntities'] + mongoses = @spec['createEntities'].select do |spec| + spec['client'] + end.map do |spec| + spec['client']['useMultipleMongoses'] + end.compact.uniq + @multiple_mongoses = mongoses.any? { |v| v } + end @test_spec.freeze @subscribers = {} @observe_sensitive = {} @@ -85,6 +89,8 @@ def create_spec_entities end def generate_entities(es) + return if es.nil? + es.each do |entity_spec| unless entity_spec.keys.length == 1 raise NotImplementedError, "Entity must have exactly one key" @@ -329,7 +335,7 @@ def set_initial_data begin collection.create(create_options) rescue Mongo::Error => e - if Mongo::Error::OperationFailure === e && ( + if Mongo::Error::OperationFailure::Family === e && ( e.code == 48 || e.message =~ /collection already exists/ ) # Already exists @@ -411,10 +417,16 @@ def execute_operation(op) public_send(method_name, op) rescue Mongo::Error, bson_error, Mongo::Auth::Unauthorized, ArgumentError => e + if expected_error.use('isTimeoutError') + unless Mongo::Error::TimeoutError === e + raise e + raise Error::ErrorMismatch, %Q,Expected TimeoutError ("isTimeoutError") but got #{e}, + end + end if expected_error.use('isClientError') # isClientError doesn't actually mean a client error. # It means anything other than OperationFailure. DRIVERS-1799 - if Mongo::Error::OperationFailure === e + if Mongo::Error::OperationFailure::Family === e raise Error::ErrorMismatch, %Q,Expected not OperationFailure ("isClientError") but got #{e}, end end @@ -482,7 +494,7 @@ def execute_operation(op) if result.nil? && expected_result.keys == ["$$unsetOrMatches"] return elsif result.nil? && !expected_result.empty? 
- raise Error::ResultMismatch, "#{msg}: expected #{expected} but got nil" + raise Error::ResultMismatch, "expected #{expected_result} but got nil" elsif Array === expected_result assert_documents_match(result, expected_result) else @@ -536,7 +548,7 @@ def kill_sessions root_authorized_client.command( killAllSessions: [], ) - rescue Mongo::Error::OperationFailure => e + rescue Mongo::Error::OperationFailure::Family => e if e.code == 11601 # operation was interrupted, ignore. SERVER-38335 elsif e.code == 13 diff --git a/spec/shared b/spec/shared index cee4bc0264..e582b57f1e 160000 --- a/spec/shared +++ b/spec/shared @@ -1 +1 @@ -Subproject commit cee4bc02649a573c8256b0505c1d23f503ac2609 +Subproject commit e582b57f1e33c967e360fbe554c3b19272bd9915 diff --git a/spec/solo/clean_exit_spec.rb b/spec/solo/clean_exit_spec.rb index 80e80fd030..f26e7307b0 100644 --- a/spec/solo/clean_exit_spec.rb +++ b/spec/solo/clean_exit_spec.rb @@ -17,6 +17,8 @@ it 'exits cleanly' do client = Mongo::Client.new(uri) client.database.collection_names.to_a + ensure + client.close end end end diff --git a/spec/spec_tests/client_side_operations_timeout_spec.rb b/spec/spec_tests/client_side_operations_timeout_spec.rb new file mode 100644 index 0000000000..ac5cf53495 --- /dev/null +++ b/spec/spec_tests/client_side_operations_timeout_spec.rb @@ -0,0 +1,15 @@ +# frozen_string_literal: true + +require 'spec_helper' +require 'runners/unified' + +base = "#{CURRENT_PATH}/spec_tests/data/client_side_operations_timeout" +CSOT_TESTS = Dir.glob("#{base}/**/*.yml").sort + +describe 'CSOT unified spec tests' do + if [ 1, '1', 'yes', 'true' ].include?(ENV['CSOT_SPEC_TESTS']) + define_unified_spec_tests(base, CSOT_TESTS) + else + skip 'CSOT spec tests are disabled. To enable them set env variable CSOT_SPEC_TESTS to 1' + end +end diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-Aggregate.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-Aggregate.yml index 93ad199511..24d74ca7c2 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-Aggregate.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-Aggregate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -238,4 +240,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-Correctness.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-Correctness.yml index d60b7b6201..99d1f8a59f 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-Correctness.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-Correctness.yml @@ -7,6 +7,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -419,4 +421,4 @@ tests: filter: { encryptedDate: { $gte: { $numberDouble: "0" } }} result: # expect an error mongocryptd. 
- errorContains: "value type is a date" \ No newline at end of file + errorContains: "value type is a date" diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-Delete.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-Delete.yml index 0b969fd48c..c587857e8e 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-Delete.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-Delete.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -179,4 +181,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-FindOneAndUpdate.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-FindOneAndUpdate.yml index 76bfe7ea25..5c1299db79 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-FindOneAndUpdate.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-FindOneAndUpdate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -236,4 +238,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-InsertFind.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-InsertFind.yml index e978b9de59..707c66b255 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-InsertFind.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-InsertFind.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -232,4 +234,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-Update.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-Update.yml index fe7d050f1b..a2a6928f48 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-Update.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Date-Update.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. 
topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -122,7 +124,7 @@ tests: escCollection: "enxcol_.default.esc" ecocCollection: "enxcol_.default.ecoc" <<: *encrypted_fields - "$db": "default" + "$db": "default" outcome: collection: @@ -249,4 +251,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-Aggregate.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-Aggregate.yml index 0926988d27..8e6a4d0efb 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-Aggregate.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-Aggregate.yml @@ -6,6 +6,8 @@ runOn: # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. topology: [ "replicaset" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -1684,4 +1686,4 @@ tests: } ] } - \ No newline at end of file + diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-Correctness.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-Correctness.yml index 1961ed3e5f..e873c87452 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-Correctness.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-Correctness.yml @@ -8,6 +8,8 @@ runOn: # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. topology: [ "replicaset" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -290,4 +292,4 @@ tests: sort: { _id: 1 } result: # expect an error from libmongocrypt. - errorContains: "field type is not supported" \ No newline at end of file + errorContains: "field type is not supported" diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-Delete.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-Delete.yml index bdb10e2d88..1811f7f352 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-Delete.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-Delete.yml @@ -6,6 +6,8 @@ runOn: # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. 
topology: [ "replicaset" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -902,4 +904,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-FindOneAndUpdate.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-FindOneAndUpdate.yml index defccea0aa..7c23564e90 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-FindOneAndUpdate.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-FindOneAndUpdate.yml @@ -6,6 +6,8 @@ runOn: # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. topology: [ "replicaset" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -1681,4 +1683,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-InsertFind.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-InsertFind.yml index 51abaa6423..50b597a2e8 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-InsertFind.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-InsertFind.yml @@ -6,6 +6,8 @@ runOn: # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. topology: [ "replicaset" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -1677,4 +1679,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-Update.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-Update.yml index e79fd082ec..ad219410a2 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-Update.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Decimal-Update.yml @@ -6,6 +6,8 @@ runOn: # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. topology: [ "replicaset" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -1694,4 +1696,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-Aggregate.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-Aggregate.yml index 08f4a380cf..d7ee0faec2 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-Aggregate.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-Aggregate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. 
# FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -326,4 +328,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-Correctness.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-Correctness.yml index 18252b4bbe..87c4ab797e 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-Correctness.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-Correctness.yml @@ -7,6 +7,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -421,4 +423,4 @@ tests: sort: { _id: 1 } result: # expect an error from libmongocrypt. - errorContains: "field type is not supported" \ No newline at end of file + errorContains: "field type is not supported" diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-Delete.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-Delete.yml index ade385d202..d89c61919f 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-Delete.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-Delete.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -223,4 +225,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.yml index 7100d58886..7e7f5d7f50 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. 
topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-InsertFind.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-InsertFind.yml index 32785d6589..f80ad9bab8 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-InsertFind.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-InsertFind.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -316,4 +318,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-Update.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-Update.yml index eedd076084..07256f7649 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-Update.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DecimalPrecision-Update.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -333,4 +335,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-Aggregate.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-Aggregate.yml index 4fb95343b0..938f262740 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-Aggregate.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-Aggregate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -910,4 +912,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-Correctness.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-Correctness.yml index 7289bb24dc..64adaf433d 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-Correctness.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-Correctness.yml @@ -7,6 +7,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. 
topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -289,4 +291,4 @@ tests: sort: { _id: 1 } result: # expect an error from libmongocrypt. - errorContains: "field type is not supported" \ No newline at end of file + errorContains: "field type is not supported" diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-Delete.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-Delete.yml index 2f42c1da10..6df3913386 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-Delete.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-Delete.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -515,4 +517,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-FindOneAndUpdate.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-FindOneAndUpdate.yml index d6573ff862..97c67833a6 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-FindOneAndUpdate.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-FindOneAndUpdate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -908,4 +910,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-InsertFind.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-InsertFind.yml index 0122ba243c..d98c4b3c59 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-InsertFind.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-InsertFind.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -904,4 +906,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-Update.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-Update.yml index 176db3971e..1319286ce3 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-Update.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Double-Update.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. 
topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -921,4 +923,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-Aggregate.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-Aggregate.yml index 134003bf9c..c20d7a3432 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-Aggregate.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-Aggregate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -322,4 +324,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-Correctness.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-Correctness.yml index a8fc4ec2a4..77ebd5360f 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-Correctness.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-Correctness.yml @@ -7,6 +7,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -421,4 +423,4 @@ tests: sort: { _id: 1 } result: # expect an error from libmongocrypt. - errorContains: "field type is not supported" \ No newline at end of file + errorContains: "field type is not supported" diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-Delete.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-Delete.yml index a6f83da786..58bc62d453 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-Delete.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-Delete.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. 
topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -221,4 +223,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-FindOneAndUpdate.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-FindOneAndUpdate.yml index 5def8d287d..68b1381abd 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-FindOneAndUpdate.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-FindOneAndUpdate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -320,4 +322,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-InsertFind.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-InsertFind.yml index 8900f79a86..1bfb40cf3c 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-InsertFind.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-InsertFind.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -316,4 +318,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-Update.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-Update.yml index 3e31f40181..d0f539aa47 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-Update.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-DoublePrecision-Update.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -335,4 +337,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-Aggregate.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-Aggregate.yml index 13c350ea5f..92abb0e26d 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-Aggregate.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-Aggregate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. 
topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -238,4 +240,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-Correctness.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-Correctness.yml index 1e7d5d47fc..aba992d920 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-Correctness.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-Correctness.yml @@ -7,6 +7,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -420,4 +422,4 @@ tests: filter: { encryptedInt: { $gte: { $numberDouble: "0" } }} result: # expect an error from libmongocrypt. - errorContains: "field type is not supported" \ No newline at end of file + errorContains: "field type is not supported" diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-Delete.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-Delete.yml index ab1e9d2e5e..ca7f0076ea 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-Delete.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-Delete.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -179,4 +181,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-FindOneAndUpdate.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-FindOneAndUpdate.yml index a33a5120a5..853aaec5b7 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-FindOneAndUpdate.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-FindOneAndUpdate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -236,4 +238,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-InsertFind.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-InsertFind.yml index 4ef8c8e520..b2d7063071 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-InsertFind.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-InsertFind.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. 
topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -232,4 +234,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-Update.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-Update.yml index cf5716dab0..a5cbde9069 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-Update.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Int-Update.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -251,4 +253,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-Aggregate.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-Aggregate.yml index cb5e42c158..55edd83240 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-Aggregate.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-Aggregate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -238,4 +240,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-Correctness.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-Correctness.yml index a7a33e274e..04a90ac51f 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-Correctness.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-Correctness.yml @@ -7,6 +7,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -419,4 +421,4 @@ tests: filter: { encryptedLong: { $gte: { $numberDouble: "0" } }} result: # expect an error from libmongocrypt. - errorContains: "field type is not supported" \ No newline at end of file + errorContains: "field type is not supported" diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-Delete.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-Delete.yml index 8dd1603f33..32c5c37228 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-Delete.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-Delete.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. 
topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -179,4 +181,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-FindOneAndUpdate.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-FindOneAndUpdate.yml index 0641988b98..a569efd78d 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-FindOneAndUpdate.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-FindOneAndUpdate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -236,4 +238,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-InsertFind.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-InsertFind.yml index 076670d49e..67c314a397 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-InsertFind.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-InsertFind.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -232,4 +234,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-Update.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-Update.yml index 0aad7c4416..d26e70e6ca 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-Update.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-Long-Update.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] @@ -251,4 +253,4 @@ tests: } } ] - } \ No newline at end of file + } diff --git a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-WrongType.yml b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-WrongType.yml index b44e8c5055..496a0119df 100644 --- a/spec/spec_tests/data/client_side_encryption/fle2v2-Range-WrongType.yml +++ b/spec/spec_tests/data/client_side_encryption/fle2v2-Range-WrongType.yml @@ -7,6 +7,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. 
topology: [ "replicaset", "sharded", "load-balanced" ] + # TODO: RUBY-3423 + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/spec/spec_tests/data/client_side_operations_timeout/bulkWrite.yml b/spec/spec_tests/data/client_side_operations_timeout/bulkWrite.yml new file mode 100644 index 0000000000..0459dbbc16 --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/bulkWrite.yml @@ -0,0 +1,87 @@ +description: "timeoutMS behaves correctly for bulkWrite operations" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + uriOptions: + # Used to speed up the test + w: 1 + - database: + id: &database database + client: *client + databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: [] + +tests: + # Test that drivers do not refresh timeoutMS between commands. This is done by running a bulkWrite that will require + # two commands with timeoutMS=200 and blocking each command for 120ms. The server should take over 200ms total, so the + # bulkWrite should fail with a timeout error. + - description: "timeoutMS applied to entire bulkWrite, not individual commands" + operations: + # Do an operation without a timeout to ensure the servers are discovered. + - name: insertOne + object: *collection + arguments: + document: {} + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert", "update"] + blockConnection: true + blockTimeMS: 120 + - name: bulkWrite + object: *collection + arguments: + requests: + - insertOne: + document: { _id: 1 } + - replaceOne: + filter: { _id: 1 } + replacement: { x: 1 } + timeoutMS: 200 + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } diff --git a/spec/spec_tests/data/client_side_operations_timeout/change-streams.yml b/spec/spec_tests/data/client_side_operations_timeout/change-streams.yml new file mode 100644 index 0000000000..683c30674f --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/change-streams.yml @@ -0,0 +1,358 @@ +description: "timeoutMS behaves correctly for change streams" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + topologies: ["replicaset", "sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + # Drivers are not required to execute killCursors during resume attempts, so it should be ignored for command + # monitoring assertions. 
+ ignoreCommandMonitoringEvents: ["killCursors"] + - database: + id: &database database + client: *client + databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: [] + +tests: + - description: "error if maxAwaitTimeMS is greater than timeoutMS" + operations: + - name: createChangeStream + object: *collection + arguments: + pipeline: [] + timeoutMS: 5 + maxAwaitTimeMS: 10 + expectError: + isClientError: true + + - description: "error if maxAwaitTimeMS is equal to timeoutMS" + operations: + - name: createChangeStream + object: *collection + arguments: + pipeline: [] + timeoutMS: 5 + maxAwaitTimeMS: 5 + expectError: + isClientError: true + + - description: "timeoutMS applied to initial aggregate" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 55 + - name: createChangeStream + object: *collection + arguments: + pipeline: [] + timeoutMS: 50 + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + + # If maxAwaitTimeMS is not set, timeoutMS should be refreshed for the getMore and the getMore should not have a + # maxTimeMS field. This test requires a high timeout because the server applies a default 1000ms maxAwaitTime. To + # ensure that the driver is refreshing the timeout between commands, the test blocks aggregate and getMore commands + # for 30ms each and creates/iterates a change stream with timeoutMS=1050. The initial aggregate will block for 30ms + # and the getMore will block for 1030ms. + - description: "timeoutMS is refreshed for getMore if maxAwaitTimeMS is not set" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate", "getMore"] + blockConnection: true + blockTimeMS: 30 + - name: createChangeStream + object: *collection + arguments: + pipeline: [] + timeoutMS: 1050 + saveResultAsEntity: &changeStream changeStream + - name: iterateOnce + object: *changeStream + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: getMore + databaseName: *databaseName + command: + getMore: { $$type: ["int", "long"] } + collection: *collectionName + maxTimeMS: { $$exists: false } + + # If maxAwaitTimeMS is set, timeoutMS should still be refreshed for the getMore and the getMore command should have a + # maxTimeMS field. + - description: "timeoutMS is refreshed for getMore if maxAwaitTimeMS is set" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate", "getMore"] + blockConnection: true + # was 15, changed to 30 to account for jruby driver latency. 
+ blockTimeMS: 30 + - name: createChangeStream + object: *collection + arguments: + pipeline: [] + # Was 20, changed to 29 to account for native Ruby driver latency. + # Changed again to 59 to account for additional JRuby driver latency. + # The idea for this test is that each operation is delayed by 15ms + # (by failpoint). The timeout for each operation was originally set to + # 20ms, because if timeoutMS was not refreshed for getMore, it would time out. + # However, we kept bumping up against the 20ms timeout because the driver + # itself takes more than 5ms to do its own work. + # + # Changing the blockTimeMS in the failpoint to 30ms, and then bumping + # the timeout to almost twice that (59ms), should give us the same + # effect in the test. + timeoutMS: 59 + batchSize: 2 + maxAwaitTimeMS: 1 + saveResultAsEntity: &changeStream changeStream + - name: iterateOnce + object: *changeStream + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: getMore + databaseName: *databaseName + command: + getMore: { $$type: ["int", "long"] } + collection: *collectionName + maxTimeMS: 1 + + # The timeout should be applied to the entire resume attempt, not individually to each command. The test creates a + # change stream with timeoutMS=20 which returns an empty initial batch and then sets a fail point to block both + # getMore and aggregate for 12ms each and fail with a resumable error. When the resume attempt happens, the getMore + # and aggregate block for longer than 20ms total, so it times out. + - description: "timeoutMS applies to full resume attempt in a next call" + operations: + - name: createChangeStream + object: *collection + arguments: + pipeline: [] + # Originally set to 20, but the Ruby driver was too often taking + # that much time, causing the timing of the test to fail. Instead, we + # bumped the timeout to 23ms, which is just less than twice the + # blockTimeMS for the failpoint. It still failed on JRuby, so the + # timeout (and blockTimeMS) were drastically increased to accommodate + # JRuby. This tests the same thing, but gives the driver a bit more + # breathing room. + timeoutMS: 99 + saveResultAsEntity: &changeStream changeStream + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["getMore", "aggregate"] + blockConnection: true + # Originally 12, bumped to 50 to give the JRuby driver a bit + # more breathing room. + blockTimeMS: 50 + errorCode: 7 # HostNotFound - resumable but does not require an SDAM state change. + # failCommand doesn't correctly add the ResumableChangeStreamError label by default. It needs to be specified + # manually here so the error is considered resumable. The failGetMoreAfterCursorCheckout fail point + # would add the label without this, but it does not support blockConnection functionality.
+ errorLabels: ["ResumableChangeStreamError"] + - name: iterateUntilDocumentOrError + object: *changeStream + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: getMore + databaseName: *databaseName + command: + getMore: { $$type: ["int", "long"] } + collection: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + + - description: "change stream can be iterated again if previous iteration times out" + operations: + - name: createChangeStream + object: *collection + arguments: + pipeline: [] + # Specify a short maxAwaitTimeMS because otherwise the getMore on the new cursor will wait for 1000ms and + # time out. + maxAwaitTimeMS: 1 + timeoutMS: 100 + saveResultAsEntity: &changeStream changeStream + # Block getMore for 150ms to force the next() call to time out. + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["getMore"] + blockConnection: true + blockTimeMS: 150 + # The original aggregate didn't return any events so this should do a getMore and return a timeout error. + - name: iterateUntilDocumentOrError + object: *changeStream + expectError: + isTimeoutError: true + # The previous iteration attempt timed out so this should re-create the change stream. We use iterateOnce rather + # than iterateUntilDocumentOrError because there haven't been any events and we only want to assert that the + # cursor was re-created. + - name: iterateOnce + object: *changeStream + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + # The iterateUntilDocumentOrError operation should send a getMore. + - commandStartedEvent: + commandName: getMore + databaseName: *databaseName + command: + getMore: { $$type: ["int", "long"] } + collection: *collectionName + # The iterateOnce operation should re-create the cursor via an aggregate and then send a getMore to iterate + # the new cursor. + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: getMore + databaseName: *databaseName + command: + getMore: { $$type: ["int", "long"] } + collection: *collectionName + + # The timeoutMS value should be refreshed for getMore's. This is a failure test. The createChangeStream operation + # sets timeoutMS=10 and the getMore blocks for 15ms, causing iteration to fail with a timeout error. 
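The change stream cases above (and the refresh-failure case just below) encode two caller-visible rules: maxAwaitTimeMS must stay strictly below timeoutMS, and the timeout budget is refreshed for every getMore issued while iterating. A rough Ruby sketch, again assuming a :timeout_ms spelling for the spec's timeoutMS:

  require 'mongo'

  client = Mongo::Client.new('mongodb://localhost:27017/test?replicaSet=rs0')
  coll = client[:coll]

  # max_await_time_ms must be smaller than the assumed :timeout_ms, otherwise
  # the driver is expected to reject the change stream client-side.
  stream = coll.watch([], max_await_time_ms: 1, timeout_ms: 1_000)

  # Blocks until the first event arrives; each getMore along the way is
  # expected to get a fresh timeout budget, per the tests above.
  stream.each do |event|
    p event
    break
  end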
+ - description: "timeoutMS is refreshed for getMore - failure" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["getMore"] + blockConnection: true + # blockTimeMS: 15 + # Increase timeout + blockTimeMS: 30 + - name: createChangeStream + object: *collection + arguments: + pipeline: [] + # timeoutMS: 10 + # Increase timeout + timeoutMS: 20 + saveResultAsEntity: &changeStream changeStream + # The first iteration should do a getMore + - name: iterateUntilDocumentOrError + object: *changeStream + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + # The iterateUntilDocumentOrError operation should send a getMore. + - commandStartedEvent: + commandName: getMore + databaseName: *databaseName + command: + getMore: { $$type: ["int", "long"] } + collection: *collectionName diff --git a/spec/spec_tests/data/client_side_operations_timeout/close-cursors.yml b/spec/spec_tests/data/client_side_operations_timeout/close-cursors.yml new file mode 100644 index 0000000000..352f602cbb --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/close-cursors.yml @@ -0,0 +1,129 @@ +description: "timeoutMS behaves correctly when closing cursors" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - commandSucceededEvent + - commandFailedEvent + - database: + id: &database database + client: *client + databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: + - { _id: 0 } + - { _id: 1 } + - { _id: 2 } + +tests: + - description: "timeoutMS is refreshed for close" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["getMore"] + blockConnection: true + blockTimeMS: 50 + - name: createFindCursor + object: *collection + arguments: + filter: {} + batchSize: 2 + timeoutMS: 20 + saveResultAsEntity: &cursor cursor + # Iterate the cursor three times. The third should do a getMore, which should fail with a timeout error. + - name: iterateUntilDocumentOrError + object: *cursor + - name: iterateUntilDocumentOrError + object: *cursor + - name: iterateUntilDocumentOrError + object: *cursor + expectError: + isTimeoutError: true + # All errors from close() are ignored, so we close the cursor here but assert that killCursors was executed + # successfully via command monitoring expectations below. + - name: close + object: *cursor + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + - commandSucceededEvent: + commandName: find + - commandStartedEvent: + commandName: getMore + - commandFailedEvent: + commandName: getMore + - commandStartedEvent: + command: + killCursors: *collectionName + # The close() operation should inherit timeoutMS from the initial find(). 
+ maxTimeMS: { $$type: ["int", "long"] } + commandName: killCursors + - commandSucceededEvent: + commandName: killCursors + + - description: "timeoutMS can be overridden for close" + operations: + - name: failPoint + object: testRunner + arguments: + client: *client + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["killCursors"] + blockConnection: true + blockTimeMS: 30 + - name: createFindCursor + object: *collection + arguments: + filter: {} + batchSize: 2 + timeoutMS: 20 + saveResultAsEntity: &cursor cursor + - name: close + object: *cursor + arguments: + # timeoutMS: 40 + # Increase timeout + timeoutMS: 50 + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + - commandSucceededEvent: + commandName: find + - commandStartedEvent: + command: + killCursors: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + commandName: killCursors + - commandSucceededEvent: + commandName: killCursors diff --git a/spec/spec_tests/data/client_side_operations_timeout/command-execution.yml b/spec/spec_tests/data/client_side_operations_timeout/command-execution.yml new file mode 100644 index 0000000000..400a90867a --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/command-execution.yml @@ -0,0 +1,250 @@ +description: "timeoutMS behaves correctly during command execution" + +schemaVersion: "1.9" + +runOnRequirements: + # The appName filter cannot be used to set a fail point on connection handshakes until server version 4.9 due to + # SERVER-49220/SERVER-49336. + - minServerVersion: "4.9" + # Skip load-balanced and serverless which do not support RTT measurements. + topologies: [ single, replicaset, sharded ] + serverless: forbid + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + +initialData: + # The corresponding entities for the collections defined here are created in test-level createEntities operations. + # This is done so that tests can set fail points that will affect all of the handshakes and heartbeats done by a + # client. The collection and database names are listed here so that the collections will be dropped and re-created at + # the beginning of each test. + - collectionName: ®ularCollectionName coll + databaseName: &databaseName test + documents: [] + - collectionName: &timeoutCollectionName timeoutColl + databaseName: &databaseName test + documents: [] + +tests: + - description: "maxTimeMS value in the command is less than timeoutMS" + operations: + # Artificially increase the server RTT to ~50ms. + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: "alwaysOn" + data: + failCommands: ["hello", "isMaster"] + appName: &appName reduceMaxTimeMSTest + blockConnection: true + blockTimeMS: 50 + # Create a client with the app name specified in the fail point and timeoutMS higher than blockTimeMS. + # Also create database and collection entities derived from the new client. + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + uriOptions: + appName: *appName + w: 1 # Override server's w:majority default to speed up the test. 
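The close-cursors cases rely on timeoutMS being inherited from the original operation and overridable per call (here, for close). A hedged sketch of that layering from the application's side, with :timeout_ms assumed at both the client and the operation level:

  require 'mongo'

  # Assumed: a client-wide timeout_ms that individual operations can override.
  client = Mongo::Client.new('mongodb://localhost:27017/test', timeout_ms: 20)
  coll = client[:coll]

  # Inherits the 20ms client budget.
  coll.find.first

  # Overrides the budget for this one operation, analogous to overriding
  # timeoutMS for close() in the test above.
  coll.find({}, batch_size: 2, timeout_ms: 50).to_a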
+ timeoutMS: 500 + heartbeatFrequencyMS: 500 + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &timeoutCollection timeoutCollection + database: *database + collectionName: *timeoutCollectionName + # Do an operation with a large timeout to ensure the servers are discovered. + - name: insertOne + object: *timeoutCollection + arguments: + document: { _id: 1 } + timeoutMS: 100000 + # Wait until short-circuiting has been enabled (at least 2 RTT measurements). + - name: wait + object: testRunner + arguments: + ms: 1000 + # Do an operation with timeoutCollection so the event will include a maxTimeMS field. + - name: insertOne + object: *timeoutCollection + arguments: + document: { _id: 2 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *timeoutCollectionName + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *timeoutCollectionName + maxTimeMS: { $$lte: 450 } + + - description: "command is not sent if RTT is greater than timeoutMS" + operations: + # Artificially increase the server RTT to ~50ms. + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: "alwaysOn" + data: + failCommands: ["hello", "isMaster"] + appName: &appName rttTooHighTest + blockConnection: true + blockTimeMS: 50 + # Create a client with the app name specified in the fail point. Also create database and collection entities + # derived from the new client. There is one collection entity with no timeoutMS and another with a timeoutMS + # that's lower than the fail point's blockTimeMS value. + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + uriOptions: + appName: *appName + w: 1 # Override server's w:majority default to speed up the test. + timeoutMS: 10 + heartbeatFrequencyMS: 500 + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &timeoutCollection timeoutCollection + database: *database + collectionName: *timeoutCollectionName + # Do an operation with a large timeout to ensure the servers are discovered. + - name: insertOne + object: *timeoutCollection + arguments: + document: { _id: 1 } + timeoutMS: 100000 + # Wait until short-circuiting has been enabled (at least 2 RTT measurements). + - name: wait + object: testRunner + arguments: + ms: 1000 + # Do an operation with timeoutCollection which will error. + - name: insertOne + object: *timeoutCollection + arguments: + document: { _id: 2 } + expectError: + isTimeoutError: true + # Do an operation with timeoutCollection which will error. + - name: insertOne + object: *timeoutCollection + arguments: + document: { _id: 3 } + expectError: + isTimeoutError: true + # Do an operation with timeoutCollection which will error. + - name: insertOne + object: *timeoutCollection + arguments: + document: { _id: 4 } + expectError: + isTimeoutError: true + expectEvents: + # There should only be one event, which corresponds to the first + # insertOne call. For the subsequent insertOne calls, drivers should + # fail client-side. 
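The command-execution cases in this file describe behavior that never appears in user code: the driver subtracts its minimum RTT estimate from the remaining budget when computing maxTimeMS, and once it has at least two RTT samples it fails fast client-side if the budget cannot even cover a round trip. From the application's point of view this simply surfaces as a timeout error; a hedged sketch (:timeout_ms assumed, error class deliberately kept broad):

  require 'mongo'

  client = Mongo::Client.new('mongodb://localhost:27017/test', timeout_ms: 10)
  coll = client[:coll]

  begin
    coll.insert_one(_id: 1)
  rescue Mongo::Error => e
    # With a 10ms budget and a slower round trip, the driver is expected to
    # raise a timeout error, possibly without sending the command at all.
    warn "timed out: #{e.class}"
  end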
+ - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *timeoutCollectionName + + - description: "short-circuit is not enabled with only 1 RTT measurement" + operations: + # Artificially increase the server RTT to ~300ms. + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: "alwaysOn" + data: + failCommands: ["hello", "isMaster"] + appName: &appName reduceMaxTimeMSTest + blockConnection: true + blockTimeMS: 100 + # Create a client with the app name specified in the fail point and timeoutMS lower than blockTimeMS. + # Also create database and collection entities derived from the new client. + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + uriOptions: + appName: *appName + w: 1 # Override server's w:majority default to speed up the test. + timeoutMS: 90 + heartbeatFrequencyMS: 100000 # Override heartbeatFrequencyMS to ensure only 1 RTT is recorded. + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &timeoutCollection timeoutCollection + database: *database + collectionName: *timeoutCollectionName + # Do an operation with a large timeout to ensure the servers are discovered. + - name: insertOne + object: *timeoutCollection + arguments: + document: { _id: 1 } + timeoutMS: 100000 + # Do an operation with timeoutCollection which will succeed. If this + # fails it indicates the driver mistakenly used the min RTT even though + # there has only been one sample. + - name: insertOne + object: *timeoutCollection + arguments: + document: { _id: 2 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *timeoutCollectionName + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *timeoutCollectionName + maxTimeMS: { $$lte: 450 } diff --git a/spec/spec_tests/data/client_side_operations_timeout/convenient-transactions.yml b/spec/spec_tests/data/client_side_operations_timeout/convenient-transactions.yml new file mode 100644 index 0000000000..050d0d514f --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/convenient-transactions.yml @@ -0,0 +1,113 @@ +description: "timeoutMS behaves correctly for the withTransaction API" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + topologies: ["replicaset", "sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 50 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + - session: + id: &session session + client: *client + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: [] + +tests: + - description: "withTransaction raises a client-side error if timeoutMS is overridden inside the callback" + operations: + - name: withTransaction + object: *session + arguments: + callback: + - name: insertOne + object: *collection + arguments: + document: { _id: 1 } + session: *session + timeoutMS: 100 + 
expectError: + isClientError: true + expectEvents: + # The only operation run fails with a client-side error, so there should be no events for the client. + - client: *client + events: [] + + - description: "timeoutMS is not refreshed for each operation in the callback" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert"] + blockConnection: true + # Was 30, but JRuby was taking too long in preparing and issuing + # the operation. We now specify the timeoutMS below, and set this + # value to just more than half of it (so that two inserts will + # exceed the timeout, but one won't--or shouldn't). + blockTimeMS: 51 + - name: withTransaction + object: *session + arguments: + # Was originally not specified here, inheriting the client value of 50ms. + # That wasn't giving JRuby enough time, so we specify a larger value + # here. + timeoutMS: 100 + callback: + - name: insertOne + object: *collection + arguments: + document: { _id: 1 } + session: *session + - name: insertOne + object: *collection + arguments: + document: { _id: 2 } + session: *session + expectError: + isTimeoutError: true + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + # Because the second insert expects an error and gets an error, it technically succeeds, so withTransaction + # will try to run commitTransaction. This will fail client-side, though, because the timeout has already + # expired, so no command is sent. + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } diff --git a/spec/spec_tests/data/client_side_operations_timeout/cursors.yml b/spec/spec_tests/data/client_side_operations_timeout/cursors.yml new file mode 100644 index 0000000000..0202054732 --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/cursors.yml @@ -0,0 +1,70 @@ +description: "tests for timeoutMS behavior that applies to all cursor types" + +schemaVersion: "1.0" + +createEntities: + - client: + id: &client client + - database: + id: &database database + client: *client + databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: [] + +tests: + - description: "find errors if timeoutMode is set and timeoutMS is not" + operations: + - name: find + object: *collection + arguments: + filter: {} + timeoutMode: cursorLifetime + expectError: + isClientError: true + + - description: "collection aggregate errors if timeoutMode is set and timeoutMS is not" + operations: + - name: aggregate + object: *collection + arguments: + pipeline: [] + timeoutMode: cursorLifetime + expectError: + isClientError: true + + - description: "database aggregate errors if timeoutMode is set and timeoutMS is not" + operations: + - name: aggregate + object: *database + arguments: + pipeline: [] + timeoutMode: cursorLifetime + expectError: + isClientError: true + + - description: "listCollections errors if timeoutMode is set and timeoutMS is not" + operations: + - name: listCollections + object: *database + arguments: + filter: {} + timeoutMode: cursorLifetime + 
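The withTransaction cases above encode two rules worth restating from the caller's side: timeoutMS cannot be overridden inside the callback, and a single budget covers every operation in the callback plus the commit. A rough Ruby sketch, with :timeout_ms assumed as the driver-side spelling of timeoutMS:

  require 'mongo'

  client = Mongo::Client.new('mongodb://localhost:27017/test?replicaSet=rs0',
                             timeout_ms: 50)
  coll = client[:coll]
  session = client.start_session

  # One budget (assumed to come from the client's timeout_ms) spans both
  # inserts and the commitTransaction; a per-operation timeout_ms inside the
  # callback is expected to be rejected with a client-side error.
  session.with_transaction do
    coll.insert_one({ _id: 1 }, session: session)
    coll.insert_one({ _id: 2 }, session: session)
  end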
expectError: + isClientError: true + + - description: "listIndexes errors if timeoutMode is set and timeoutMS is not" + operations: + - name: listIndexes + object: *collection + arguments: + timeoutMode: cursorLifetime + expectError: + isClientError: true diff --git a/spec/spec_tests/data/client_side_operations_timeout/deprecated-options.yml b/spec/spec_tests/data/client_side_operations_timeout/deprecated-options.yml new file mode 100644 index 0000000000..31eeb8d089 --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/deprecated-options.yml @@ -0,0 +1,3982 @@ +description: "operations ignore deprecated timeout options if timeoutMS is set" + +schemaVersion: "1.9" + +# Most tests in this file can be executed against any server version, but some tests execute operations that are only +# available on higher server versions (e.g. abortTransaction). To avoid too many special cases in templated tests, the +# min server version is set to 4.2 for all. +runOnRequirements: + - minServerVersion: "4.2" + topologies: ["replicaset", "sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + +initialData: + - collectionName: &collectionName coll + databaseName: &databaseName test + documents: [] + +tests: + # For each operation, run these tests: + # + # 1. socketTimeoutMS is ignored if timeoutMS is set. The test creates a client with socketTimeoutMS=1, configures and + # a failpoint to block the operation for 5ms, runs the operation with timeoutMS=10000, and expects it to succeed. + # + # 2. wTimeoutMS is ignored if timeoutMS is set. The test creates a client with wTimeoutMS=1, runs the operation with + # timeoutMS=10000, expects the operation to succeed, and uses command monitoring expectations to assert that the + # command sent to the server does not contain a writeConcern field. + # + # 3. If the operation supports maxTimeMS, it ignores maxTimeMS if timeoutMS is set. The test executes the operation + # with timeoutMS=1000 and maxTimeMS=5000. It expects the operation to succeed and uses command monitoring expectations + # to assert that the actual maxTimeMS value sent was less than or equal to 100, thereby asserting that it was + # actually derived from timeoutMS. + + # Tests for commitTransaction. These are not included in the operations loop because the tests need to execute + # additional "startTransaction" and "insertOne" operations to establish a server-side transaction. There is also one + # additional test to assert that maxCommitTimeMS is ignored if timeoutMS is set. + + - description: "commitTransaction ignores socketTimeoutMS if timeoutMS is set" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + # This test uses 20 instead of 1 like other tests because socketTimeoutMS also applies to the + # operation done to start the server-side transaction and it needs time to succeed. 
+ socketTimeoutMS: 20 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: ["aggregate"] + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["commitTransaction"] + blockConnection: true + blockTimeMS: 5 + - name: startTransaction + object: *session + - name: countDocuments + object: *collection + arguments: + filter: {} + session: *session + - name: commitTransaction + object: *session + arguments: + timeoutMS: 10000 + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "commitTransaction ignores wTimeoutMS if timeoutMS is set" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: ["aggregate"] + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - session: + id: &session session + client: *client + - name: startTransaction + object: *session + - name: countDocuments + object: *collection + arguments: + filter: {} + session: *session + - name: commitTransaction + object: *session + arguments: + timeoutMS: 10000 + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "commitTransaction ignores maxCommitTimeMS if timeoutMS is set" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: ["aggregate"] + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - session: + id: &session session + client: *client + sessionOptions: + defaultTransactionOptions: + maxCommitTimeMS: 5000 + - name: startTransaction + object: *session + - name: countDocuments + object: *collection + arguments: + filter: {} + session: *session + - name: commitTransaction + object: *session + arguments: + timeoutMS: &timeoutMS 1000 + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + # Assert that the final maxTimeMS field is derived from timeoutMS, not maxCommitTimeMS. + maxTimeMS: { $$lte: *timeoutMS } + + # Tests for abortTransaction. These are not included in the operations loop because the tests need to execute + # additional "startTransaction" and "insertOne" operations to establish a server-side transaction. 
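Every test generated in this deprecated-options file follows the same template: once timeoutMS is set, the legacy socketTimeoutMS, wTimeoutMS, maxTimeMS and maxCommitTimeMS knobs must be ignored. In Ruby terms, roughly (socket_timeout, the write concern's wtimeout and max_time_ms are long-standing driver options; :timeout_ms is assumed):

  require 'mongo'

  # With the assumed :timeout_ms present, the older timeout knobs set here
  # should no longer influence how long an operation may run.
  client = Mongo::Client.new(
    'mongodb://localhost:27017/test',
    socket_timeout: 1,
    write_concern: { w: 1, wtimeout: 1 },
    timeout_ms: 10_000
  )
  coll = client[:coll]

  # Likewise, max_time_ms on the operation is expected to be ignored in favor
  # of the maxTimeMS value the driver derives from timeoutMS.
  coll.find({}, max_time_ms: 5_000, timeout_ms: 1_000).to_a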
+ + - description: "abortTransaction ignores socketTimeoutMS if timeoutMS is set" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + # This test uses 20 instead of 1 like other tests because socketTimeoutMS also applies to the + # operation done to start the server-side transaction and it needs time to succeed. + socketTimeoutMS: 20 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: ["aggregate"] + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["abortTransaction"] + blockConnection: true + blockTimeMS: 5 + - name: startTransaction + object: *session + - name: countDocuments + object: *collection + arguments: + filter: {} + session: *session + - name: abortTransaction + object: *session + arguments: + timeoutMS: 10000 + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "abortTransaction ignores wTimeoutMS if timeoutMS is set" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: ["aggregate"] + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - session: + id: &session session + client: *client + - name: startTransaction + object: *session + - name: countDocuments + object: *collection + arguments: + filter: {} + session: *session + - name: abortTransaction + object: *session + arguments: + timeoutMS: 10000 + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + # Tests for withTransaction. These are not included in the operations loop because the command monitoring + # expectations contain multiple commands. There is also one additional test to assert that maxCommitTimeMS is ignored + # if timeoutMS is set. + + - description: "withTransaction ignores socketTimeoutMS if timeoutMS is set" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + # This test uses 20 instead of 1 like other tests because socketTimeoutMS also applies to the + # operation done to start the server-side transaction and it needs time to succeed. 
+ socketTimeoutMS: 20 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["commitTransaction"] + blockConnection: true + blockTimeMS: 5 + - name: withTransaction + object: *session + arguments: + timeoutMS: 10000 + callback: + - name: countDocuments + object: *collection + arguments: + filter: {} + session: *session + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "withTransaction ignores wTimeoutMS if timeoutMS is set" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - session: + id: &session session + client: *client + - name: withTransaction + object: *session + arguments: + timeoutMS: 10000 + callback: + - name: countDocuments + object: *collection + arguments: + filter: {} + session: *session + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "withTransaction ignores maxCommitTimeMS if timeoutMS is set" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - session: + id: &session session + client: *client + sessionOptions: + defaultTransactionOptions: + maxCommitTimeMS: 5000 + - name: withTransaction + object: *session + arguments: + timeoutMS: &timeoutMS 1000 + callback: + - name: countDocuments + object: *collection + arguments: + filter: {} + session: *session + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + # Assert that the final maxTimeMS field is derived from timeoutMS, not maxCommitTimeMS. 
+ maxTimeMS: { $$lte: *timeoutMS } + + # Tests for operations that can be generated. + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - listDatabases on client" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 5 + - name: listDatabases + object: *client + arguments: + timeoutMS: 100000 + filter: {} + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - listDatabases on client" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: listDatabases + object: *client + arguments: + timeoutMS: 100000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - listDatabaseNames on client" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 5 + - name: listDatabaseNames + object: *client + arguments: + timeoutMS: 100000 + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - listDatabaseNames on client" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: listDatabaseNames + object: *client + arguments: + timeoutMS: 100000 + + + expectEvents: + - 
client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on client" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 5 + - name: createChangeStream + object: *client + arguments: + timeoutMS: 100000 + pipeline: [] + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on client" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: createChangeStream + object: *client + arguments: + timeoutMS: 100000 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - aggregate on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 5 + - name: aggregate + object: *database + arguments: + timeoutMS: 100000 + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - aggregate on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + 
database: *database + - session: + id: &session session + client: *client + - name: aggregate + object: *database + arguments: + timeoutMS: 100000 + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - aggregate on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: aggregate + object: *database + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - listCollections on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 5 + - name: listCollections + object: *database + arguments: + timeoutMS: 100000 + filter: {} + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - listCollections on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: listCollections + object: *database + arguments: + timeoutMS: 100000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - listCollectionNames on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + 
useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 5 + - name: listCollectionNames + object: *database + arguments: + timeoutMS: 100000 + filter: {} + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - listCollectionNames on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: listCollectionNames + object: *database + arguments: + timeoutMS: 100000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - runCommand on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["ping"] + blockConnection: true + blockTimeMS: 5 + - name: runCommand + object: *database + arguments: + timeoutMS: 100000 + command: { ping: 1 } + commandName: ping + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - runCommand on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: runCommand + object: *database + arguments: + timeoutMS: 100000 + command: { ping: 1 } + commandName: ping + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: ping + databaseName: *databaseName + command: + ping: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is 
ignored if timeoutMS is set - createChangeStream on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 5 + - name: createChangeStream + object: *database + arguments: + timeoutMS: 100000 + pipeline: [] + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: createChangeStream + object: *database + arguments: + timeoutMS: 100000 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - aggregate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 5 + - name: aggregate + object: *collection + arguments: + timeoutMS: 100000 + pipeline: [] + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - aggregate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: aggregate + object: *collection + arguments: + timeoutMS: 100000 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + 
databaseName: *databaseName + command: + aggregate: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - aggregate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: aggregate + object: *collection + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - count on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 5 + - name: count + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - count on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: count + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - count on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: count + 
object: *collection + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - countDocuments on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 5 + - name: countDocuments + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - countDocuments on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: countDocuments + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - estimatedDocumentCount on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 5 + - name: estimatedDocumentCount + object: *collection + arguments: + timeoutMS: 100000 + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - estimatedDocumentCount on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: 
*database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: estimatedDocumentCount + object: *collection + arguments: + timeoutMS: 100000 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - estimatedDocumentCount on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: estimatedDocumentCount + object: *collection + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - distinct on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["distinct"] + blockConnection: true + blockTimeMS: 5 + - name: distinct + object: *collection + arguments: + timeoutMS: 100000 + fieldName: x + filter: {} + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - distinct on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: distinct + object: *collection + arguments: + timeoutMS: 100000 + fieldName: x + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - distinct on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + 
- commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: distinct + object: *collection + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + fieldName: x + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - find on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 5 + - name: find + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - find on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: find + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - find on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: find + object: *collection + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - findOne on collection" + operations: + - name: 
createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 5 + - name: findOne + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - findOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: findOne + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - findOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: findOne + object: *collection + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - listIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 5 + - name: listIndexes + object: *collection + arguments: + timeoutMS: 100000 + + + + - description: 
"wTimeoutMS is ignored if timeoutMS is set - listIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: listIndexes + object: *collection + arguments: + timeoutMS: 100000 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - listIndexNames on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 5 + - name: listIndexNames + object: *collection + arguments: + timeoutMS: 100000 + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - listIndexNames on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: listIndexNames + object: *collection + arguments: + timeoutMS: 100000 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: 
["aggregate"] + blockConnection: true + blockTimeMS: 5 + - name: createChangeStream + object: *collection + arguments: + timeoutMS: 100000 + pipeline: [] + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: createChangeStream + object: *collection + arguments: + timeoutMS: 100000 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - insertOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 5 + - name: insertOne + object: *collection + arguments: + timeoutMS: 100000 + document: { x: 1 } + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - insertOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: insertOne + object: *collection + arguments: + timeoutMS: 100000 + document: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - insertMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session 
session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 5 + - name: insertMany + object: *collection + arguments: + timeoutMS: 100000 + documents: + - { x: 1 } + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - insertMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: insertMany + object: *collection + arguments: + timeoutMS: 100000 + documents: + - { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - deleteOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 5 + - name: deleteOne + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - deleteOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: deleteOne + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - deleteMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + 
id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 5 + - name: deleteMany + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - deleteMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: deleteMany + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - replaceOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 5 + - name: replaceOne + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + replacement: { x: 1 } + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - replaceOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: replaceOne + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - updateOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: 
+ id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 5 + - name: updateOne + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + update: { $set: { x: 1 } } + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - updateOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: updateOne + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - updateMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 5 + - name: updateMany + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + update: { $set: { x: 1 } } + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - updateMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: updateMany + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + 
writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - findOneAndDelete on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 5 + - name: findOneAndDelete + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - findOneAndDelete on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: findOneAndDelete + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - findOneAndDelete on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: findOneAndDelete + object: *collection + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - findOneAndReplace on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner 
+ arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 5 + - name: findOneAndReplace + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + replacement: { x: 1 } + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - findOneAndReplace on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: findOneAndReplace + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - findOneAndReplace on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: findOneAndReplace + object: *collection + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - findOneAndUpdate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 5 + - name: findOneAndUpdate + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + update: { $set: { x: 1 } } + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - findOneAndUpdate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + 
ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: findOneAndUpdate + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - findOneAndUpdate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: findOneAndUpdate + object: *collection + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - bulkWrite on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 5 + - name: bulkWrite + object: *collection + arguments: + timeoutMS: 100000 + requests: + - insertOne: + document: { _id: 1 } + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - bulkWrite on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: bulkWrite + object: *collection + arguments: + timeoutMS: 100000 + requests: + - insertOne: + document: { _id: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: 
*collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - createIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["createIndexes"] + blockConnection: true + blockTimeMS: 5 + - name: createIndex + object: *collection + arguments: + timeoutMS: 100000 + keys: { x: 1 } + name: "x_1" + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - createIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: createIndex + object: *collection + arguments: + timeoutMS: 100000 + keys: { x: 1 } + name: "x_1" + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + command: + createIndexes: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - createIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: createIndex + object: *collection + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + keys: { x: 1 } + name: "x_1" + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + command: + createIndexes: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - dropIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - 
name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 5 + - name: dropIndex + object: *collection + arguments: + timeoutMS: 100000 + name: "x_1" + + expectError: + isClientError: false + isTimeoutError: false + + - description: "wTimeoutMS is ignored if timeoutMS is set - dropIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: dropIndex + object: *collection + arguments: + timeoutMS: 100000 + name: "x_1" + + expectError: + isClientError: false + isTimeoutError: false + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - dropIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: dropIndex + object: *collection + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + name: "x_1" + + expectError: + isClientError: false + isTimeoutError: false + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - dropIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 5 + - name: dropIndexes + object: *collection + arguments: + timeoutMS: 100000 + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - dropIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + 
- commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: dropIndexes + object: *collection + arguments: + timeoutMS: 100000 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - dropIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: dropIndexes + object: *collection + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + diff --git a/spec/spec_tests/data/client_side_operations_timeout/error-transformations.yml b/spec/spec_tests/data/client_side_operations_timeout/error-transformations.yml new file mode 100644 index 0000000000..7bff4776a8 --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/error-transformations.yml @@ -0,0 +1,96 @@ +description: "MaxTimeMSExpired server errors are transformed into a custom timeout error" + +schemaVersion: "1.9" + +# failCommand is available on 4.0 for replica sets and 4.2 for sharded clusters. +runOnRequirements: + - minServerVersion: "4.0" + topologies: ["replicaset"] + - minServerVersion: "4.2" + topologies: ["sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: [] + +tests: + # A server response like {ok: 0, code: 50, ...} is transformed. 
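The fixtures in error-transformations.yml assert that a server reply carrying error code 50 (MaxTimeMSExpired), either at the top level or inside writeConcernError, surfaces as the driver's CSOT timeout error. A minimal Ruby sketch of that transformation follows; the reply is treated as a plain Hash and Mongo::Error::TimeoutError stands in for the spec's "custom timeout error" — both are assumptions for illustration, not APIs taken from this patch.

    require 'mongo'

    # Sketch only: detect MaxTimeMSExpired (code 50) in a server reply and
    # surface it as a client-side timeout error, as the CSOT spec requires.
    # The error class and reply shape here are assumptions, not this patch's API.
    MAX_TIME_MS_EXPIRED = 50

    def transform_max_time_ms_expired!(reply)
      # Code 50 may appear as the top-level error code ({ok: 0, code: 50, ...})
      # or nested under writeConcernError ({ok: 1, writeConcernError: {code: 50}}).
      code = reply['code'] || reply.dig('writeConcernError', 'code')
      if code == MAX_TIME_MS_EXPIRED
        raise Mongo::Error::TimeoutError, 'operation exceeded timeoutMS (MaxTimeMSExpired)'
      end
      reply
    end

Both test cases below exercise exactly these two reply shapes via the failCommand fail point, so a driver passing them must apply the same mapping in its reply handling.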
+ - description: "basic MaxTimeMSExpired error is transformed" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + errorCode: 50 + - name: insertOne + object: *collection + arguments: + document: { _id: 1 } + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + + # A server response like {ok: 1, writeConcernError: {code: 50, ...}} is transformed. + - description: "write concern error MaxTimeMSExpired is transformed" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + writeConcernError: + code: 50 + errmsg: "maxTimeMS expired" + - name: insertOne + object: *collection + arguments: + document: { _id: 1 } + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } diff --git a/spec/spec_tests/data/client_side_operations_timeout/global-timeoutMS.yml b/spec/spec_tests/data/client_side_operations_timeout/global-timeoutMS.yml new file mode 100644 index 0000000000..7b4a78ac78 --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/global-timeoutMS.yml @@ -0,0 +1,3236 @@ +# Tests in this file are generated from global-timeoutMS.yml.template. + +description: "timeoutMS can be configured on a MongoClient" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + topologies: ["replicaset", "sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + +initialData: + - collectionName: &collectionName coll + databaseName: &databaseName test + documents: [] + +tests: + # For each operation, we execute two tests: + # + # 1. timeoutMS can be configured to a non-zero value on a MongoClient and is inherited by the operation. Each test + # constructs a client entity with timeoutMS=250 and configures a fail point to block the operation for 350ms so + # execution results in a timeout error. + # + # 2. timeoutMS can be set to 0 for a MongoClient. Each test constructs a client entity with timeoutMS=0 and + # configures a fail point to block the operation for 15ms. The tests expect the operation to succeed and the command + # sent to not contain a maxTimeMS field. + + - description: "timeoutMS can be configured on a MongoClient - listDatabases on client" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 350 + - name: listDatabases + object: *client + arguments: + filter: {} + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - listDatabases on client" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 15 + - name: listDatabases + object: *client + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - listDatabaseNames on client" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 350 + - name: listDatabaseNames + object: *client + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - listDatabaseNames on client" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 15 + - name: listDatabaseNames + object: *client + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - createChangeStream on client" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 350 + - name: createChangeStream + object: *client + arguments: + pipeline: [] + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - createChangeStream on client" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *client + arguments: + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - aggregate on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 350 + - name: aggregate + object: *database + arguments: + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - aggregate on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: aggregate + object: *database + arguments: + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - listCollections on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 350 + - name: listCollections + object: *database + arguments: + filter: {} + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - listCollections on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 15 + - name: listCollections + object: *database + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - listCollectionNames on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 350 + - name: listCollectionNames + object: *database + arguments: + filter: {} + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - listCollectionNames on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 15 + - name: listCollectionNames + object: *database + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - runCommand on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["ping"] + blockConnection: true + blockTimeMS: 350 + - name: runCommand + object: *database + arguments: + command: { ping: 1 } + commandName: ping + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: ping + databaseName: *databaseName + command: + ping: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - runCommand on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["ping"] + blockConnection: true + blockTimeMS: 15 + - name: runCommand + object: *database + arguments: + command: { ping: 1 } + commandName: ping + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: ping + databaseName: *databaseName + command: + ping: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - createChangeStream on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 350 + - name: createChangeStream + object: *database + arguments: + pipeline: [] + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - createChangeStream on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *database + arguments: + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - aggregate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 350 + - name: aggregate + object: *collection + arguments: + pipeline: [] + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - aggregate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: aggregate + object: *collection + arguments: + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - count on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 350 + - name: count + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - count on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: count + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - countDocuments on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 350 + - name: countDocuments + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - countDocuments on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: countDocuments + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - estimatedDocumentCount on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 350 + - name: estimatedDocumentCount + object: *collection + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - estimatedDocumentCount on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: estimatedDocumentCount + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - distinct on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["distinct"] + blockConnection: true + blockTimeMS: 350 + - name: distinct + object: *collection + arguments: + fieldName: x + filter: {} + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - distinct on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["distinct"] + blockConnection: true + blockTimeMS: 15 + - name: distinct + object: *collection + arguments: + fieldName: x + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - find on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 350 + - name: find + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - find on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: find + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - findOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 350 + - name: findOne + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - findOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: findOne + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - listIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 350 + - name: listIndexes + object: *collection + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - listIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexes + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - listIndexNames on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 350 + - name: listIndexNames + object: *collection + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - listIndexNames on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexNames + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - createChangeStream on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 350 + - name: createChangeStream + object: *collection + arguments: + pipeline: [] + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - createChangeStream on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *collection + arguments: + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - insertOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 350 + - name: insertOne + object: *collection + arguments: + document: { x: 1 } + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - insertOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertOne + object: *collection + arguments: + document: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - insertMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 350 + - name: insertMany + object: *collection + arguments: + documents: + - { x: 1 } + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - insertMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertMany + object: *collection + arguments: + documents: + - { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - deleteOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 350 + - name: deleteOne + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - deleteOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteOne + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - deleteMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 350 + - name: deleteMany + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - deleteMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteMany + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - replaceOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 350 + - name: replaceOne + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - replaceOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: replaceOne + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - updateOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 350 + - name: updateOne + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - updateOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateOne + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - updateMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 350 + - name: updateMany + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - updateMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateMany + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - findOneAndDelete on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 350 + - name: findOneAndDelete + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - findOneAndDelete on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndDelete + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - findOneAndReplace on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 350 + - name: findOneAndReplace + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - findOneAndReplace on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndReplace + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - findOneAndUpdate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 350 + - name: findOneAndUpdate + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - findOneAndUpdate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndUpdate + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - bulkWrite on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 350 + - name: bulkWrite + object: *collection + arguments: + requests: + - insertOne: + document: { _id: 1 } + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - bulkWrite on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: bulkWrite + object: *collection + arguments: + requests: + - insertOne: + document: { _id: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - createIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["createIndexes"] + blockConnection: true + blockTimeMS: 350 + - name: createIndex + object: *collection + arguments: + keys: { x: 1 } + name: "x_1" + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + command: + createIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - createIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["createIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: createIndex + object: *collection + arguments: + keys: { x: 1 } + name: "x_1" + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + command: + createIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - dropIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 350 + - name: dropIndex + object: *collection + arguments: + name: "x_1" + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - dropIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: dropIndex + object: *collection + arguments: + name: "x_1" + + expectError: + isClientError: false + isTimeoutError: false + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - dropIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 350 + - name: dropIndexes + object: *collection + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - dropIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: dropIndexes + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$exists: false } + \ No newline at end of file diff --git a/spec/spec_tests/data/client_side_operations_timeout/gridfs-advanced.yml b/spec/spec_tests/data/client_side_operations_timeout/gridfs-advanced.yml new file mode 100644 index 0000000000..b03812b719 --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/gridfs-advanced.yml @@ -0,0 +1,207 @@ +description: "timeoutMS behaves correctly for advanced GridFS API operations" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + serverless: forbid # GridFS ops can be slow on serverless. + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 75 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: &databaseName test + - bucket: + id: &bucket bucket + database: *database + - collection: + id: &filesCollection filesCollection + database: *database + collectionName: &filesCollectionName fs.files + - collection: + id: &chunksCollection chunksCollection + database: *database + collectionName: &chunksCollectionName fs.chunks + +initialData: + - collectionName: *filesCollectionName + databaseName: *databaseName + documents: + - _id: &fileDocumentId { $oid: "000000000000000000000005" } + length: 8 + chunkSize: 4 + uploadDate: { $date: "1970-01-01T00:00:00.000Z" } + filename: "length-8" + contentType: "application/octet-stream" + aliases: [] + metadata: {} + - collectionName: *chunksCollectionName + databaseName: *databaseName + documents: + - _id: { $oid: "000000000000000000000005" } + files_id: *fileDocumentId + n: 0 + data: { $binary: { base64: "ESIzRA==", subType: "00" } } # hex: 11223344 + - _id: { $oid: "000000000000000000000006" } + files_id: *fileDocumentId + n: 1 + data: { $binary: { base64: "ESIzRA==", subType: "00" } } # hex: 11223344 + +tests: + # Tests for the "rename" operation. 
+ # Ruby driver does not support rename for GridFS bucket + + # - description: "timeoutMS can be overridden for a rename" + # operations: + # - name: failPoint + # object: testRunner + # arguments: + # client: *failPointClient + # failPoint: + # configureFailPoint: failCommand + # mode: { times: 1 } + # data: + # failCommands: ["update"] + # blockConnection: true + # blockTimeMS: 100 + # - name: rename + # object: *bucket + # arguments: + # id: *fileDocumentId + # newFilename: "foo" + # timeoutMS: 2000 # The client timeoutMS is 75ms and the operation blocks for 100ms, so 2000ms should let it succeed. + # expectEvents: + # - client: *client + # events: + # - commandStartedEvent: + # commandName: update + # databaseName: *databaseName + # command: + # update: *filesCollectionName + # maxTimeMS: { $$type: ["int", "long"] } + + # - description: "timeoutMS applied to update during a rename" + # operations: + # - name: failPoint + # object: testRunner + # arguments: + # client: *failPointClient + # failPoint: + # configureFailPoint: failCommand + # mode: { times: 1 } + # data: + # failCommands: ["update"] + # blockConnection: true + # blockTimeMS: 100 + # - name: rename + # object: *bucket + # arguments: + # id: *fileDocumentId + # newFilename: "foo" + # expectError: + # isTimeoutError: true + # expectEvents: + # - client: *client + # events: + # - commandStartedEvent: + # commandName: update + # databaseName: *databaseName + # command: + # update: *filesCollectionName + # maxTimeMS: { $$type: ["int", "long"] } + + # Tests for the "drop" operation. Any tests that might result in multiple commands being sent do not have expectEvents + # assertions as these assertions reduce test robustness and can cause flaky failures. + + - description: "timeoutMS can be overridden for drop" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["drop"] + blockConnection: true + blockTimeMS: 100 + - name: drop + object: *bucket + arguments: + timeoutMS: 2000 # The client timeoutMS is 75ms and the operation blocks for 100ms, so 2000ms should let it succeed. + + - description: "timeoutMS applied to files collection drop" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["drop"] + blockConnection: true + blockTimeMS: 100 + - name: drop + object: *bucket + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: drop + databaseName: *databaseName + command: + drop: *filesCollectionName + maxTimeMS: { $$type: ["int", "long"] } + + - description: "timeoutMS applied to chunks collection drop" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: + # Skip the drop for the files collection. 
+ skip: 1 + data: + failCommands: ["drop"] + blockConnection: true + blockTimeMS: 100 + - name: drop + object: *bucket + expectError: + isTimeoutError: true + + - description: "timeoutMS applied to drop as a whole, not individual parts" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["drop"] + blockConnection: true + blockTimeMS: 50 + - name: drop + object: *bucket + expectError: + isTimeoutError: true diff --git a/spec/spec_tests/data/client_side_operations_timeout/gridfs-delete.yml b/spec/spec_tests/data/client_side_operations_timeout/gridfs-delete.yml new file mode 100644 index 0000000000..9c72537c38 --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/gridfs-delete.yml @@ -0,0 +1,152 @@ +description: "timeoutMS behaves correctly for GridFS delete operations" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + serverless: forbid # GridFS ops can be slow on serverless. + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 75 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: &databaseName test + - bucket: + id: &bucket bucket + database: *database + - collection: + id: &filesCollection filesCollection + database: *database + collectionName: &filesCollectionName fs.files + - collection: + id: &chunksCollection chunksCollection + database: *database + collectionName: &chunksCollectionName fs.chunks + +initialData: + - collectionName: *filesCollectionName + databaseName: *databaseName + documents: + - _id: &fileDocumentId { $oid: "000000000000000000000005" } + length: 8 + chunkSize: 4 + uploadDate: { $date: "1970-01-01T00:00:00.000Z" } + filename: "length-8" + contentType: "application/octet-stream" + aliases: [] + metadata: {} + - collectionName: *chunksCollectionName + databaseName: *databaseName + documents: + - _id: { $oid: "000000000000000000000005" } + files_id: *fileDocumentId + n: 0 + data: { $binary: { base64: "ESIzRA==", subType: "00" } } # hex: 11223344 + - _id: { $oid: "000000000000000000000006" } + files_id: *fileDocumentId + n: 1 + data: { $binary: { base64: "ESIzRA==", subType: "00" } } # hex: 11223344 + +tests: + - description: "timeoutMS can be overridden for delete" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 100 + - name: delete + object: *bucket + arguments: + id: *fileDocumentId + timeoutMS: 1000 # The client timeoutMS is 75ms and the operation blocks for 100ms, so 1000ms should let it succeed. 
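Editorial note (not part of the patch): the override tests above rest on simple budget arithmetic: a per-operation timeoutMS takes precedence over the client-level value, and timeoutMS=0 disables the client-side timeout entirely. A minimal Ruby sketch of that reasoning, with invented method names (this is not the driver's implementation):

    # Resolve the timeout budget the way these tests reason about it:
    # an explicit per-operation timeoutMS wins over the client's setting,
    # and a value of 0 means "no client-side timeout".
    def effective_timeout_ms(client_timeout_ms, operation_timeout_ms = nil)
      operation_timeout_ms.nil? ? client_timeout_ms : operation_timeout_ms
    end

    def times_out?(blocked_ms, client_timeout_ms, operation_timeout_ms = nil)
      budget = effective_timeout_ms(client_timeout_ms, operation_timeout_ms)
      budget != 0 && blocked_ms >= budget
    end

    # The delete test above: client timeoutMS=75, per-operation override of 1000,
    # fail point blocks the command for 100ms.
    times_out?(100, 75, 1000) # => false (succeeds under the 1000ms override)
    times_out?(100, 75)       # => true  (would exceed the 75ms client budget)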
+ + - description: "timeoutMS applied to delete against the files collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 100 + - name: delete + object: *bucket + arguments: + id: *fileDocumentId + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *filesCollectionName + maxTimeMS: { $$type: ["int", "long"] } + + - description: "timeoutMS applied to delete against the chunks collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: + # The first "delete" will be against the files collection, so we skip it. + skip: 1 + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 100 + - name: delete + object: *bucket + arguments: + id: *fileDocumentId + expectError: + isTimeoutError: true + + # Test that drivers are not refreshing the timeout between commands. We test this by blocking both "delete" commands + # for 50ms each. The delete should inherit timeoutMS=75 from the client/database and the server takes over 75ms + # total, so the operation should fail. + - description: "timeoutMS applied to entire delete, not individual parts" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 50 + - name: delete + object: *bucket + arguments: + id: *fileDocumentId + expectError: + isTimeoutError: true diff --git a/spec/spec_tests/data/client_side_operations_timeout/gridfs-download.yml b/spec/spec_tests/data/client_side_operations_timeout/gridfs-download.yml new file mode 100644 index 0000000000..772ffd6e08 --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/gridfs-download.yml @@ -0,0 +1,182 @@ +description: "timeoutMS behaves correctly for GridFS download operations" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + serverless: forbid # GridFS ops can be slow on serverless. 
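Editorial note (not part of the patch): the last delete test above checks that the 75ms budget is shared by both delete commands (fs.files, then fs.chunks) rather than being refreshed between them. One way to picture that shared budget is a single monotonic deadline for the whole operation, as in this self-contained Ruby sketch (illustrative only; not how the driver is implemented):

    # One deadline for the whole operation; every command it sends consumes
    # from the same remaining budget.
    class OperationDeadline
      def initialize(timeout_ms)
        @deadline = now + timeout_ms / 1000.0
      end

      def remaining_ms
        ((@deadline - now) * 1000).round
      end

      def expired?
        remaining_ms <= 0
      end

      private

      def now
        Process.clock_gettime(Process::CLOCK_MONOTONIC)
      end
    end

    # timeoutMS=75 with two delete commands each blocked ~50ms: the second
    # command starts with roughly 25ms left, so the operation as a whole fails.
    deadline = OperationDeadline.new(75)
    2.times { sleep(0.05) unless deadline.expired? }
    deadline.expired? # => true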
+ +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 75 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: &databaseName test + - bucket: + id: &bucket bucket + database: *database + - collection: + id: &filesCollection filesCollection + database: *database + collectionName: &filesCollectionName fs.files + - collection: + id: &chunksCollection chunksCollection + database: *database + collectionName: &chunksCollectionName fs.chunks + +initialData: + - collectionName: *filesCollectionName + databaseName: *databaseName + documents: + - _id: &fileDocumentId { $oid: "000000000000000000000005" } + length: 8 + chunkSize: 4 + uploadDate: { $date: "1970-01-01T00:00:00.000Z" } + filename: "length-8" + contentType: "application/octet-stream" + aliases: [] + metadata: {} + - collectionName: *chunksCollectionName + databaseName: *databaseName + documents: + - _id: { $oid: "000000000000000000000005" } + files_id: *fileDocumentId + n: 0 + data: { $binary: { base64: "ESIzRA==", subType: "00" } } # hex: 11223344 + - _id: { $oid: "000000000000000000000006" } + files_id: *fileDocumentId + n: 1 + data: { $binary: { base64: "ESIzRA==", subType: "00" } } # hex: 11223344 + +tests: + - description: "timeoutMS can be overridden for download" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 100 + - name: download + object: *bucket + arguments: + id: *fileDocumentId + timeoutMS: 1000 # The client timeoutMS is 75ms and the operation blocks for 100ms, so 1000ms should let it succeed. + + - description: "timeoutMS applied to find to get files document" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 100 + - name: download + object: *bucket + arguments: + id: *fileDocumentId + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *filesCollectionName + maxTimeMS: { $$type: ["int", "long"] } + + - description: "timeoutMS applied to find to get chunks" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: + # The first "find" will be against the files collection, so we skip it. + skip: 1 + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 100 + - name: download + object: *bucket + arguments: + id: *fileDocumentId + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *filesCollectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *chunksCollectionName + maxTimeMS: { $$type: ["int", "long"] } + + # Test that drivers are not refreshing the timeout between commands. We test this by blocking both "find" commands + # for 50ms each. 
The download should inherit timeoutMS=75 from the client/database and the server takes over 75ms + # total, so the operation should fail. + - description: "timeoutMS applied to entire download, not individual parts" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 50 + - name: download + object: *bucket + arguments: + id: *fileDocumentId + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *filesCollectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *chunksCollectionName + maxTimeMS: { $$type: ["int", "long"] } diff --git a/spec/spec_tests/data/client_side_operations_timeout/gridfs-find.yml b/spec/spec_tests/data/client_side_operations_timeout/gridfs-find.yml new file mode 100644 index 0000000000..000150ae67 --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/gridfs-find.yml @@ -0,0 +1,100 @@ +description: "timeoutMS behaves correctly for GridFS find operations" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + serverless: forbid # GridFS ops can be slow on serverless. + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 75 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: &databaseName test + - bucket: + id: &bucket bucket + database: *database + - collection: + id: &filesCollection filesCollection + database: *database + collectionName: &filesCollectionName fs.files + - collection: + id: &chunksCollection chunksCollection + database: *database + collectionName: &chunksCollectionName fs.chunks + +initialData: + - collectionName: *filesCollectionName + databaseName: *databaseName + documents: [] + - collectionName: *chunksCollectionName + databaseName: *databaseName + documents: [] + +tests: + - description: "timeoutMS can be overridden for a find" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 100 + - name: find + object: *bucket + arguments: + filter: {} + timeoutMS: 1000 # The client timeoutMS is 75ms and the operation blocks for 100ms, so 1000ms should let it succeed. 
+ expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *filesCollectionName + maxTimeMS: { $$type: ["int", "long"] } + + - description: "timeoutMS applied to find command" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 100 + - name: find + object: *bucket + arguments: + filter: {} + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *filesCollectionName + maxTimeMS: { $$type: ["int", "long"] } diff --git a/spec/spec_tests/data/client_side_operations_timeout/gridfs-upload.yml b/spec/spec_tests/data/client_side_operations_timeout/gridfs-upload.yml new file mode 100644 index 0000000000..51e1366878 --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/gridfs-upload.yml @@ -0,0 +1,249 @@ +description: "timeoutMS behaves correctly for GridFS upload operations" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + serverless: forbid # GridFS ops can be slow on serverless. + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 75 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: &databaseName test + - bucket: + id: &bucket bucket + database: *database + - collection: + id: &filesCollection filesCollection + database: *database + collectionName: &filesCollectionName fs.files + - collection: + id: &chunksCollection chunksCollection + database: *database + collectionName: &chunksCollectionName fs.chunks + +initialData: + - collectionName: *filesCollectionName + databaseName: *databaseName + documents: [] + - collectionName: *chunksCollectionName + databaseName: *databaseName + documents: [] + +tests: + # Many tests in this file do not specify command monitoring expectations because GridFS uploads internally do a + # number of operations, so expecting an exact set of commands can cause flaky failures. + + - description: "timeoutMS can be overridden for upload" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 100 + - name: upload + object: *bucket + arguments: + filename: filename + source: { $$hexBytes: "1122334455" } + timeoutMS: 1000 + + # On the first write to the bucket, drivers check if the files collection is empty to see if indexes need to be + # created. + - description: "timeoutMS applied to initial find on files collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 100 + - name: upload + object: *bucket + arguments: + filename: filename + source: { $$hexBytes: "1122334455" } + expectError: + isTimeoutError: true + + # On the first write to the bucket, drivers check if the files collection has the correct indexes. 
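Editorial note (not part of the patch): the comments around the next few tests enumerate the commands a first GridFS upload can issue, and the "times"/"skip" fail point modes below single each one out. As a quick reference, an approximate ordering derived from those comments (illustrative, not normative; real drivers may differ in detail):

    UPLOAD_COMMANDS = [
      'find',          # is fs.files empty? (decides whether indexes are needed)
      'listIndexes',   # fs.files
      'createIndexes', # fs.files
      'listIndexes',   # fs.chunks
      'createIndexes', # fs.chunks
      'insert',        # chunk document(s)
      'insert'         # files document
    ].freeze

    # { skip: 1 } on "createIndexes" lets the files-collection index through and
    # blocks the chunks-collection one; { skip: 1 } on "insert" blocks the files
    # document rather than the chunk.
    UPLOAD_COMMANDS.count('createIndexes') # => 2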
+ - description: "timeoutMS applied to listIndexes on files collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 100 + - name: upload + object: *bucket + arguments: + filename: filename + source: { $$hexBytes: "1122334455" } + expectError: + isTimeoutError: true + + # If the files collection is empty when the first write to the bucket occurs, drivers attempt to create an index + # on the bucket's files collection. + - description: "timeoutMS applied to index creation for files collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["createIndexes"] + blockConnection: true + blockTimeMS: 100 + - name: upload + object: *bucket + arguments: + filename: filename + source: { $$hexBytes: "1122334455" } + expectError: + isTimeoutError: true + + # On the first write to the bucket, drivers check if the chunks collection has the correct indexes. + - description: "timeoutMS applied to listIndexes on chunks collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # The first listIndexes will be on the files collection, so we skip it. + mode: { skip: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 100 + - name: upload + object: *bucket + arguments: + filename: filename + source: { $$hexBytes: "1122334455" } + expectError: + isTimeoutError: true + + # If the files collection is empty when the first write to the bucket occurs, drivers attempt to create an index + # on the bucket's chunks collection. + - description: "timeoutMS applied to index creation for chunks collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # This index is created after the one on the files collection, so we skip the first createIndexes command + # and target the second. + mode: { skip: 1 } + data: + failCommands: ["createIndexes"] + blockConnection: true + blockTimeMS: 100 + - name: upload + object: *bucket + arguments: + filename: filename + source: { $$hexBytes: "1122334455" } + expectError: + isTimeoutError: true + + - description: "timeoutMS applied to chunk insertion" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 100 + - name: upload + object: *bucket + arguments: + filename: filename + source: { $$hexBytes: "1122334455" } + expectError: + isTimeoutError: true + + - description: "timeoutMS applied to creation of files document" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Skip the insert to upload the chunk. Because the whole file fits into one chunk, the second insert will + # be the files document upload. 
+ mode: { skip: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 100 + - name: upload + object: *bucket + arguments: + filename: filename + source: { $$hexBytes: "1122334455" } + expectError: + isTimeoutError: true + + # Test that drivers apply timeoutMS to the entire upload rather than refreshing it between individual commands. We + # test this by blocking the "find" and "listIndexes" commands for 50ms each and performing an upload. The upload + # should inherit timeoutMS=75 from the client/database and the server takes over 75ms total, so the operation should + # fail. + - description: "timeoutMS applied to upload as a whole, not individual parts" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["find", "listIndexes"] + blockConnection: true + blockTimeMS: 50 + - name: upload + object: *bucket + arguments: + filename: filename + source: { $$hexBytes: "1122334455" } + expectError: + isTimeoutError: true diff --git a/spec/spec_tests/data/client_side_operations_timeout/legacy-timeouts.yml b/spec/spec_tests/data/client_side_operations_timeout/legacy-timeouts.yml new file mode 100644 index 0000000000..81c48f7c4f --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/legacy-timeouts.yml @@ -0,0 +1,204 @@ +description: "legacy timeouts continue to work if timeoutMS is not set" + +schemaVersion: "1.0" + +runOnRequirements: + - minServerVersion: "4.4" + +initialData: + - collectionName: &collectionName coll + databaseName: &databaseName test + documents: [] + +tests: + - description: "socketTimeoutMS is not used to derive a maxTimeMS command field" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + observeEvents: + - commandStartedEvent + uriOptions: + socketTimeoutMS: 50000 + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: insertOne + object: *collection + arguments: + document: { x: 1 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + + - description: "waitQueueTimeoutMS is not used to derive a maxTimeMS command field" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + observeEvents: + - commandStartedEvent + uriOptions: + waitQueueTimeoutMS: 50000 + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: insertOne + object: *collection + arguments: + document: { x: 1 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + + - description: "wTimeoutMS is not used to derive a maxTimeMS command field" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + observeEvents: + - commandStartedEvent + uriOptions: + wTimeoutMS: &wTimeoutMS 50000 + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: 
&collection collection + database: *database + collectionName: *collectionName + - name: insertOne + object: *collection + arguments: + document: { x: 1 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + writeConcern: + wtimeout: *wTimeoutMS + + # If the maxTimeMS option is set for a specific command, it should be used as the maxTimeMS command field without any + # modifications. This is different from timeoutMS because in that case, drivers subtract the target server's min + # RTT from the remaining timeout to derive a maxTimeMS field. + - description: "maxTimeMS option is used directly as the maxTimeMS field on a command" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: estimatedDocumentCount + object: *collection + arguments: + maxTimeMS: &maxTimeMS 50000 + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: *maxTimeMS + + # Same test as above but with the maxCommitTimeMS option. + - description: "maxCommitTimeMS option is used directly as the maxTimeMS field on a commitTransaction command" + runOnRequirements: + # Note: minServerVersion is specified in top-level runOnRequirements + - topologies: ["replicaset", "sharded"] + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - session: + id: &session session + client: *client + sessionOptions: + defaultTransactionOptions: + maxCommitTimeMS: &maxCommitTimeMS 1000 + - name: startTransaction + object: *session + - name: insertOne + object: *collection + arguments: + document: { _id: 1 } + session: *session + - name: commitTransaction + object: *session + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + maxTimeMS: *maxCommitTimeMS diff --git a/spec/spec_tests/data/client_side_operations_timeout/non-tailable-cursors.yml b/spec/spec_tests/data/client_side_operations_timeout/non-tailable-cursors.yml new file mode 100644 index 0000000000..4862ba21a9 --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/non-tailable-cursors.yml @@ -0,0 +1,307 @@ +description: "timeoutMS behaves correctly for non-tailable cursors" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 10 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + 
databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: + - { _id: 0 } + - { _id: 1 } + - { _id: 2 } + - collectionName: &aggregateOutputCollectionName aggregateOutputColl + databaseName: *databaseName + documents: [] + +tests: + # If timeoutMode is explicitly set to CURSOR_LIFETIME, the timeout should apply to the initial command. + # This should also be the case if timeoutMode is unset, but this is already tested in global-timeoutMS.yml. + - description: "timeoutMS applied to find if timeoutMode is cursor_lifetime" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + # changed to 30ms to accommodate jruby latencies + blockTimeMS: 30 + - name: find + object: *collection + arguments: + filter: {} + # added as a 25ms timeout to accommodate jruby latencies + timeoutMS: 25 + timeoutMode: cursorLifetime + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + + # If timeoutMode is unset, it should default to CURSOR_LIFETIME and the time remaining after the find succeeds should + # be applied to the getMore. + - description: "remaining timeoutMS applied to getMore if timeoutMode is unset" + operations: + # Block find/getMore for 15ms. + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["find", "getMore"] + blockConnection: true + # bumped to 50 to accommodate jruby latencies + blockTimeMS: 50 + # Run a find with timeoutMS=39 and batchSize=1 to force two batches, which will cause a find and a getMore to be + # sent. Both will block for 20ms so together they will go over the timeout. + - name: find + object: *collection + arguments: + filter: {} + # bumped to 99 to accommodate jruby latencies + timeoutMS: 99 + batchSize: 2 + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: getMore + databaseName: *databaseName + command: + getMore: { $$type: ["int", "long"] } + collection: *collectionName + maxTimeMS: { $$exists: false } + + # Same test as above, but with timeoutMode explicitly set to CURSOR_LIFETIME. 
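Editorial note (not part of the patch): several tests above assert that the initial command carries a maxTimeMS field of type int/long rather than a fixed value. As the legacy-timeouts file earlier in this diff explains, drivers derive that field from the remaining timeoutMS minus the target server's minimum round-trip time. A minimal sketch of that derivation, with invented names (not the driver's code):

    # maxTimeMS is whatever is left of the operation budget once the expected
    # network round trip is accounted for; if nothing would be left, a
    # client-side timeout is raised instead of sending the command.
    def derive_max_time_ms(remaining_timeout_ms, min_rtt_ms)
      max_time = remaining_timeout_ms - min_rtt_ms
      raise 'operation would time out before reaching the server' if max_time <= 0
      max_time
    end

    derive_max_time_ms(250, 5) # => 245
    # derive_max_time_ms(10, 12) would raise before any command is sent.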
+ - description: "remaining timeoutMS applied to getMore if timeoutMode is cursor_lifetime" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["find", "getMore"] + blockConnection: true + blockTimeMS: 20 + - name: find + object: *collection + arguments: + filter: {} + timeoutMode: cursorLifetime + timeoutMS: 39 + batchSize: 2 + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: getMore + databaseName: *databaseName + command: + getMore: { $$type: ["int", "long"] } + collection: *collectionName + maxTimeMS: { $$exists: false } + + # If timeoutMode=ITERATION, timeoutMS should apply to the initial find command and the command shouldn't have a + # maxTimeMS field. + - description: "timeoutMS applied to find if timeoutMode is iteration" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: find + object: *collection + arguments: + filter: {} + timeoutMode: iteration + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + + # If timeoutMode=ITERATION, timeoutMS applies separately to the initial find and the getMore on the cursor. Neither + # command should have a maxTimeMS field. This is a success test. The "find" is executed with timeoutMS=29 and both + # "find" and "getMore" commands are blocked for 15ms each. Neither exceeds the timeout, so iteration succeeds. + - description: "timeoutMS is refreshed for getMore if timeoutMode is iteration - success" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["find", "getMore"] + blockConnection: true + # blockTimeMS: 15 + # Increase timeout + blockTimeMS: 20 + - name: find + object: *collection + arguments: + filter: {} + timeoutMode: iteration + # timeoutMS: 29 + # Increase timeout + timeoutMS: 39 + batchSize: 2 + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: getMore + databaseName: *databaseName + command: + getMore: { $$type: ["int", "long"] } + collection: *collectionName + maxTimeMS: { $$exists: false } + + # If timeoutMode=ITERATION, timeoutMS applies separately to the initial find and the getMore on the cursor. Neither + # command should have a maxTimeMS field. This is a failure test. The "find" inherits timeoutMS=10 and "getMore" + # commands are blocked for 15ms, causing iteration to fail with a timeout error. 
+ - description: "timeoutMS is refreshed for getMore if timeoutMode is iteration - failure" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["getMore"] + blockConnection: true + blockTimeMS: 15 + - name: find + object: *collection + arguments: + filter: {} + timeoutMode: iteration + batchSize: 2 + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: getMore + databaseName: *databaseName + command: + getMore: { $$type: ["int", "long"] } + collection: *collectionName + maxTimeMS: { $$exists: false } + + - description: "aggregate with $out errors if timeoutMode is iteration" + operations: + - name: aggregate + object: *collection + arguments: + pipeline: + - $out: *aggregateOutputCollectionName + timeoutMS: 100 + timeoutMode: iteration + expectError: + isClientError: true + expectEvents: + - client: *client + events: [] + + - description: "aggregate with $merge errors if timeoutMode is iteration" + operations: + - name: aggregate + object: *collection + arguments: + pipeline: + - $merge: *aggregateOutputCollectionName + timeoutMS: 100 + timeoutMode: iteration + expectError: + isClientError: true + expectEvents: + - client: *client + events: [] diff --git a/spec/spec_tests/data/client_side_operations_timeout/override-collection-timeoutMS.yml b/spec/spec_tests/data/client_side_operations_timeout/override-collection-timeoutMS.yml new file mode 100644 index 0000000000..d1d1c61056 --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/override-collection-timeoutMS.yml @@ -0,0 +1,1877 @@ +# Tests in this file are generated from override-collection-timeoutMS.yml.template. + +description: "timeoutMS can be overridden for a MongoCollection" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + topologies: ["replicaset", "sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 10 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: &databaseName test + +initialData: + - collectionName: &collectionName coll + databaseName: *databaseName + documents: [] + +tests: + # For each collection-level operation, we execute two tests: + # + # 1. timeoutMS can be overridden to a non-zero value for a MongoCollection. Each test uses the client entity defined + # above to construct a collection entity with timeoutMS=1000 and configures a fail point to block the operation for + # 15ms so the operation succeeds. + # + # 2. timeoutMS can be overridden to 0 for a MongoCollection. Each test constructs a collection entity with + # timeoutMS=0 using the global client entity and configures a fail point to block the operation for 15ms. The + # operation should succeed and the command sent to the server should not contain a maxTimeMS field. 
+ + - description: "timeoutMS can be configured on a MongoCollection - aggregate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: aggregate + object: *collection + arguments: + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - aggregate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: aggregate + object: *collection + arguments: + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - count on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: count + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - count on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: count + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - countDocuments on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: 
+ timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: countDocuments + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - countDocuments on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: countDocuments + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - estimatedDocumentCount on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: estimatedDocumentCount + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - estimatedDocumentCount on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: estimatedDocumentCount + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - distinct on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["distinct"] + blockConnection: true + blockTimeMS: 15 + - name: distinct + object: 
*collection + arguments: + fieldName: x + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - distinct on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["distinct"] + blockConnection: true + blockTimeMS: 15 + - name: distinct + object: *collection + arguments: + fieldName: x + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - find on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: find + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - find on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: find + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - findOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: findOne + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - 
findOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: findOne + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - listIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexes + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - listIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexes + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - listIndexNames on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexNames + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - listIndexNames on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient 
+ failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexNames + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - createChangeStream on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *collection + arguments: + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - createChangeStream on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *collection + arguments: + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - insertOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertOne + object: *collection + arguments: + document: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - insertOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertOne + object: *collection + arguments: + document: { x: 1 } + + + expectEvents: + - 
client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - insertMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertMany + object: *collection + arguments: + documents: + - { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - insertMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertMany + object: *collection + arguments: + documents: + - { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - deleteOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteOne + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - deleteOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteOne + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - deleteMany on collection" + 
operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteMany + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - deleteMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteMany + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - replaceOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: replaceOne + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - replaceOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: replaceOne + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - updateOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: 
testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateOne + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - updateOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateOne + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - updateMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateMany + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - updateMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateMany + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - findOneAndDelete on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: 
true + blockTimeMS: 15 + - name: findOneAndDelete + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - findOneAndDelete on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndDelete + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - findOneAndReplace on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndReplace + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - findOneAndReplace on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndReplace + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - findOneAndUpdate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndUpdate + object: *collection + arguments: + filter: {} + update: { $set: 
{ x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - findOneAndUpdate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndUpdate + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - bulkWrite on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: bulkWrite + object: *collection + arguments: + requests: + - insertOne: + document: { _id: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - bulkWrite on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: bulkWrite + object: *collection + arguments: + requests: + - insertOne: + document: { _id: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - createIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["createIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: createIndex + object: *collection + arguments: + keys: { x: 1 } + name: "x_1" + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + 
databaseName: *databaseName + command: + createIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - createIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["createIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: createIndex + object: *collection + arguments: + keys: { x: 1 } + name: "x_1" + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + command: + createIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - dropIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: dropIndex + object: *collection + arguments: + name: "x_1" + + expectError: + isClientError: false + isTimeoutError: false + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - dropIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: dropIndex + object: *collection + arguments: + name: "x_1" + + expectError: + isClientError: false + isTimeoutError: false + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - dropIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: dropIndexes + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - 
description: "timeoutMS can be set to 0 on a MongoCollection - dropIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: dropIndexes + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$exists: false } + \ No newline at end of file diff --git a/spec/spec_tests/data/client_side_operations_timeout/override-operation-timeoutMS.yml b/spec/spec_tests/data/client_side_operations_timeout/override-operation-timeoutMS.yml new file mode 100644 index 0000000000..28eabcb7c8 --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/override-operation-timeoutMS.yml @@ -0,0 +1,1918 @@ +# Tests in this file are generated from override-operation-timeoutMS.yml.template. + +description: "timeoutMS can be overridden for an operation" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + topologies: ["replicaset", "sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 10 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: [] + +tests: + # For each level operation, we execute two tests: + # + # 1. timeoutMS can be overridden to a non-zero value for an operation. Each test executes an operation using one of + # the entities defined above with an overridden timeoutMS=1000 and configures a fail point to block the operation for + # 15ms so the operation succeeds. + # + # 2. timeoutMS can be overridden to 0 for an operation. Each test executes an operation using the entities defined + # above with an overridden timeoutMS=0 so the operation succeeds. 
+ + - description: "timeoutMS can be configured for an operation - listDatabases on client" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 15 + - name: listDatabases + object: *client + arguments: + timeoutMS: 1000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - listDatabases on client" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 15 + - name: listDatabases + object: *client + arguments: + timeoutMS: 0 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - listDatabaseNames on client" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 15 + - name: listDatabaseNames + object: *client + arguments: + timeoutMS: 1000 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - listDatabaseNames on client" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 15 + - name: listDatabaseNames + object: *client + arguments: + timeoutMS: 0 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - createChangeStream on client" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *client + arguments: + timeoutMS: 1000 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - createChangeStream on client" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *client + arguments: + timeoutMS: 0 + pipeline: [] + + + expectEvents: + - client: *client + events: + - 
commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - aggregate on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: aggregate + object: *database + arguments: + timeoutMS: 1000 + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - aggregate on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: aggregate + object: *database + arguments: + timeoutMS: 0 + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - listCollections on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 15 + - name: listCollections + object: *database + arguments: + timeoutMS: 1000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - listCollections on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 15 + - name: listCollections + object: *database + arguments: + timeoutMS: 0 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - listCollectionNames on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 15 + - name: listCollectionNames + object: *database + arguments: + timeoutMS: 1000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - listCollectionNames on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + 
configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 15 + - name: listCollectionNames + object: *database + arguments: + timeoutMS: 0 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - runCommand on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["ping"] + blockConnection: true + blockTimeMS: 15 + - name: runCommand + object: *database + arguments: + timeoutMS: 1000 + command: { ping: 1 } + commandName: ping + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: ping + databaseName: *databaseName + command: + ping: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - runCommand on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["ping"] + blockConnection: true + blockTimeMS: 15 + - name: runCommand + object: *database + arguments: + timeoutMS: 0 + command: { ping: 1 } + commandName: ping + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: ping + databaseName: *databaseName + command: + ping: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - createChangeStream on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *database + arguments: + timeoutMS: 1000 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - createChangeStream on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *database + arguments: + timeoutMS: 0 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - aggregate on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: aggregate + object: *collection + arguments: + timeoutMS: 1000 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", 
"long"] } + - description: "timeoutMS can be set to 0 for an operation - aggregate on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: aggregate + object: *collection + arguments: + timeoutMS: 0 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - count on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: count + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - count on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: count + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - countDocuments on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: countDocuments + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - countDocuments on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: countDocuments + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - estimatedDocumentCount on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: estimatedDocumentCount + object: *collection + arguments: + timeoutMS: 1000 + + + expectEvents: + - 
client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - estimatedDocumentCount on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: estimatedDocumentCount + object: *collection + arguments: + timeoutMS: 0 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - distinct on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["distinct"] + blockConnection: true + blockTimeMS: 15 + - name: distinct + object: *collection + arguments: + timeoutMS: 1000 + fieldName: x + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - distinct on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["distinct"] + blockConnection: true + blockTimeMS: 15 + - name: distinct + object: *collection + arguments: + timeoutMS: 0 + fieldName: x + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - find on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: find + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - find on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: find + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - findOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + 
blockConnection: true + blockTimeMS: 15 + - name: findOne + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - findOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: findOne + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - listIndexes on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexes + object: *collection + arguments: + timeoutMS: 1000 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - listIndexes on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexes + object: *collection + arguments: + timeoutMS: 0 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - listIndexNames on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexNames + object: *collection + arguments: + timeoutMS: 1000 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - listIndexNames on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexNames + object: *collection + arguments: + timeoutMS: 0 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - createChangeStream on collection" + operations: + - name: failPoint + 
object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *collection + arguments: + timeoutMS: 1000 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - createChangeStream on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *collection + arguments: + timeoutMS: 0 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - insertOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertOne + object: *collection + arguments: + timeoutMS: 1000 + document: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - insertOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertOne + object: *collection + arguments: + timeoutMS: 0 + document: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - insertMany on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertMany + object: *collection + arguments: + timeoutMS: 1000 + documents: + - { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - insertMany on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertMany + object: *collection + arguments: + timeoutMS: 0 + documents: + - { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: 
insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - deleteOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteOne + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - deleteOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteOne + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - deleteMany on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteMany + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - deleteMany on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteMany + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - replaceOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: replaceOne + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - replaceOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: 
replaceOne + object: *collection + arguments: + timeoutMS: 0 + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - updateOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateOne + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - updateOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateOne + object: *collection + arguments: + timeoutMS: 0 + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - updateMany on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateMany + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - updateMany on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateMany + object: *collection + arguments: + timeoutMS: 0 + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - findOneAndDelete on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndDelete + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: 
"timeoutMS can be set to 0 for an operation - findOneAndDelete on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndDelete + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - findOneAndReplace on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndReplace + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - findOneAndReplace on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndReplace + object: *collection + arguments: + timeoutMS: 0 + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - findOneAndUpdate on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndUpdate + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - findOneAndUpdate on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndUpdate + object: *collection + arguments: + timeoutMS: 0 + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - bulkWrite on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: 
failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: bulkWrite + object: *collection + arguments: + timeoutMS: 1000 + requests: + - insertOne: + document: { _id: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - bulkWrite on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: bulkWrite + object: *collection + arguments: + timeoutMS: 0 + requests: + - insertOne: + document: { _id: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - createIndex on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["createIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: createIndex + object: *collection + arguments: + timeoutMS: 1000 + keys: { x: 1 } + name: "x_1" + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + command: + createIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - createIndex on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["createIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: createIndex + object: *collection + arguments: + timeoutMS: 0 + keys: { x: 1 } + name: "x_1" + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + command: + createIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - dropIndex on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: dropIndex + object: *collection + arguments: + timeoutMS: 1000 + name: "x_1" + + expectError: + isTimeoutError: false # IndexNotFound + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - dropIndex on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: dropIndex + object: *collection + arguments: + timeoutMS: 0 + name: "x_1" + + expectError: + isTimeoutError: false # 
IndexNotFound + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - dropIndexes on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: dropIndexes + object: *collection + arguments: + timeoutMS: 1000 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - dropIndexes on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: dropIndexes + object: *collection + arguments: + timeoutMS: 0 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$exists: false } + \ No newline at end of file diff --git a/spec/spec_tests/data/client_side_operations_timeout/retryability-legacy-timeouts.yml b/spec/spec_tests/data/client_side_operations_timeout/retryability-legacy-timeouts.yml new file mode 100644 index 0000000000..abcaec6127 --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/retryability-legacy-timeouts.yml @@ -0,0 +1,1676 @@ +# Tests in this file are generated from retryability-legacy-timeouts.yml.template. + +description: "legacy timeouts behave correctly for retryable operations" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + topologies: ["replicaset", "sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + socketTimeoutMS: 100 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: [] + +tests: + # For each retryable operation, run two tests: + # + # 1. Socket timeouts are retried once - Each test constructs a client entity with socketTimeoutMS=100, configures a + # fail point to block the operation once for 125ms, and expects the operation to succeed. + # + # 2. Operations fail after two consecutive socket timeouts - Same as (1) but the fail point is configured to block + # the operation twice and the test expects the operation to fail. 
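The two-test pattern described in the comment above can be illustrated outside the unified test runner. Below is a minimal Ruby sketch of the same scenario, not taken from this changeset: it assumes the driver's :socket_timeout option (in seconds) plays the role of the socketTimeoutMS=100 URI option on the client entity, and that retryable writes are enabled (the driver default). Host, database, and collection names are placeholders.

require 'mongo'

# Client mirroring the test's client entity: 100 ms socket timeout, writes retried once.
client = Mongo::Client.new(
  ['127.0.0.1:27017'],
  database: 'test',
  socket_timeout: 0.1,
  retry_writes: true
)

# Block the next `insert` for ~125 ms via the failCommand fail point
# (requires a 4.4+ replica set or sharded cluster, per runOnRequirements).
client.use('admin').database.command(
  configureFailPoint: 'failCommand',
  mode: { times: 1 },
  data: { failCommands: ['insert'], blockConnection: true, blockTimeMS: 125 }
)

# The first attempt times out after 100 ms and is retried; the retry succeeds,
# so two `insert` commandStartedEvents are observed. With mode: { times: 2 }
# both attempts time out and a network (client) error is raised instead.
client[:coll].insert_one(x: 1)

The generated tests that follow repeat exactly this pair of expectations (one retry succeeds, two consecutive socket timeouts fail with a client error) for each retryable read and write operation.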
+ + - description: "operation succeeds after one socket timeout - insertOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 125 + - name: insertOne + object: *collection + arguments: + document: { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - description: "operation fails after two consecutive socket timeouts - insertOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 125 + - name: insertOne + object: *collection + arguments: + document: { x: 1 } + + expectError: + # Network errors are considered client errors by the unified test format spec. + isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - description: "operation succeeds after one socket timeout - insertMany on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 125 + - name: insertMany + object: *collection + arguments: + documents: + - { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - description: "operation fails after two consecutive socket timeouts - insertMany on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 125 + - name: insertMany + object: *collection + arguments: + documents: + - { x: 1 } + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - description: "operation succeeds after one socket timeout - deleteOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 125 + - name: deleteOne + object: *collection + arguments: + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + - description: "operation fails after two consecutive socket timeouts - deleteOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 125 + - name: deleteOne + object: *collection + arguments: + filter: {} + + expectError: + # Network errors are considered client errors by the unified test format spec. + isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + - description: "operation succeeds after one socket timeout - replaceOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 125 + - name: replaceOne + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + - description: "operation fails after two consecutive socket timeouts - replaceOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 125 + - name: replaceOne + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + - description: "operation succeeds after one socket timeout - updateOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 125 + - name: updateOne + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + - description: "operation fails after two consecutive socket timeouts - updateOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 125 + - name: updateOne + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + expectError: + # Network errors are considered client errors by the unified test format spec. + isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + - description: "operation succeeds after one socket timeout - findOneAndDelete on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 125 + - name: findOneAndDelete + object: *collection + arguments: + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - description: "operation fails after two consecutive socket timeouts - findOneAndDelete on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 125 + - name: findOneAndDelete + object: *collection + arguments: + filter: {} + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - description: "operation succeeds after one socket timeout - findOneAndReplace on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 125 + - name: findOneAndReplace + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - description: "operation fails after two consecutive socket timeouts - findOneAndReplace on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 125 + - name: findOneAndReplace + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + expectError: + # Network errors are considered client errors by the unified test format spec. + isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - description: "operation succeeds after one socket timeout - findOneAndUpdate on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 125 + - name: findOneAndUpdate + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - description: "operation fails after two consecutive socket timeouts - findOneAndUpdate on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 125 + - name: findOneAndUpdate + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - description: "operation succeeds after one socket timeout - bulkWrite on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 125 + - name: bulkWrite + object: *collection + arguments: + requests: + - insertOne: + document: { _id: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - description: "operation fails after two consecutive socket timeouts - bulkWrite on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 125 + - name: bulkWrite + object: *collection + arguments: + requests: + - insertOne: + document: { _id: 1 } + + expectError: + # Network errors are considered client errors by the unified test format spec. + isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - description: "operation succeeds after one socket timeout - listDatabases on client" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 125 + - name: listDatabases + object: *client + arguments: + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + - description: "operation fails after two consecutive socket timeouts - listDatabases on client" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 125 + - name: listDatabases + object: *client + arguments: + filter: {} + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + - description: "operation succeeds after one socket timeout - listDatabaseNames on client" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 125 + - name: listDatabaseNames + object: *client + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + - description: "operation fails after two consecutive socket timeouts - listDatabaseNames on client" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 125 + - name: listDatabaseNames + object: *client + + expectError: + # Network errors are considered client errors by the unified test format spec. + isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + - description: "operation succeeds after one socket timeout - createChangeStream on client" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: createChangeStream + object: *client + arguments: + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + - description: "operation fails after two consecutive socket timeouts - createChangeStream on client" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: createChangeStream + object: *client + arguments: + pipeline: [] + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + - description: "operation succeeds after one socket timeout - aggregate on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: aggregate + object: *database + arguments: + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + - description: "operation fails after two consecutive socket timeouts - aggregate on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: aggregate + object: *database + arguments: + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + expectError: + # Network errors are considered client errors by the unified test format spec. + isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + - description: "operation succeeds after one socket timeout - listCollections on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 125 + - name: listCollections + object: *database + arguments: + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + - description: "operation fails after two consecutive socket timeouts - listCollections on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 125 + - name: listCollections + object: *database + arguments: + filter: {} + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + - description: "operation succeeds after one socket timeout - listCollectionNames on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 125 + - name: listCollectionNames + object: *database + arguments: + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + - description: "operation fails after two consecutive socket timeouts - listCollectionNames on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 125 + - name: listCollectionNames + object: *database + arguments: + filter: {} + + expectError: + # Network errors are considered client errors by the unified test format spec. + isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + - description: "operation succeeds after one socket timeout - createChangeStream on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: createChangeStream + object: *database + arguments: + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + - description: "operation fails after two consecutive socket timeouts - createChangeStream on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: createChangeStream + object: *database + arguments: + pipeline: [] + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + - description: "operation succeeds after one socket timeout - aggregate on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: aggregate + object: *collection + arguments: + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + - description: "operation fails after two consecutive socket timeouts - aggregate on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: aggregate + object: *collection + arguments: + pipeline: [] + + expectError: + # Network errors are considered client errors by the unified test format spec. + isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + - description: "operation succeeds after one socket timeout - count on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 125 + - name: count + object: *collection + arguments: + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + - description: "operation fails after two consecutive socket timeouts - count on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 125 + - name: count + object: *collection + arguments: + filter: {} + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + - description: "operation succeeds after one socket timeout - countDocuments on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: countDocuments + object: *collection + arguments: + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + - description: "operation fails after two consecutive socket timeouts - countDocuments on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: countDocuments + object: *collection + arguments: + filter: {} + + expectError: + # Network errors are considered client errors by the unified test format spec. + isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + - description: "operation succeeds after one socket timeout - estimatedDocumentCount on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 125 + - name: estimatedDocumentCount + object: *collection + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + - description: "operation fails after two consecutive socket timeouts - estimatedDocumentCount on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 125 + - name: estimatedDocumentCount + object: *collection + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + - description: "operation succeeds after one socket timeout - distinct on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["distinct"] + blockConnection: true + blockTimeMS: 125 + - name: distinct + object: *collection + arguments: + fieldName: x + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + - description: "operation fails after two consecutive socket timeouts - distinct on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["distinct"] + blockConnection: true + blockTimeMS: 125 + - name: distinct + object: *collection + arguments: + fieldName: x + filter: {} + + expectError: + # Network errors are considered client errors by the unified test format spec. + isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + - description: "operation succeeds after one socket timeout - find on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 125 + - name: find + object: *collection + arguments: + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + - description: "operation fails after two consecutive socket timeouts - find on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 125 + - name: find + object: *collection + arguments: + filter: {} + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + - description: "operation succeeds after one socket timeout - findOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 125 + - name: findOne + object: *collection + arguments: + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + - description: "operation fails after two consecutive socket timeouts - findOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 125 + - name: findOne + object: *collection + arguments: + filter: {} + + expectError: + # Network errors are considered client errors by the unified test format spec. + isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + - description: "operation succeeds after one socket timeout - listIndexes on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 125 + - name: listIndexes + object: *collection + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + - description: "operation fails after two consecutive socket timeouts - listIndexes on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 125 + - name: listIndexes + object: *collection + + expectError: + # Network errors are considered client errors by the unified test format spec. 
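
Editor's note (not part of the diff): the socket-timeout cases above all follow one shape. A failCommand fail point blocks the targeted command for 125ms; with mode: { times: 1 } the single blocked attempt times out but the driver's retry succeeds, while mode: { times: 2 } exhausts both attempts and the operation surfaces a client-side error. The Ruby sketch below drives the same fail point by hand. It is illustrative only: the URI, database and collection names are placeholders, it assumes a test deployment started with enableTestCommands (so failCommand is available) and server 4.4+ (for blockConnection), and it forces the timeout with the long-standing socket_timeout client option rather than the client entity the spec file itself configures.

# Hand-run illustration of the "one socket timeout is retried" pattern.
require 'mongo'

client = Mongo::Client.new(
  'mongodb://localhost:27017/?replicaSet=rs0',   # placeholder test replica set
  database: 'test',
  socket_timeout: 0.1                            # 100ms, below the 125ms block
)

admin = client.use('admin').database

# Block the next single "find" for 125ms: the first attempt should time out on
# the socket, and the automatic retryable-reads retry should then succeed.
admin.command(
  configureFailPoint: 'failCommand',
  mode: { times: 1 },
  data: { failCommands: ['find'], blockConnection: true, blockTimeMS: 125 }
)

begin
  client[:coll].find({}).first
  puts 'succeeded after one induced socket timeout'
rescue Mongo::Error => e
  puts "failed: #{e.class}"   # the expected outcome when the fail point uses times: 2
ensure
  # Always clear the fail point so later operations are unaffected.
  admin.command(configureFailPoint: 'failCommand', mode: 'off')
end
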
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + - description: "operation succeeds after one socket timeout - createChangeStream on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: createChangeStream + object: *collection + arguments: + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + - description: "operation fails after two consecutive socket timeouts - createChangeStream on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: createChangeStream + object: *collection + arguments: + pipeline: [] + + expectError: + # Network errors are considered client errors by the unified test format spec. + isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + \ No newline at end of file diff --git a/spec/spec_tests/data/client_side_operations_timeout/retryability-timeoutMS.yml b/spec/spec_tests/data/client_side_operations_timeout/retryability-timeoutMS.yml new file mode 100644 index 0000000000..6f47d6c2e4 --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/retryability-timeoutMS.yml @@ -0,0 +1,2824 @@ +# Tests in this file are generated from retryability-timeoutMS.yml.template. + +description: "timeoutMS behaves correctly for retryable operations" + +schemaVersion: "1.9" + +# failCommand is available on 4.0+ replica sets and 4.2+ sharded clusters. +runOnRequirements: + - minServerVersion: "4.0" + topologies: ["replicaset"] + - minServerVersion: "4.2" + topologies: ["sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 100 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: [] + +tests: + # For each retryable operation, run three tests: + # + # 1. timeoutMS applies to the whole operation, not to individual attempts - Client timeoutMS=100 and the operation is + # fails with a retryable error after being blocked server-side for 60ms. 
The operation should fail with a timeout error + # because the second attempt should take it over the 100ms limit. This test only runs on 4.4+ because it uses the + # blockConnection option in failCommand. + # + # 2. operation is retried multiple times if timeoutMS is set to a non-zero value - Client timeoutMS=100 and the + # operation fails with a retryable error twice. Drivers should send the original operation and two retries, the + # second of which should succeed. + # + # 3. operation is retried multiple times if timeoutMS is set to a zero - Override timeoutMS to zero for the operation + # and set a fail point to force a retryable error twice. Drivers should send the original operation and two retries, + # the second of which should succeed. + # + # The fail points in these tests use error code 7 (HostNotFound) because it is a retryable error but does not trigger + # an SDAM state change so we don't lose any time to server rediscovery. The tests also explicitly specify an + # errorLabels array in the fail point to avoid behavioral differences among server types and ensure that the error + # will be considered retryable. + + - description: "timeoutMS applies to whole operation, not individual attempts - insertOne on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: insertOne + object: *collection + arguments: + document: { x: 1 } + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - insertOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: insertOne + object: *collection + arguments: + timeoutMS: 1000 + document: { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - insertOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: insertOne + object: *collection + arguments: + timeoutMS: 0 + document: { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - 
commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - insertMany on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: insertMany + object: *collection + arguments: + documents: + - { x: 1 } + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - insertMany on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: insertMany + object: *collection + arguments: + timeoutMS: 1000 + documents: + - { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - insertMany on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: insertMany + object: *collection + arguments: + timeoutMS: 0 + documents: + - { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - deleteOne on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: deleteOne + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + - 
description: "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["delete"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: deleteOne + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - deleteOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["delete"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: deleteOne + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - replaceOne on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: replaceOne + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - replaceOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["update"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: replaceOne + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + replacement: { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + 
update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - replaceOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["update"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: replaceOne + object: *collection + arguments: + timeoutMS: 0 + filter: {} + replacement: { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - updateOne on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: updateOne + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - updateOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["update"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: updateOne + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + update: { $set: { x: 1 } } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - updateOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["update"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: updateOne + object: *collection + arguments: + timeoutMS: 0 + filter: {} + update: { $set: { x: 1 } } + + expectEvents: 
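
Editor's note (not part of the diff): the retryability-timeoutMS cases assert one observable signal per attempt: when timeoutMS is a non-zero value, every commandStartedEvent carries a maxTimeMS of int/long type; when timeoutMS is 0, no attempt sends maxTimeMS at all; and with timeoutMS=100 plus four 60ms server-side blocks the whole operation runs out of budget and fails with a timeout error. The sketch below shows one way to watch that same signal from the Ruby driver's command monitoring. Names and the URI are placeholders, and it assumes a driver release with CSOT support where timeout_ms is accepted as a client option.

# Minimal sketch: log whether the driver attaches maxTimeMS to each command.
require 'mongo'

class StartedLogger
  def started(event)
    puts "#{event.command_name}: maxTimeMS=#{event.command['maxTimeMS'].inspect}"
  end

  # Command monitoring subscribers must also respond to these callbacks.
  def succeeded(_event); end
  def failed(_event); end
end

client = Mongo::Client.new(
  'mongodb://localhost:27017/?replicaSet=rs0',  # placeholder
  database: 'test',
  timeout_ms: 1000                              # non-zero: expect maxTimeMS on each attempt
)
client.subscribe(Mongo::Monitoring::COMMAND, StartedLogger.new)
client[:coll].insert_one(x: 1)                  # started event should show a numeric maxTimeMS

# With timeout_ms: 0 the CSOT spec expects maxTimeMS to be omitted entirely.
zero = Mongo::Client.new(
  'mongodb://localhost:27017/?replicaSet=rs0',
  database: 'test',
  timeout_ms: 0
)
zero.subscribe(Mongo::Monitoring::COMMAND, StartedLogger.new)
zero[:coll].insert_one(x: 1)                    # started event should show maxTimeMS=nil
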
+ - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - findOneAndDelete on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: findOneAndDelete + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["findAndModify"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: findOneAndDelete + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["findAndModify"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: findOneAndDelete + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - findOneAndReplace on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + 
configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: findOneAndReplace + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["findAndModify"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: findOneAndReplace + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + replacement: { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["findAndModify"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: findOneAndReplace + object: *collection + arguments: + timeoutMS: 0 + filter: {} + replacement: { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - findOneAndUpdate on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: findOneAndUpdate + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + 
mode: { times: 2 } + data: + failCommands: ["findAndModify"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: findOneAndUpdate + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + update: { $set: { x: 1 } } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["findAndModify"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: findOneAndUpdate + object: *collection + arguments: + timeoutMS: 0 + filter: {} + update: { $set: { x: 1 } } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: bulkWrite + object: *collection + arguments: + requests: + - insertOne: + document: { _id: 1 } + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: bulkWrite + object: *collection + arguments: + timeoutMS: 1000 + requests: + - insertOne: + document: { _id: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: insert + databaseName: 
*databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: bulkWrite + object: *collection + arguments: + timeoutMS: 0 + requests: + - insertOne: + document: { _id: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - listDatabases on client" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: listDatabases + object: *client + arguments: + filter: {} + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - listDatabases on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listDatabases"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: listDatabases + object: *client + arguments: + timeoutMS: 1000 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - listDatabases on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listDatabases"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: listDatabases + object: *client + arguments: + timeoutMS: 0 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + 
commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - listDatabaseNames on client" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: listDatabaseNames + object: *client + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listDatabases"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: listDatabaseNames + object: *client + arguments: + timeoutMS: 1000 + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listDatabases"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: listDatabaseNames + object: *client + arguments: + timeoutMS: 0 + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - createChangeStream on client" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: createChangeStream + object: *client + arguments: + pipeline: [] + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client" + 
runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: createChangeStream + object: *client + arguments: + timeoutMS: 1000 + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: createChangeStream + object: *client + arguments: + timeoutMS: 0 + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - aggregate on database" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: aggregate + object: *database + arguments: + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - aggregate on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: aggregate + object: *database + arguments: + timeoutMS: 1000 + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + 
aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - aggregate on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: aggregate + object: *database + arguments: + timeoutMS: 0 + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - listCollections on database" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: listCollections + object: *database + arguments: + filter: {} + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - listCollections on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listCollections"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: listCollections + object: *database + arguments: + timeoutMS: 1000 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - listCollections on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listCollections"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: listCollections + object: *database + arguments: + timeoutMS: 0 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$exists: false } + - 
commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - listCollectionNames on database" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: listCollectionNames + object: *database + arguments: + filter: {} + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - listCollectionNames on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listCollections"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: listCollectionNames + object: *database + arguments: + timeoutMS: 1000 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listCollections"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: listCollectionNames + object: *database + arguments: + timeoutMS: 0 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - createChangeStream on database" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: createChangeStream + object: 
*database + arguments: + pipeline: [] + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: createChangeStream + object: *database + arguments: + timeoutMS: 1000 + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: createChangeStream + object: *database + arguments: + timeoutMS: 0 + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - aggregate on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: aggregate + object: *collection + arguments: + pipeline: [] + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - aggregate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: aggregate + object: *collection + arguments: + timeoutMS: 1000 + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: aggregate + 
databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - aggregate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: aggregate + object: *collection + arguments: + timeoutMS: 0 + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - count on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: count + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - count on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["count"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: count + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - count on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["count"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: count + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: 
count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - countDocuments on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: countDocuments + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: countDocuments + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - countDocuments on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: countDocuments + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - estimatedDocumentCount on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + 
errorLabels: ["RetryableWriteError"] + - name: estimatedDocumentCount + object: *collection + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["count"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: estimatedDocumentCount + object: *collection + arguments: + timeoutMS: 1000 + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["count"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: estimatedDocumentCount + object: *collection + arguments: + timeoutMS: 0 + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - distinct on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["distinct"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: distinct + object: *collection + arguments: + fieldName: x + filter: {} + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - distinct on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["distinct"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: distinct + object: *collection + arguments: + timeoutMS: 1000 + fieldName: x + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + 
maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - distinct on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["distinct"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: distinct + object: *collection + arguments: + timeoutMS: 0 + fieldName: x + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - find on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: find + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - find on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["find"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: find + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - find on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["find"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: find + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + 
expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - findOne on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: findOne + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - findOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["find"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: findOne + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - findOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["find"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: findOne + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - listIndexes on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: 
listIndexes + object: *collection + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listIndexes"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: listIndexes + object: *collection + arguments: + timeoutMS: 1000 + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - listIndexes on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listIndexes"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: listIndexes + object: *collection + arguments: + timeoutMS: 0 + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - createChangeStream on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: createChangeStream + object: *collection + arguments: + pipeline: [] + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: createChangeStream + object: *collection + arguments: + timeoutMS: 1000 + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + 
maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: createChangeStream + object: *collection + arguments: + timeoutMS: 0 + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + \ No newline at end of file diff --git a/spec/spec_tests/data/client_side_operations_timeout/sessions-inherit-timeoutMS.yml b/spec/spec_tests/data/client_side_operations_timeout/sessions-inherit-timeoutMS.yml new file mode 100644 index 0000000000..184ef7eb9e --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/sessions-inherit-timeoutMS.yml @@ -0,0 +1,168 @@ +description: "sessions inherit timeoutMS from their parent MongoClient" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + topologies: ["replicaset", "sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 50 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - commandSucceededEvent + - commandFailedEvent + - database: + id: &database database + client: *client + databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + - session: + id: &session session + client: *client + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: [] + +tests: + # Drivers ignore errors from abortTransaction, so the tests in this file use commandSucceededEvent and + # commandFailedEvent events to assert success/failure. 
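Editor's note: the session fixtures above, like most of the CSOT fixtures in this patch, assert behaviour through command monitoring events (commandStartedEvent, commandSucceededEvent, commandFailedEvent) rather than through operation return values. Outside the unified test runner, the same events can be observed with the driver's command monitoring API. A minimal sketch, with an illustrative URI and collection name, of a subscriber that prints whether maxTimeMS was attached to each command:

    require 'mongo'

    # Hypothetical subscriber for eyeballing the maxTimeMS field that the
    # commandStartedEvent expectations above check for.
    class MaxTimeMSLogger
      def started(event)
        puts "#{event.command_name}: maxTimeMS=#{event.command['maxTimeMS'].inspect}"
      end

      # The fixtures also assert on success/failure events, so define both hooks.
      def succeeded(_event); end
      def failed(_event); end
    end

    client = Mongo::Client.new('mongodb://localhost:27017/test') # illustrative URI
    client.subscribe(Mongo::Monitoring::COMMAND, MaxTimeMSLogger.new)
    client[:coll].find({}).to_a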
+ + - description: "timeoutMS applied to commitTransaction" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["commitTransaction"] + blockConnection: true + blockTimeMS: 60 + - name: startTransaction + object: *session + - name: insertOne + object: *collection + arguments: + session: *session + document: { _id: 1 } + - name: commitTransaction + object: *session + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandSucceededEvent: + commandName: insert + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: commitTransaction + + - description: "timeoutMS applied to abortTransaction" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["abortTransaction"] + blockConnection: true + blockTimeMS: 60 + - name: startTransaction + object: *session + - name: insertOne + object: *collection + arguments: + session: *session + document: { _id: 1 } + - name: abortTransaction + object: *session + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandSucceededEvent: + commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: abortTransaction + + - description: "timeoutMS applied to withTransaction" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 60 + - name: withTransaction + object: *session + arguments: + callback: + - name: insertOne + object: *collection + arguments: + session: *session + document: { _id: 1 } + expectError: + isTimeoutError: true + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + # Because the insert expects an error and gets an error, it technically succeeds, so withTransaction will + # try to run commitTransaction. This will fail client-side, though, because the timeout has already expired, + # so no command is sent. + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + # withTransaction specifies timeoutMS for each operation in the callback that uses the session, so the + # insert command should have a maxTimeMS field. 
+ maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: insert diff --git a/spec/spec_tests/data/client_side_operations_timeout/sessions-override-operation-timeoutMS.yml b/spec/spec_tests/data/client_side_operations_timeout/sessions-override-operation-timeoutMS.yml new file mode 100644 index 0000000000..8a80a65720 --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/sessions-override-operation-timeoutMS.yml @@ -0,0 +1,171 @@ +description: "timeoutMS can be overridden for individual session operations" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + topologies: ["replicaset", "sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - commandSucceededEvent + - commandFailedEvent + - database: + id: &database database + client: *client + databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + - session: + id: &session session + client: *client + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: [] + +tests: + # Drivers ignore errors from abortTransaction, so the tests in this file use commandSucceededEvent and + # commandFailedEvent events to assert success/failure. + + - description: "timeoutMS can be overridden for commitTransaction" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["commitTransaction"] + blockConnection: true + blockTimeMS: 60 + - name: startTransaction + object: *session + - name: insertOne + object: *collection + arguments: + session: *session + document: { _id: 1 } + - name: commitTransaction + object: *session + arguments: + timeoutMS: 50 + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandSucceededEvent: + commandName: insert + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: commitTransaction + + - description: "timeoutMS applied to abortTransaction" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["abortTransaction"] + blockConnection: true + blockTimeMS: 60 + - name: startTransaction + object: *session + - name: insertOne + object: *collection + arguments: + session: *session + document: { _id: 1 } + - name: abortTransaction + object: *session + arguments: + timeoutMS: 50 + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandSucceededEvent: + commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: abortTransaction + + - description: "timeoutMS applied to withTransaction" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + 
configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 60 + - name: withTransaction + object: *session + arguments: + timeoutMS: 50 + callback: + - name: insertOne + object: *collection + arguments: + session: *session + document: { _id: 1 } + expectError: + isTimeoutError: true + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + # Because the insert expects an error and gets an error, it technically succeeds, so withTransaction will + # try to run commitTransaction. This will fail client-side, though, because the timeout has already expired, + # so no command is sent. + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + # withTransaction specifies timeoutMS for each operation in the callback that uses the session, so the + # insert command should have a maxTimeMS field. + maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: insert diff --git a/spec/spec_tests/data/client_side_operations_timeout/sessions-override-timeoutMS.yml b/spec/spec_tests/data/client_side_operations_timeout/sessions-override-timeoutMS.yml new file mode 100644 index 0000000000..61aaab4d97 --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/sessions-override-timeoutMS.yml @@ -0,0 +1,168 @@ +description: "timeoutMS can be overridden at the level of a ClientSession" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + topologies: ["replicaset", "sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - commandSucceededEvent + - commandFailedEvent + - database: + id: &database database + client: *client + databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + - session: + id: &session session + client: *client + sessionOptions: + defaultTimeoutMS: 50 + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: [] + +tests: + # Drivers ignore errors from abortTransaction, so the tests in this file use commandSucceededEvent and + # commandFailedEvent events to assert success/failure. 
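Editor's note: every fixture in these files drives the server-side failCommand fail point through the test runner's failPoint operation. When reproducing one of these failures by hand against a local single-node replica set, the same fail point can be set directly with the driver's Database#command; a minimal sketch (URI and timings are placeholders, mirroring the withTransaction case above where a 60ms block exceeds a 50ms timeoutMS budget):

    require 'mongo'

    admin = Mongo::Client.new('mongodb://localhost:27017', connect: :direct)

    # Block the next insert for 60ms so a 50ms timeoutMS budget is exceeded.
    admin.use('admin').database.command(
      configureFailPoint: 'failCommand',
      mode: { times: 1 },
      data: {
        failCommands: ['insert'],
        blockConnection: true,
        blockTimeMS: 60
      }
    )

    # Disable the fail point again when done.
    admin.use('admin').database.command(configureFailPoint: 'failCommand', mode: 'off')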
+ + - description: "timeoutMS applied to commitTransaction" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["commitTransaction"] + blockConnection: true + blockTimeMS: 60 + - name: startTransaction + object: *session + - name: insertOne + object: *collection + arguments: + session: *session + document: { _id: 1 } + - name: commitTransaction + object: *session + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandSucceededEvent: + commandName: insert + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: commitTransaction + + - description: "timeoutMS applied to abortTransaction" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["abortTransaction"] + blockConnection: true + blockTimeMS: 60 + - name: startTransaction + object: *session + - name: insertOne + object: *collection + arguments: + session: *session + document: { _id: 1 } + - name: abortTransaction + object: *session + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandSucceededEvent: + commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: abortTransaction + + - description: "timeoutMS applied to withTransaction" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 60 + - name: withTransaction + object: *session + arguments: + callback: + - name: insertOne + object: *collection + arguments: + session: *session + document: { _id: 1 } + expectError: + isTimeoutError: true + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + # Because the insert expects an error and gets an error, it technically succeeds, so withTransaction will + # try to run commitTransaction. This will fail client-side, though, because the timeout has already expired, + # so no command is sent. + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + # withTransaction specifies timeoutMS for each operation in the callback that uses the session, so the + # insert command should have a maxTimeMS field. 
+ maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: insert diff --git a/spec/spec_tests/data/client_side_operations_timeout/tailable-awaitData.yml b/spec/spec_tests/data/client_side_operations_timeout/tailable-awaitData.yml new file mode 100644 index 0000000000..9f5790943d --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/tailable-awaitData.yml @@ -0,0 +1,247 @@ +description: "timeoutMS behaves correctly for tailable awaitData cursors" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 10 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + createOptions: + capped: true + size: 500 + documents: + - { _id: 0 } + - { _id: 1 } + +tests: + - description: "error if timeoutMode is cursor_lifetime" + operations: + - name: find + object: *collection + arguments: + filter: {} + timeoutMode: cursorLifetime + cursorType: tailableAwait + expectError: + isClientError: true + + - description: "error if maxAwaitTimeMS is greater than timeoutMS" + operations: + - name: find + object: *collection + arguments: + filter: {} + cursorType: tailableAwait + timeoutMS: 5 + maxAwaitTimeMS: 10 + expectError: + isClientError: true + + - description: "error if maxAwaitTimeMS is equal to timeoutMS" + operations: + - name: find + object: *collection + arguments: + filter: {} + cursorType: tailableAwait + timeoutMS: 5 + maxAwaitTimeMS: 5 + expectError: + isClientError: true + + - description: "timeoutMS applied to find" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: find + object: *collection + arguments: + filter: {} + cursorType: tailableAwait + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + tailable: true + awaitData: true + maxTimeMS: { $$exists: true } + + # If maxAwaitTimeMS is not set, timeoutMS should be refreshed for the getMore and the getMore should not have a + # maxTimeMS field. + - description: "timeoutMS is refreshed for getMore if maxAwaitTimeMS is not set" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["find", "getMore"] + blockConnection: true + blockTimeMS: 15 + - name: createFindCursor + object: *collection + arguments: + filter: {} + cursorType: tailableAwait + timeoutMS: 29 + batchSize: 1 + saveResultAsEntity: &tailableCursor tailableCursor + # Iterate twice to force a getMore. The first iteration will return the document from the first batch and the + # second will do a getMore. 
+ - name: iterateUntilDocumentOrError + object: *tailableCursor + - name: iterateUntilDocumentOrError + object: *tailableCursor + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + tailable: true + awaitData: true + maxTimeMS: { $$exists: true } + - commandStartedEvent: + commandName: getMore + databaseName: *databaseName + command: + getMore: { $$type: ["int", "long"] } + collection: *collectionName + maxTimeMS: { $$exists: false } + + # If maxAwaitTimeMS is set for the initial command, timeoutMS should still be refreshed for the getMore and the + # getMore command should have a maxTimeMS field. + - description: "timeoutMS is refreshed for getMore if maxAwaitTimeMS is set" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["find", "getMore"] + blockConnection: true + blockTimeMS: 15 + - name: createFindCursor + object: *collection + arguments: + filter: {} + cursorType: tailableAwait + timeoutMS: 29 + batchSize: 1 + maxAwaitTimeMS: 1 + saveResultAsEntity: &tailableCursor tailableCursor + # Iterate twice to force a getMore. + - name: iterateUntilDocumentOrError + object: *tailableCursor + - name: iterateUntilDocumentOrError + object: *tailableCursor + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + tailable: true + awaitData: true + maxTimeMS: { $$exists: true } + - commandStartedEvent: + commandName: getMore + databaseName: *databaseName + command: + getMore: { $$type: ["int", "long"] } + collection: *collectionName + maxTimeMS: 1 + + # The timeoutMS value should be refreshed for getMore's. This is a failure test. The find inherits timeoutMS=10 from + # the collection and the getMore blocks for 15ms, causing iteration to fail with a timeout error. + - description: "timeoutMS is refreshed for getMore - failure" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["getMore"] + blockConnection: true + blockTimeMS: 15 + - name: createFindCursor + object: *collection + arguments: + filter: {} + cursorType: tailableAwait + batchSize: 1 + saveResultAsEntity: &tailableCursor tailableCursor + # Iterate twice to force a getMore. 
+ - name: iterateUntilDocumentOrError + object: *tailableCursor + - name: iterateUntilDocumentOrError + object: *tailableCursor + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + tailable: true + awaitData: true + maxTimeMS: { $$exists: true } + - commandStartedEvent: + commandName: getMore + databaseName: *databaseName + command: + getMore: { $$type: ["int", "long"] } + collection: *collectionName diff --git a/spec/spec_tests/data/client_side_operations_timeout/tailable-non-awaitData.yml b/spec/spec_tests/data/client_side_operations_timeout/tailable-non-awaitData.yml new file mode 100644 index 0000000000..766b46e658 --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/tailable-non-awaitData.yml @@ -0,0 +1,181 @@ +description: "timeoutMS behaves correctly for tailable non-awaitData cursors" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 10 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + createOptions: + capped: true + size: 500 + documents: + - { _id: 0 } + - { _id: 1 } + +tests: + - description: "error if timeoutMode is cursor_lifetime" + operations: + - name: find + object: *collection + arguments: + filter: {} + timeoutMode: cursorLifetime + cursorType: tailable + expectError: + isClientError: true + + - description: "timeoutMS applied to find" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: find + object: *collection + arguments: + filter: {} + cursorType: tailable + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + # Due to SERVER-51153, the find command should not contain a maxTimeMS field for tailable non-awaitData + # cursors because that would cap the lifetime of the created cursor. + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + tailable: true + awaitData: { $$exists: false } + maxTimeMS: { $$exists: false } + + # The timeoutMS option should apply separately to the initial "find" and each getMore. This is a success test. The + # find is executed with timeoutMS=20 and both find and getMore commands are configured to block for 15ms each. Neither + # exceeds the timeout so the operation succeeds. 
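Editor's note: that comment states the central CSOT rule for tailable cursors, which the success and failure fixtures that follow exercise: the timeout budget applies separately to the initial find and to each subsequent getMore, rather than capping the cursor's whole lifetime. Purely as an illustration of how such a cursor might be opened from Ruby, a sketch is shown below; :cursor_type and :batch_size are standard find options, while :timeout_ms is an assumption about the driver's per-operation spelling of the spec's timeoutMS.

    require 'mongo'

    client = Mongo::Client.new('mongodb://localhost:27017/test') # illustrative URI
    collection = client[:coll] # assumed to be a capped collection, as in the fixture

    view = collection.find(
      {},
      cursor_type: :tailable, # tailable, non-awaitData, as in this fixture
      batch_size: 1,
      timeout_ms: 20          # assumed option; re-applied to the find and to each getMore
    )

    view.each { |doc| puts doc[:_id] }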
+ - description: "timeoutMS is refreshed for getMore - success" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["find", "getMore"] + blockConnection: true + blockTimeMS: 15 + - name: createFindCursor + object: *collection + arguments: + filter: {} + cursorType: tailable + timeoutMS: 20 + batchSize: 1 + saveResultAsEntity: &tailableCursor tailableCursor + # Iterate the cursor twice: the first iteration will return the document from the batch in the find and the + # second will do a getMore. + - name: iterateUntilDocumentOrError + object: *tailableCursor + - name: iterateUntilDocumentOrError + object: *tailableCursor + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + tailable: true + awaitData: { $$exists: false } + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: getMore + databaseName: *databaseName + command: + getMore: { $$type: ["int", "long"] } + collection: *collectionName + maxTimeMS: { $$exists: false } + + # The timeoutMS option should apply separately to the initial "find" and each getMore. This is a failure test. The + # find inherits timeoutMS=10 from the collection and the getMore command blocks for 15ms, causing iteration to fail + # with a timeout error. + - description: "timeoutMS is refreshed for getMore - failure" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["getMore"] + blockConnection: true + blockTimeMS: 15 + - name: createFindCursor + object: *collection + arguments: + filter: {} + cursorType: tailable + batchSize: 1 + saveResultAsEntity: &tailableCursor tailableCursor + # Iterate the cursor twice: the first iteration will return the document from the batch in the find and the + # second will do a getMore. 
+ - name: iterateUntilDocumentOrError + object: *tailableCursor + - name: iterateUntilDocumentOrError + object: *tailableCursor + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + tailable: true + awaitData: { $$exists: false } + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: getMore + databaseName: *databaseName + command: + getMore: { $$type: ["int", "long"] } + collection: *collectionName + maxTimeMS: { $$exists: false } diff --git a/spec/spec_tests/data/crud_unified/aggregate-write-readPreference.yml b/spec/spec_tests/data/crud_unified/aggregate-write-readPreference.yml index 86f5a4399c..421743d2e1 100644 --- a/spec/spec_tests/data/crud_unified/aggregate-write-readPreference.yml +++ b/spec/spec_tests/data/crud_unified/aggregate-write-readPreference.yml @@ -6,6 +6,8 @@ runOnRequirements: # 3.6+ non-standalone is needed to utilize $readPreference in OP_MSG - minServerVersion: "3.6" topologies: [ replicaset, sharded, load-balanced ] + # SERVER-90047: failures against latest server necessitate adding this for now + maxServerVersion: "8.0.0" _yamlAnchors: readConcern: &readConcern diff --git a/spec/spec_tests/data/crud_unified/db-aggregate-write-readPreference.yml b/spec/spec_tests/data/crud_unified/db-aggregate-write-readPreference.yml index 04a3b2169f..0192a3e0e5 100644 --- a/spec/spec_tests/data/crud_unified/db-aggregate-write-readPreference.yml +++ b/spec/spec_tests/data/crud_unified/db-aggregate-write-readPreference.yml @@ -9,6 +9,8 @@ runOnRequirements: - minServerVersion: "3.6" topologies: [ replicaset ] serverless: forbid + # SERVER-90047: failures against latest server necessitate adding this for now + maxServerVersion: "8.0.0" _yamlAnchors: readConcern: &readConcern diff --git a/spec/spec_tests/data/crud_unified/find-test-all-options.yml b/spec/spec_tests/data/crud_unified/find-test-all-options.yml index 0f456b9cdf..7aebaf504e 100644 --- a/spec/spec_tests/data/crud_unified/find-test-all-options.yml +++ b/spec/spec_tests/data/crud_unified/find-test-all-options.yml @@ -1,3 +1,6 @@ +# This spec is specific to the ruby driver, and is not part of the general +# `specifications` repo. 
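Editor's note: the ruby-specific find-test-all-options fixture whose header appears just above is adjusted in the hunk that follows: the max test now passes a hint, and the min test first creates an index, because MongoDB 4.2 and later reject min/max on find unless a hint identifies the index being bounded. From the driver these are ordinary find options; a small sketch with placeholder bounds and an illustrative URI:

    require 'mongo'

    collection = Mongo::Client.new('mongodb://localhost:27017/test')[:coll]

    # min/max bound a specific index, so modern servers require a matching hint;
    # this mirrors the hint and createIndex additions in the hunk below.
    collection.indexes.create_one({ name: 1 }, name: 'name_1')
    collection.find({}, hint: { _id: 1 }, max: { _id: 10 }).to_a
    collection.find({}, hint: 'name_1', min: { name: 'a' }).to_a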
+ description: "find options" schemaVersion: "1.0" @@ -162,11 +165,30 @@ tests: commandName: find databaseName: *database0Name + - description: "timeoutMS" + operations: + - name: find + arguments: + filter: *filter + timeoutMS: &timeoutMS 1000 + object: *collection0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: *filter + maxTimeMS: { $$type: [ int ] } + commandName: find + databaseName: *database0Name + - description: "max" operations: - name: find arguments: filter: *filter + hint: { _id: 1 } max: &max { _id: 10 } object: *collection0 expectEvents: @@ -182,6 +204,11 @@ tests: - description: "min" operations: + - name: createIndex + object: *collection0 + arguments: + name: "name_1" + keys: { name: 1 } - name: find arguments: filter: *filter @@ -191,6 +218,8 @@ tests: expectEvents: - client: *client0 events: + - commandStartedEvent: + commandName: createIndexes - commandStartedEvent: command: find: *collection0Name diff --git a/spec/spec_tests/server_selection_rtt_spec.rb b/spec/spec_tests/server_selection_rtt_spec.rb index a12a47e832..ea981846c8 100644 --- a/spec/spec_tests/server_selection_rtt_spec.rb +++ b/spec/spec_tests/server_selection_rtt_spec.rb @@ -15,18 +15,18 @@ context(spec.description) do - let(:averager) do - Mongo::Server::RoundTripTimeAverager.new + let(:calculator) do + Mongo::Server::RoundTripTimeCalculator.new end before do - averager.instance_variable_set(:@average_round_trip_time, spec.average_rtt) - averager.instance_variable_set(:@last_round_trip_time, spec.new_rtt) - averager.send(:update_average_round_trip_time) + calculator.instance_variable_set(:@average_round_trip_time, spec.average_rtt) + calculator.instance_variable_set(:@last_round_trip_time, spec.new_rtt) + calculator.update_average_round_trip_time end it 'correctly calculates the moving average round trip time' do - expect(averager.average_round_trip_time).to eq(spec.new_average_rtt) + expect(calculator.average_round_trip_time).to eq(spec.new_average_rtt) end end end diff --git a/spec/support/cluster_tools.rb b/spec/support/cluster_tools.rb index 253782da98..3eeeef2beb 100644 --- a/spec/support/cluster_tools.rb +++ b/spec/support/cluster_tools.rb @@ -98,7 +98,7 @@ def reset_priorities def step_down admin_client.database.command( replSetStepDown: 4, secondaryCatchUpPeriodSecs: 2) - rescue Mongo::Error::OperationFailure => e + rescue Mongo::Error::OperationFailure::Family => e # While waiting for secondaries to catch up before stepping down, this node decided to step down for other reasons (189) if e.code == 189 # success @@ -118,7 +118,7 @@ def step_up(address) begin client.database.command(replSetStepUp: 1) break - rescue Mongo::Error::OperationFailure => e + rescue Mongo::Error::OperationFailure::Family => e # Election failed. (125) if e.code == 125 # Possible reason is the node we are trying to elect has deny-listed @@ -261,7 +261,7 @@ def encourage_primary(address) def unfreeze_server(address) begin direct_client(address).use('admin').database.command(replSetFreeze: 0) - rescue Mongo::Error::OperationFailure => e + rescue Mongo::Error::OperationFailure::Family => e # Mongo::Error::OperationFailure: cannot freeze node when primary or running for election. 
state: Primary (95) if e.code == 95 # The server we want to become primary may have already become the diff --git a/spec/support/common_shortcuts.rb b/spec/support/common_shortcuts.rb index 32d386b36a..8eacdf2c7c 100644 --- a/spec/support/common_shortcuts.rb +++ b/spec/support/common_shortcuts.rb @@ -176,7 +176,7 @@ def kill_all_server_sessions ClientRegistry.instance.global_client('root_authorized').command(killAllSessions: []) # killAllSessions also kills the implicit session which the driver uses # to send this command, as a result it always fails - rescue Mongo::Error::OperationFailure => e + rescue Mongo::Error::OperationFailure::Family => e # "operation was interrupted" unless e.code == 11601 raise @@ -396,7 +396,7 @@ def wait_for_snapshot(db: nil, collection: nil, client: nil) client.start_session(snapshot: true) do |session| client[collection].aggregate([{'$match': {any: true}}], session: session).to_a end - rescue Mongo::Error::OperationFailure => e + rescue Mongo::Error::OperationFailure::Family => e # Retry them as the server demands... if e.code == 246 # SnapshotUnavailable if Mongo::Utils.monotonic_time < start_time + 10 diff --git a/spec/support/shared/session.rb b/spec/support/shared/session.rb index 7188f5f883..acbc3a2ceb 100644 --- a/spec/support/shared/session.rb +++ b/spec/support/shared/session.rb @@ -110,8 +110,8 @@ end it 'raises an error' do - expect([Mongo::Error::OperationFailure, - Mongo::Error::BulkWriteError]).to include(operation_result.class) + expect([Mongo::Error::OperationFailure::Family, + Mongo::Error::BulkWriteError].any? { |e| e === operation_result }).to be true end it 'updates the last use value' do diff --git a/spec/support/spec_setup.rb b/spec/support/spec_setup.rb index dcfa83a533..442a7352cd 100644 --- a/spec/support/spec_setup.rb +++ b/spec/support/spec_setup.rb @@ -28,7 +28,7 @@ def run # more users to any other databases. begin create_user(client, SpecConfig.instance.root_user) - rescue Mongo::Error::OperationFailure => e + rescue Mongo::Error::OperationFailure::Family => e # When testing a cluster that requires auth, root user is already set up # and it is not creatable without auth. # Seems like every mongodb version has its own error message @@ -61,7 +61,7 @@ def create_user(client, user) users = client.use('admin').database.users begin users.create(user) - rescue Mongo::Error::OperationFailure => e + rescue Mongo::Error::OperationFailure::Family => e if e.message =~ /User.*already exists/ users.remove(user.name) users.create(user)
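Editor's note: beyond the CSOT fixtures, the final hunks track two driver-side renames that the test helpers now depend on: Mongo::Server::RoundTripTimeAverager becomes RoundTripTimeCalculator, and error handling rescues the broader Mongo::Error::OperationFailure::Family module before inspecting e.code for the specific server error. As a quick illustration of what the renamed calculator is exercised for in server_selection_rtt_spec.rb, the sketch below reuses only the calls that appear in that hunk; the concrete numbers are made up, and the expected result assumes the server selection spec's exponentially weighted moving average with a weighting factor of 0.2.

    require 'mongo'

    calculator = Mongo::Server::RoundTripTimeCalculator.new

    # Seed the state the same way the spec does, then recompute the average.
    calculator.instance_variable_set(:@average_round_trip_time, 0.010) # seconds
    calculator.instance_variable_set(:@last_round_trip_time, 0.020)
    calculator.update_average_round_trip_time

    # Expected EWMA, assuming alpha = 0.2: 0.2 * 0.020 + 0.8 * 0.010 = 0.012
    calculator.average_round_trip_time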