diff --git a/.gitignore b/.gitignore
index 51321392fb..ed7f15ce3d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,3 +10,5 @@ last_test_dir
 upgrade
 html/
 doxygen/doxypy-0.4.2/
+.pytest_cache/
+.vscode/
diff --git a/auth_test.py b/auth_test.py
index 6d92ff56e6..2a2b6ab8d9 100644
--- a/auth_test.py
+++ b/auth_test.py
@@ -541,10 +541,16 @@ def test_materialized_views_auth(self):
         * Create a new user, 'cathy', with no permissions
         * Create a ks, table
         * Connect as cathy
+        *
         * Try CREATE MV without ALTER permission on base table, assert throws Unauthorized
         * Grant cathy ALTER permissions, then CREATE MV successfully
+        *
+        * Try to MODIFY base without WRITE permission on base, assert throws Unauthorized
+        * Grant cathy WRITE permissions on base, and modify base successfully
+        *
         * Try to SELECT from the mv, assert throws Unauthorized
-        * Grant cathy SELECT permissions, and read from the MV successfully
+        * Grant cathy SELECT permissions on base, and read from the MV successfully
+        *
         * Revoke cathy's ALTER permissions, assert DROP MV throws Unauthorized
         * Restore cathy's ALTER permissions, DROP MV successfully
         """
@@ -565,12 +571,36 @@ def test_materialized_views_auth(self):
         cassandra.execute("GRANT ALTER ON ks.cf TO cathy")
         cathy.execute(create_mv)
 
-        # TRY SELECT MV without SELECT permission on base table
-        assert_unauthorized(cathy, "SELECT * FROM ks.mv1", "User cathy has no SELECT permission on or any of its parents")
+        # Try MODIFY base without WRITE permission on base
+        assert_unauthorized(cathy, "INSERT INTO ks.cf(id, value) VALUES(1, '1')", "User cathy has no MODIFY permission on or any of its parents")
 
-        # Grant SELECT permission and CREATE MV
-        cassandra.execute("GRANT SELECT ON ks.cf TO cathy")
-        cathy.execute("SELECT * FROM ks.mv1")
+        if self.cluster.version() >= LooseVersion('4.0'):
+            # From 4.0 onward, only base MODIFY permission is required to update base with MV
+            # Grant WRITE permission on Base
+            cassandra.execute("GRANT MODIFY ON ks.cf TO cathy")
+            cathy.execute("INSERT INTO ks.cf(id, value) VALUES(1, '1')")
+
+            # TRY SELECT MV without SELECT permission on base table
+            assert_unauthorized(cathy, "SELECT * FROM ks.cf", "User cathy has no SELECT permission on or any of its parents")
+            assert_unauthorized(cathy, "SELECT * FROM ks.mv1", "User cathy has no SELECT permission on or any of its parents")
+
+            # Grant SELECT permission
+            cassandra.execute("GRANT SELECT ON ks.cf TO cathy")
+            assert_one(cathy, "SELECT * FROM ks.cf", [1, '1'])
+            assert_one(cathy, "SELECT * FROM ks.mv1", ['1', 1])
+        else:
+            # Before 4.0, MODIFY on MV is required to insert to base
+            # Grant WRITE permission on Base
+            cassandra.execute("GRANT MODIFY ON ks.cf TO cathy")
+            assert_unauthorized(cathy, "INSERT INTO ks.cf(id, value) VALUES(1, '1')", "User cathy has no SELECT permission on or any of its parents")
+            cassandra.execute("GRANT SELECT ON ks.cf TO cathy")
+            assert_unauthorized(cathy, "INSERT INTO ks.cf(id, value) VALUES(1, '1')", "User cathy has no MODIFY permission on or any of its parents")
+
+            # Grant WRITE permission on MV
+            cassandra.execute("GRANT MODIFY ON ks.mv1 TO cathy")
+            cathy.execute("INSERT INTO ks.cf(id, value) VALUES(1, '1')")
+            assert_one(cathy, "SELECT * FROM ks.cf", [1, '1'])
+            assert_one(cathy, "SELECT * FROM ks.mv1", ['1', 1])
 
         # Revoke ALTER permission and try DROP MV
         cassandra.execute("REVOKE ALTER ON ks.cf FROM cathy")
diff --git a/bootstrap_test.py b/bootstrap_test.py
index dc0691778f..960af0b4a7 100644
--- a/bootstrap_test.py
+++ b/bootstrap_test.py
@@ -882,7 +882,9 @@ def test_simultaneous_bootstrap(self):
         # Repeat the select count(*) query, to help catch
         # bugs like 9484, where count(*) fails at higher
         # data loads.
+        logger.error(node1.nodetool('status').stdout)
         for _ in range(5):
+            logger.error("Executing SELECT to node2")
             assert_one(session, "SELECT count(*) from keyspace1.standard1", [500000], cl=ConsistencyLevel.ONE)
 
     def test_cleanup(self):
@@ -1018,6 +1020,57 @@ def test_bootstrap_binary_disabled(self):
         assert_bootstrap_state(self, node3, 'COMPLETED', user='cassandra', password='cassandra')
         node3.wait_for_binary_interface()
 
+    @since('4.0')
+    @pytest.mark.no_vnodes
+    def test_simple_bootstrap_with_everywhere_strategy(self):
+        cluster = self.cluster
+        tokens = cluster.balanced_tokens(2)
+        cluster.set_configuration_options(values={'num_tokens': 1})
+
+        logger.debug("[node1, node2] tokens: %r" % (tokens,))
+
+        keys = 10000
+
+        # Create a single node cluster
+        cluster.populate(1)
+        node1 = cluster.nodelist()[0]
+        node1.set_configuration_options(values={'initial_token': tokens[0]})
+        cluster.start()
+
+        session = self.patient_cql_connection(node1)
+        create_ks(session, 'ks', 'EverywhereStrategy')
+        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
+
+        insert_statement = session.prepare("INSERT INTO ks.cf (key, c1, c2) VALUES (?, 'value1', 'value2')")
+        execute_concurrent_with_args(session, insert_statement, [['k%d' % k] for k in range(keys)])
+
+        node1.flush()
+        node1.compact()
+
+        # Reads inserted data all during the bootstrap process. We shouldn't
+        # get any error
+        query_c1c2(session, random.randint(0, keys - 1), ConsistencyLevel.ONE)
+        session.shutdown()
+
+        # Bootstrapping a new node in the current version
+        node2 = new_node(cluster)
+        node2.set_configuration_options(values={'initial_token': tokens[1]})
+        node2.start(wait_for_binary_proto=True)
+        node2.compact()
+
+        node1.cleanup()
+        logger.debug("node1 size for ks.cf after cleanup: %s" % float(data_size(node1,'ks','cf')))
+        node1.compact()
+        logger.debug("node1 size for ks.cf after compacting: %s" % float(data_size(node1,'ks','cf')))
+
+        logger.debug("node2 size for ks.cf after compacting: %s" % float(data_size(node2,'ks','cf')))
+
+        size1 = float(data_size(node1,'ks','cf'))
+        size2 = float(data_size(node2,'ks','cf'))
+        assert_almost_equal(size1, size2, error=0.3)
+
+        assert_bootstrap_state(self, node2, 'COMPLETED')
+
     @since('4.1')
     def test_invalid_host_id(self):
         """
diff --git a/byteman/guardrails/disk_usage_full.btm b/byteman/guardrails/disk_usage_full.btm
new file mode 100644
index 0000000000..bbdf8ddca9
--- /dev/null
+++ b/byteman/guardrails/disk_usage_full.btm
@@ -0,0 +1,8 @@
+RULE return FULL disk usage
+CLASS org.apache.cassandra.service.disk.usage.DiskUsageMonitor
+METHOD getState
+AT EXIT
+IF TRUE
+DO
+   return org.apache.cassandra.service.disk.usage.DiskUsageState.FULL;
+ENDRULE
\ No newline at end of file
diff --git a/byteman/guardrails/disk_usage_stuffed.btm b/byteman/guardrails/disk_usage_stuffed.btm
new file mode 100644
index 0000000000..3256211304
--- /dev/null
+++ b/byteman/guardrails/disk_usage_stuffed.btm
@@ -0,0 +1,8 @@
+RULE return STUFFED disk usage
+CLASS org.apache.cassandra.service.disk.usage.DiskUsageMonitor
+METHOD getState
+AT EXIT
+IF TRUE
+DO
+   return org.apache.cassandra.service.disk.usage.DiskUsageState.STUFFED;
+ENDRULE
\ No newline at end of file
diff --git a/client_request_metrics_test.py b/client_request_metrics_test.py
index 6c5ef4dd9a..1900ddb099 100644
--- a/client_request_metrics_test.py
+++ b/client_request_metrics_test.py
@@ -42,7 +42,7 @@ def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
         fixture_dtest_setup.ignore_log_patterns = (
             'Testing write failures',  # The error to simulate a write failure
             'ERROR WRITE_FAILURE',  # Logged in DEBUG mode for write failures
-            f"Scanned over {TOMBSTONE_FAILURE_THRESHOLD + 1} tombstones during query"  # Caused by the read failure tests
+            f"Scanned over {TOMBSTONE_FAILURE_THRESHOLD + 1} (tombstones|tombstone rows) during query"  # Caused by the read failure tests
         )
 
     def setup_once(self):
@@ -50,6 +50,7 @@ def setup_once(self):
         cluster.set_configuration_options({'read_request_timeout_in_ms': 3000,
                                            'write_request_timeout_in_ms': 3000,
                                            'phi_convict_threshold': 12,
+                                           'tombstone_warn_threshold': -1,
                                            'tombstone_failure_threshold': TOMBSTONE_FAILURE_THRESHOLD,
                                            'enable_materialized_views': 'true'})
         cluster.populate(2, debug=True)
diff --git a/compaction_test.py b/compaction_test.py
index d84b5dcd0f..ba5486d0eb 100644
--- a/compaction_test.py
+++ b/compaction_test.py
@@ -339,7 +339,10 @@ def test_large_compaction_warning(self):
         Check that we log a warning when the partition size is bigger than
         compaction_large_partition_warning_threshold_mb
         """
         cluster = self.cluster
-        cluster.set_configuration_options({'compaction_large_partition_warning_threshold_mb': 1})
+        if self.supports_guardrails:
+            cluster.set_configuration_options({'guardrails': {'partition_size_warn_threshold_in_mb': 1}})
+        else:
+            cluster.set_configuration_options({'compaction_large_partition_warning_threshold_mb': 1})
cluster.populate(1).start() [node] = cluster.nodelist() @@ -361,7 +364,10 @@ def test_large_compaction_warning(self): node.nodetool('compact ks large') verb = 'Writing' if self.cluster.version() > '2.2' else 'Compacting' sizematcher = '\d+ bytes' if self.cluster.version() < LooseVersion('3.6') else '\d+\.\d{3}(K|M|G)iB' - node.watch_log_for('{} large partition ks/large:user \({}'.format(verb, sizematcher), from_mark=mark, timeout=180) + log_message = '{} large partition ks/large:user \({}'.format(verb, sizematcher) + if self.supports_guardrails: + log_message = "Detected partition 'user' in ks.large of size 2MB is greater than the maximum recommended size \(1MB\)" + node.watch_log_for(log_message, from_mark=mark, timeout=180) ret = list(session.execute("SELECT properties from ks.large where userid = 'user'")) assert_length_equal(ret, 1) diff --git a/conftest.py b/conftest.py index 33e188fd52..79b9714191 100644 --- a/conftest.py +++ b/conftest.py @@ -43,6 +43,9 @@ def check_required_loopback_interfaces_available(): def pytest_addoption(parser): + parser.addoption("--sstable-format", action="store", default="bti", + help="SSTable format to be used by default for all newly created SSTables: " + "big or bti (default: bti)") parser.addoption("--use-vnodes", action="store_true", default=False, help="Determines wither or not to setup clusters using vnodes for tests") parser.addoption("--use-off-heap-memtables", action="store_true", default=False, diff --git a/cqlsh_tests/cqlshrc.sample.cloud b/cqlsh_tests/cqlshrc.sample.cloud new file mode 100644 index 0000000000..62528670c4 --- /dev/null +++ b/cqlsh_tests/cqlshrc.sample.cloud @@ -0,0 +1,17 @@ +; Copyright DataStax, Inc. +; +; Licensed under the Apache License, Version 2.0 (the "License"); +; you may not use this file except in compliance with the License. +; You may obtain a copy of the License at +; +; http://www.apache.org/licenses/LICENSE-2.0 +; +; Unless required by applicable law or agreed to in writing, software +; distributed under the License is distributed on an "AS IS" BASIS, +; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +; See the License for the specific language governing permissions and +; limitations under the License. +; +; Sample ~/.cqlshrc file with cloud configuration. 
+[connection] +secure_connect_bundle = /path/to/creds.zip diff --git a/cqlsh_tests/secure-connect-test.zip b/cqlsh_tests/secure-connect-test.zip new file mode 100644 index 0000000000..bcd4a7fb2a Binary files /dev/null and b/cqlsh_tests/secure-connect-test.zip differ diff --git a/cqlsh_tests/test_cqlsh.py b/cqlsh_tests/test_cqlsh.py index 78bde3aba6..15dda8390e 100644 --- a/cqlsh_tests/test_cqlsh.py +++ b/cqlsh_tests/test_cqlsh.py @@ -23,6 +23,7 @@ from cassandra.concurrent import execute_concurrent_with_args from cassandra.query import BatchStatement, BatchType from ccmlib import common +from ccmlib.node import ToolError from .cqlsh_tools import monkeypatch_driver, unmonkeypatch_driver from dtest import Tester, create_ks, create_cf @@ -92,6 +93,7 @@ def run_cqlsh(self, node, cmds, cqlsh_options=None, env_vars=None): logger.debug("Cqlsh command stderr:\n" + stderr) return stdout, stderr + class TestCqlsh(Tester, CqlshMixin): # override cluster options to enable user defined functions @@ -1103,6 +1105,7 @@ def get_test_table_output(self, has_val=True, has_val_idx=True): AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} + AND memtable = {} AND crc_check_chance = 1.0 AND default_time_to_live = 0 AND extensions = {} @@ -1192,6 +1195,7 @@ def get_users_table_output(self): AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} + AND memtable = {} AND crc_check_chance = 1.0 AND default_time_to_live = 0 AND extensions = {} @@ -1298,6 +1302,7 @@ def get_users_by_state_mv_output(self): AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} + AND memtable = {} AND crc_check_chance = 1.0 AND default_time_to_live = 0 AND extensions = {} @@ -1829,8 +1834,11 @@ def test_client_warnings(self): """ max_partitions_per_batch = 5 self.cluster.populate(3) - self.cluster.set_configuration_options({ - 'unlogged_batch_across_partitions_warn_threshold': str(max_partitions_per_batch)}) + + config_opts = {'unlogged_batch_across_partitions_warn_threshold': str(max_partitions_per_batch)} + if self.supports_guardrails: + config_opts = {"guardrails": config_opts} + self.cluster.set_configuration_options(config_opts) self.cluster.start() @@ -1880,6 +1888,52 @@ def test_connect_timeout(self): stdout, stderr = self.run_cqlsh(node1, cmds='USE system', cqlsh_options=['--debug', '--connect-timeout=10']) assert "Using connect timeout: 10 seconds" in stderr + @since('4.0') + def test_consistency_level_options(self): + """ + Tests for new cmdline consistency options: + - consistency-level + - serial-consistency-level + @jira_ticket STAR-432 + """ + self.cluster.populate(1) + self.cluster.start() + + node1, = self.cluster.nodelist() + + def expect_output_no_errors(cmd, options, output): + stdout, stderr = self.run_cqlsh(node1, cmds=cmd, cqlsh_options=options) + assert output in stdout, stderr + assert stderr == '' + + expect_output_no_errors('CONSISTENCY', [], + 'Current consistency level is ONE.') + + 
expect_output_no_errors('CONSISTENCY', ['--consistency-level', 'quorum'], + 'Current consistency level is QUORUM.') + + expect_output_no_errors('SERIAL CONSISTENCY', [], + 'Current serial consistency level is SERIAL.') + + expect_output_no_errors('SERIAL CONSISTENCY', ['--serial-consistency-level', 'local_serial'], + 'Current serial consistency level is LOCAL_SERIAL.') + + def expect_error(cmd, options, error_msg): + stdout, stderr = self.run_cqlsh(node1, cmds=cmd, cqlsh_options=options) + assert error_msg in stderr + + expect_error('CONSISTENCY', ['--consistency-level', 'foop'], + '"foop" is not a valid consistency level') + + expect_error('CONSISTENCY', ['--consistency-level', 'serial'], + '"serial" is not a valid consistency level') + + expect_error('SERIAL CONSISTENCY', ['--serial-consistency-level', 'foop'], + '"foop" is not a valid serial consistency level') + + expect_error('SERIAL CONSISTENCY', ['--serial-consistency-level', 'ONE'], + '"ONE" is not a valid serial consistency level') + @since('3.0.19') def test_protocol_negotiation(self): """ @@ -2453,6 +2507,50 @@ def test_cjk_output(self): """ assert stdout_lines_sorted.find(expected) >= 0 + @since('4.0') + def test_no_file_io(self): + def run_cqlsh_catch_toolerror(cmd, env): + """ + run_cqlsh will throw ToolError if cqlsh exits with a non-zero exit code. + """ + out = "" + err = "" + try: + out, err, _ = self.node1.run_cqlsh(cmd, env) + except ToolError as e: + return e.stdout, e.stderr + return out, err + + create_ks(self.session, 'foo', rf=1) + create_cf(self.session, 'bar', key_type='int', columns={'name': 'text'}) + + cqlsh_stdout, cqlsh_stderr, _ = self.node1.run_cqlsh('COPY foo.bar TO \'/dev/null\';', []) + assert '0 rows exported to 1 files' in cqlsh_stdout + assert cqlsh_stderr == '' + cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('COPY foo.bar TO \'/dev/null\';', ['--no-file-io']) + assert cqlsh_stdout == '' + assert 'No file I/O permitted' in cqlsh_stderr + + cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('DEBUG', []) + assert '(Pdb)' in cqlsh_stdout + cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('DEBUG', ['--no-file-io']) + assert cqlsh_stdout == '' + assert 'No file I/O permitted' in cqlsh_stderr + + cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('CAPTURE \'nah\'', []) + assert cqlsh_stdout == 'Now capturing query output to \'nah\'.\n' + assert cqlsh_stderr == '' + cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('CAPTURE \'nah\'', ['--no-file-io']) + assert cqlsh_stdout == '' + assert 'No file I/O permitted' in cqlsh_stderr + + cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('SOURCE \'nah\'', []) + assert cqlsh_stdout == '' + assert cqlsh_stderr == '' + cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('SOURCE \'nah\'', ['--no-file-io']) + assert cqlsh_stdout == '' + assert 'No file I/O permitted' in cqlsh_stderr + class TestCqlLogin(Tester, CqlshMixin): """ diff --git a/cqlsh_tests/test_cqlsh_cloud.py b/cqlsh_tests/test_cqlsh_cloud.py new file mode 100644 index 0000000000..626eaaba09 --- /dev/null +++ b/cqlsh_tests/test_cqlsh_cloud.py @@ -0,0 +1,125 @@ +# coding=utf-8 + +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import pytest +from ccmlib.node import ToolError + +from dtest import Tester + +logger = logging.getLogger(__name__) +since = pytest.mark.since + + +@since("4.0") +class TestSecureBundleConnection(Tester): + """ + Tests related to cqlsh behavior for cloud e.g. secure bundle connection. + We only test cqlshrc behavior. + Testing if the connection using secure bundle is really working + requires a true cluster with generated secure bundle to run. + And this is not possible without testing infrastructure/tooling changes. + + We can assume that it is correctly tested by the python driver or + will be tested in the next stage of testing (cloud native). + + Validation is done using --debug information or error msgs. + + Inspired by STAR-765. + """ + + CQLSHRC_PATH = 'cqlsh_tests/cqlshrc.sample.cloud' + BUNDLE_PATH = 'cqlsh_tests/secure-connect-test.zip' + + def prepare(self, start=False): + if not self.cluster.nodelist(): + self.cluster.populate(1) + if start: + self.cluster.start() + return self.cluster.nodelist()[0] + + def _expect_tool_error(self, cmds, options, msg): + node = self.cluster.nodelist()[0] + with pytest.raises(ToolError, match=msg): + out, err, _ = node.run_cqlsh(cmds=cmds, cqlsh_options=options) + return out, err + + def test_start_fails_on_non_existing_file(self): + self.prepare() + self._expect_tool_error(cmds='HELP', + options=['--secure-connect-bundle', 'not-existing-file.zip'], + msg='No such file or directory') + + def test_start_fails_when_file_not_a_bundle(self): + self.prepare() + self._expect_tool_error(cmds='HELP', + options=['--secure-connect-bundle', self.CQLSHRC_PATH], + msg='Unable to open the zip file for the cloud config') + + def test_read_bundle_path_from_cqlshrc(self): + self.prepare() + self._expect_tool_error(cmds='HELP', + options=['--cqlshrc', self.CQLSHRC_PATH], + msg="No such file or directory: '/path/to/creds.zip'") + + def test_host_and_port_are_ignored_with_secure_bundle(self): + # it should connect with provided host and port to the started ccm node + node = self.prepare(start=True) + node.run_cqlsh("HELP", []) + # but fail with secure bundle even if port and host are set + expected_msg = "https://1263dd11-0aa5-41ef-8e56-17fa5fc7036e-europe-west1.db.astra.datastax.com:31669" + self._expect_tool_error(cmds='HELP', + options=['--secure-connect-bundle', self.BUNDLE_PATH, node.ip_addr, '9042'], + msg=expected_msg) + + def test_default_consistency_level_for_secure_connect_bundle_param(self): + self.prepare() + self._expect_tool_error(cmds='HELP', + options=['--secure-connect-bundle', 'not-existing-file.zip', '--debug'], + msg='Using consistency level:.*LOCAL_QUORUM') + + def test_default_consistency_level_for_secure_connect_bundle_in_clqshrc(self): + self.prepare() + self._expect_tool_error(cmds='HELP', + options=['--cqlshrc', self.CQLSHRC_PATH, '--debug'], + msg='Using consistency level:.*LOCAL_QUORUM') + + def test_set_consistency_level_for_secure_connect_bundle_in_clqshrc(self): + self.prepare() + self._expect_tool_error(cmds='HELP', + options=['--cqlshrc', self.CQLSHRC_PATH, '--debug', '--consistency-level', 'TWO'], + 
msg='Using consistency level:.*TWO') + + def test_debug_should_include_cloud_details(self): + self.prepare() + self._expect_tool_error(cmds='HELP', + options=['--secure-connect-bundle', 'not-existing-file.zip', '--debug'], + msg='Using secure connect bundle.*not-existing-file.zip') + + @pytest.mark.skip("we cannot test it without ccm secure conn bundle support in ccm") + def test_endpoint_load_balancing_policy_is_used(self): + # to test this we would need a 3 nodes cloud cluster + assert False, "TODO: implement" + + @pytest.mark.skip("we cannot test it without ccm secure conn bundle support in ccm") + def test_connects_correctly(self): + assert False, "TODO: implement" + + @pytest.mark.skip("we cannot test it without ccm secure conn bundle support in ccm") + def test_login_command_keeps_cloud_connection_using_bundle(self): + # cqlsh.py -b some-bundle.zip -u user -p password + # LOGIN user(password) + assert False diff --git a/cqlsh_tests/test_cqlsh_copy.py b/cqlsh_tests/test_cqlsh_copy.py index 396de32e15..458804c768 100644 --- a/cqlsh_tests/test_cqlsh_copy.py +++ b/cqlsh_tests/test_cqlsh_copy.py @@ -2481,8 +2481,12 @@ def test_bulk_round_trip_blogposts(self): @jira_ticket CASSANDRA-9302 """ + config_opts = {'batch_size_warn_threshold_in_kb': '10'} + if self.supports_guardrails: # batch size thresholds moved to guardrails in 4.0 + config_opts = {'guardrails': config_opts} + self._test_bulk_round_trip(nodes=3, partitioner="murmur3", num_operations=10000, - configuration_options={'batch_size_warn_threshold_in_kb': '10'}, + configuration_options=config_opts, profile=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'blogposts.yaml'), stress_table='stresscql.blogposts') @@ -2495,9 +2499,16 @@ def test_bulk_round_trip_blogposts_with_max_connections(self): @jira_ticket CASSANDRA-10938 """ + batch_size_warn_threshold_in_kb = '10' + native_transport_max_concurrent_connections = '12' + if self.supports_guardrails: # batch size thresholds moved to guardrails in 4.0 + config_opts = {'guardrails': {'batch_size_warn_threshold_in_kb': batch_size_warn_threshold_in_kb}, + 'native_transport_max_concurrent_connections': native_transport_max_concurrent_connections} + else: + config_opts = {'native_transport_max_concurrent_connections': native_transport_max_concurrent_connections, + 'batch_size_warn_threshold_in_kb': batch_size_warn_threshold_in_kb} self._test_bulk_round_trip(nodes=3, partitioner="murmur3", num_operations=10000, - configuration_options={'native_transport_max_concurrent_connections': '12', - 'batch_size_warn_threshold_in_kb': '10'}, + configuration_options=config_opts, profile=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'blogposts.yaml'), stress_table='stresscql.blogposts', copy_to_options={'NUMPROCESSES': 5, 'MAXATTEMPTS': 20}, @@ -2827,8 +2838,13 @@ def test_copy_from_with_large_cql_rows(self): @jira_ticket CASSANDRA-11474 """ num_records = 100 - self.prepare(nodes=1, configuration_options={'batch_size_warn_threshold_in_kb': '1', # warn with 1kb and fail - 'batch_size_fail_threshold_in_kb': '5'}) # with 5kb size batches + batch_size_warn_threshold_in_kb = '1' # warn with 1kb and fail + batch_size_fail_threshold_in_kb = '5' # with 5kb size batches + config_opts = {'batch_size_warn_threshold_in_kb': batch_size_warn_threshold_in_kb, + 'batch_size_fail_threshold_in_kb': batch_size_fail_threshold_in_kb} + if self.supports_guardrails: # batch size thresholds moved to guardrails in 4.0 + config_opts = {'guardrails': config_opts} + self.prepare(nodes=1, 
configuration_options=config_opts)
 
         logger.debug('Running stress')
         stress_table_name = 'standard1'
@@ -3336,3 +3352,79 @@ _test_invalid_data_for_maps():
         _test_invalid_data_for_sets()
         _test_invalid_data_for_lists()
         _test_invalid_data_for_maps()
+
+    @since('4.0')
+    def test_geotypes_copy(self):
+        """
+        Tests whether cqlsh COPY properly handles geo types with empty and null values.
+
+        @since 4.0.0
+
+        Steps:
+        * insert several geoTypes with null and empty values among them into a table
+        * cqlsh copy contents to .csv file and save them in a list
+        * wipe the table completely of all data
+        * cqlsh copy contents from .csv back into the table
+        * get the contents of the table into a list
+        * assert the pre-wiped data is the same as the newly copied data
+        :return
+        """
+        self.prepare()
+
+        self.session.execute("create table geo (k int primary key, point 'PointType', line 'LineStringType', poly 'PolygonType');")
+        self.session.execute("insert into geo (k, point, line, poly) VALUES (0, 'point(1.2 3.4)', 'linestring(1.0 1.1, 2.0 2.1, 3.0 3.1)', 'POLYGON ((10.1 10.0, 110.0 10.0, 110.0 110.0, 10.0 110.0, 10.0 10.0), (20.0 20.0, 20.0 30.0, 30.0 30.0, 30.0 20.0, 20.0 20.0))');")
+        self.session.execute("insert into geo (k, point, line, poly) VALUES (2, 'point(1.2 3.4)', 'linestring EMPTY', 'POLYGON EMPTY');")
+        self.session.execute("insert into geo (k) VALUES (1);")
+
+        # make sure data is inserted
+        data_actual = rows_to_list(self.session.execute("select * from geo;"))
+        assert len(data_actual) == 3
+
+        # dump data to CSV and truncate
+        tempfile = self.get_temp_file()
+        self.run_cqlsh(cmds="COPY ks.geo TO '{name}'".format(name=tempfile.name))
+        self.run_cqlsh(cmds="truncate ks.geo;")
+
+        # import data back
+        self.run_cqlsh(cmds="COPY ks.geo FROM '{name}'".format(name=tempfile.name))
+        data_copy = rows_to_list(self.session.execute("select * from geo;"))
+
+        assert data_actual == data_copy
+
+    @since("4.0")
+    def test_date_range_copy(self):
+        """
+        DateRangeTests.test_copy_command
+
+        Tests whether cqlsh COPY properly handles date_range types, including null values.
+ @note we cannot insert empty value ('') as it is not presented as null in cqlsh but it is in COPY + """ + self.prepare() + + self.session.execute("create table incomes (org text, period 'DateRangeType', incomes int, ver 'DateRangeType', primary key (org, period));") + # insert some data + self.session.execute("insert into incomes(org, period, incomes) values ('A','2014', 20140);") + self.session.execute("insert into incomes(org, period, incomes) values ('A','2015', 201500);") + self.session.execute("insert into incomes(org, period, incomes) values ('A','[2016-01-01 TO 2016-06-30]', 1007);") + self.session.execute("insert into incomes(org, period, incomes) values ('B','[2017-02-12 12:30:07 TO 2017-02-17 13:39:43.789]', 777);") + self.session.execute("insert into incomes(org, period, incomes, ver) values ('X','2011', 0, null);") + self.session.execute("insert into incomes(org, period, incomes) values ('C','*', 996);") + self.session.execute("insert into incomes(org, period, incomes) values ('C','[* TO *]', 997);") + self.session.execute("insert into incomes(org, period, incomes) values ('C','[* TO 2015-01]', 998);") + self.session.execute("insert into incomes(org, period, incomes) values ('C','[2015-01 TO *]', 999);") + + # make sure data is inserted + data_actual = rows_to_list(self.session.execute("select * from incomes;")) + assert len(data_actual) == 9 + + # dump data to CSV and truncate + tempfile = self.get_temp_file() + self.run_cqlsh(cmds="COPY ks.incomes TO '{name}'".format(name=tempfile.name)) + self.run_cqlsh(cmds="truncate ks.incomes;") + + # import data back + self.run_cqlsh(cmds="COPY ks.incomes FROM '{name}'".format(name=tempfile.name)) + data_copy = rows_to_list(self.session.execute("select * from incomes;")) + + assert data_actual == data_copy + diff --git a/cqlsh_tests/test_cqlsh_types.py b/cqlsh_tests/test_cqlsh_types.py new file mode 100644 index 0000000000..11e4604c7e --- /dev/null +++ b/cqlsh_tests/test_cqlsh_types.py @@ -0,0 +1,67 @@ +import logging +import pytest + +from dtest import Tester, create_ks + +logger = logging.getLogger(__name__) +since = pytest.mark.since + + +@since("4.0") +class TestCqlshTypes(Tester): + + def prepare(self, workload=None): + if not self.cluster.nodelist(): + self.allow_log_errors = True + self.cluster.populate(1) + if workload is not None: + for node in self.cluster.nodelist(): + node.set_workload(workload) + logger.debug('About to start cluster') + self.cluster.start() + logger.debug('Cluster started') + for node in self.cluster.nodelist(): + node.watch_log_for('Starting listening for CQL clients', timeout=60) + self.cluster.nodelist()[0].watch_log_for('Created default superuser') + self.node = self.cluster.nodelist()[0] + + conn = self.patient_cql_connection(self.node) + create_ks(conn, 'ks', 1) + + logger.debug('prepare completed') + + def test_point(self): + self.prepare() + + expected = 'POINT (1.2 2.3)' + self.node.run_cqlsh("CREATE TABLE ks.point_tbl (k INT PRIMARY KEY, point 'PointType');") + self.node.run_cqlsh("INSERT INTO ks.point_tbl (k, point) VALUES (1, '{}')".format(expected)) + result = self.node.run_cqlsh("SELECT * FROM ks.point_tbl;") + assert expected in result[0], result + + def test_linestring(self): + self.prepare() + + expected = 'LINESTRING (30.0 10.0, 10.0 30.0, 40.0 40.0)' + self.node.run_cqlsh("CREATE TABLE ks.line_tbl (k INT PRIMARY KEY, linestring 'LineStringType');") + self.node.run_cqlsh("INSERT INTO ks.line_tbl (k, linestring) VALUES (1, '{}')".format(expected)) + result = self.node.run_cqlsh("SELECT * 
FROM ks.line_tbl;") + assert expected in result[0], result + + def test_polygon(self): + self.prepare() + + expected = 'POLYGON ((30.0 10.0, 40.0 40.0, 20.0 40.0, 10.0 20.0, 30.0 10.0))' + self.node.run_cqlsh("CREATE TABLE ks.polygon_tbl (k INT PRIMARY KEY, polygon 'PolygonType');") + self.node.run_cqlsh("INSERT INTO ks.polygon_tbl (k, polygon) VALUES (1, '{}')".format(expected)) + result = self.node.run_cqlsh("SELECT * FROM ks.polygon_tbl;") + assert expected in result[0], result + + def test_date_range(self): + self.prepare() + + expected = '[2015-01 TO *]' + self.node.run_cqlsh("CREATE TABLE ks.date_range_tbl (k INT PRIMARY KEY, date_range_tbl 'DateRangeType');") + self.node.run_cqlsh("INSERT INTO ks.date_range_tbl (k, date_range_tbl) VALUES (1, '{}')".format(expected)) + result = self.node.run_cqlsh("SELECT * FROM ks.date_range_tbl;") + assert expected in result[0], result diff --git a/dtest.py b/dtest.py index f2c89b21dc..cb2470cbf3 100644 --- a/dtest.py +++ b/dtest.py @@ -359,6 +359,8 @@ def create_ks(session, name, rf): if isinstance(rf, int): # we assume simpleStrategy query = query % (name, "'class':'SimpleStrategy', 'replication_factor':%d" % rf) + elif 'EverywhereStrategy' in rf: + query = query % (name, "'class':'org.apache.cassandra.locator.EverywhereStrategy'") else: assert len(rf) >= 0, "At least one datacenter/rf pair is needed" # we assume networkTopologyStrategy diff --git a/dtest_config.py b/dtest_config.py index 86e8c96b25..ad454d49f7 100644 --- a/dtest_config.py +++ b/dtest_config.py @@ -11,6 +11,7 @@ class DTestConfig: def __init__(self): + self.sstable_format = "bti" self.use_vnodes = True self.use_off_heap_memtables = False self.num_tokens = -1 @@ -41,6 +42,10 @@ def setup(self, config): self.cassandra_version_from_build = self.get_version_from_build() return + self.sstable_format = config.getoption("--sstable-format") + if self.sstable_format: + assert self.sstable_format in ['bti', 'big'], "SSTable format {} is invalid - must be either bti or big".format(self.sstable_format) + self.use_vnodes = config.getoption("--use-vnodes") self.use_off_heap_memtables = config.getoption("--use-off-heap-memtables") self.num_tokens = config.getoption("--num-tokens") @@ -90,6 +95,17 @@ def setup(self, config): "--use-off-heap-memtables, see https://issues.apache.org/jira/browse/CASSANDRA-9472 " "for details" % version) + self.apply_to_env(os.environ, "JVM_EXTRA_OPTS") + + def apply_to_env(self, env, key="JVM_OPTS"): + current = env.get(key) or "" + if self.sstable_format: + default_sstable_format_prop = " -Dcassandra.sstable.format.default=" + self.sstable_format + if not current.__contains__("-Dcassandra.sstable.format.default"): + env.update({key: (env.get(key) or "") + default_sstable_format_prop}) + else: + logger.debug("Skipped adding {} because it is already in the env key {}: {}".format(default_sstable_format_prop, key, current)) + def get_version_from_build(self): # There are times when we want to know the C* version we're testing against # before we do any cluster. 
In the general case, we can't know that -- the diff --git a/dtest_setup.py b/dtest_setup.py index d04fb001bc..009fb220f6 100644 --- a/dtest_setup.py +++ b/dtest_setup.py @@ -332,6 +332,10 @@ def dump_jfr_recording(self, nodes): def supports_v5_protocol(self, cluster_version): return cluster_version >= LooseVersion('4.0') + def supports_guardrails(self): + return self.cluster.version() >= LooseVersion('4.0') + + def cleanup_last_test_dir(self): if os.path.exists(self.last_test_dir): os.remove(self.last_test_dir) diff --git a/guardrails_test.py b/guardrails_test.py new file mode 100644 index 0000000000..bf883bba98 --- /dev/null +++ b/guardrails_test.py @@ -0,0 +1,99 @@ +import logging +import time +import pytest +import re + +from cassandra import InvalidRequest + +from dtest import Tester, create_ks +from tools.assertions import assert_one + +since = pytest.mark.since +logger = logging.getLogger(__name__) + +class BaseGuardrailsTester(Tester): + + def prepare(self, rf=1, options=None, nodes=3, install_byteman=False, extra_jvm_args=None, **kwargs): + if options is None: + options = {} + + if extra_jvm_args is None: + extra_jvm_args = [] + + cluster = self.cluster + cluster.set_log_level('TRACE') + cluster.populate(nodes, install_byteman=install_byteman) + if options: + cluster.set_configuration_options(values=options) + + cluster.start(jvm_args=extra_jvm_args) + node1 = cluster.nodelist()[0] + + session = self.patient_cql_connection(node1, **kwargs) + create_ks(session, 'ks', rf) + + return session + + +@since('4.0') +class TestGuardrails(BaseGuardrailsTester): + + def test_disk_usage_guardrail(self): + """ + Test disk usage guardrail will warn if exceeds warn threshold and reject writes if exceeds failure threshold + """ + + self.fixture_dtest_setup.ignore_log_patterns = ["Write request failed because disk usage exceeds failure threshold"] + guardrails_config = {'guardrails': {'disk_usage_percentage_warn_threshold': 98, + 'disk_usage_percentage_failure_threshold': 99}} + + logger.debug("prepare 2-node cluster with rf=1 and guardrails enabled") + session = self.prepare(rf=1, nodes=2, options=guardrails_config, extra_jvm_args=['-Dcassandra.disk_usage.monitor_interval_ms=100'], install_byteman=True) + node1, node2 = self.cluster.nodelist() + session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)") + + logger.debug("Inject FULL to node1, expect log on node1 and node2 rejects writes") + mark = node1.mark_log() + self.disk_usage_injection(node1, "full", False) + node1.watch_log_for("Adding state DISK_USAGE: FULL", filename='debug.log', from_mark=mark, timeout=10) + + # verify node2 will reject writes if node1 is the replica + session2 = self.patient_exclusive_cql_connection(node2, keyspace="ks") + rows = 100 + failed = 0 + for x in range(rows): + try: + session2.execute("INSERT INTO t(id, v) VALUES({v}, {v})".format(v=x)) + except InvalidRequest as e: + assert re.search("Write request failed because disk usage exceeds failure threshold", str(e)) + failed = failed + 1 + + assert rows != failed, "Expect node2 rejects some writes, but rejected all" + assert 0 != failed, "Expect node2 rejects some writes, but rejected nothing" + assert_one(session2, "SELECT COUNT(*) FROM t", [rows - failed]) + + logger.debug("Inject STUFFED to node1, node2 should warn client") + session2.execute("TRUNCATE t") + mark = node1.mark_log() + self.disk_usage_injection(node1, "stuffed") + node1.watch_log_for("Adding state DISK_USAGE: STUFFED", filename='debug.log', from_mark=mark, timeout=10) + + warnings = 0 + for x 
in range(rows): + fut = session2.execute_async("INSERT INTO t(id, v) VALUES({v}, {v})".format(v=x)) + fut.result() + if fut.warnings: + assert ["Replica disk usage exceeds warn threshold"] == fut.warnings + warnings = warnings + 1 + + assert rows != warnings,"Expect node2 emits some warnings, but got all warnings" + assert 0 != warnings,"Expect node2 emits some warnings, but got no warnings" + assert_one(session2, "SELECT COUNT(*) FROM t", [rows]) + + session.cluster.shutdown() + session2.cluster.shutdown() + + def disk_usage_injection(self, node, state, clear_byteman=True): + if clear_byteman: + node.byteman_submit(['-u']) + node.byteman_submit(["./byteman/guardrails/disk_usage_{}.btm".format(state)]) diff --git a/offline_tools_test.py b/offline_tools_test.py index 04ad4c0133..e11ddcefd5 100644 --- a/offline_tools_test.py +++ b/offline_tools_test.py @@ -271,9 +271,9 @@ def test_sstableverify(self): hashcomputed = False for line in outlines: if sstable in line: - if "Verifying BigTableReader" in line: + if "Verifying " in line: verified = True - elif "Checking computed hash of BigTableReader" in line: + elif "Checking computed hash of " in line: hashcomputed = True else: logger.debug(line) diff --git a/paging_test.py b/paging_test.py index 6983d3f05c..6d666df871 100644 --- a/paging_test.py +++ b/paging_test.py @@ -18,6 +18,7 @@ assert_one, assert_lists_equal_ignoring_order) from tools.data import rows_to_list from tools.datahelp import create_rows, flatten_into_set, parse_data_into_dicts +from tools.misc import restart_cluster_and_update_config from tools.paging import PageAssertionMixin, PageFetcher since = pytest.mark.since @@ -3423,19 +3424,26 @@ def test_failure_threshold_deletions(self): supports_v5_protocol = self.supports_v5_protocol(self.cluster.version()) self.fixture_dtest_setup.allow_log_errors = True - self.cluster.set_configuration_options( - values={'tombstone_failure_threshold': 500} - ) + if self.supports_guardrails: + config_opts = {'guardrails': {'tombstone_failure_threshold': 500, + 'tombstone_warn_threshold': -1, + 'write_consistency_levels_disallowed': {}}} + else: + config_opts = {'tombstone_failure_threshold': 500} + restart_cluster_and_update_config(self.cluster, config_opts) self.session = self.prepare() self.setup_data() - # Add more data + if self.supports_guardrails: + # cell tombstones are not counted towards the threshold, so we delete rows + query = "delete from paging_test where id = 1 and mytext = '{}'" + else: + # Add more data + query = "insert into paging_test (id, mytext, col1) values (1, '{}', null)" + values = [uuid.uuid4() for i in range(3000)] for value in values: - self.session.execute(SimpleStatement( - "insert into paging_test (id, mytext, col1) values (1, '{}', null) ".format( - value - ), + self.session.execute(SimpleStatement(query.format(value), consistency_level=CL.ALL )) @@ -3456,7 +3464,7 @@ def test_failure_threshold_deletions(self): failure_msg = ("Scanned over.* tombstones in test_paging_size." 
"paging_test.* query aborted") else: - failure_msg = ("Scanned over.* tombstones during query.* query aborted") + failure_msg = ("Scanned over.* (tombstones|tombstone rows) during query.* query aborted") self.cluster.wait_for_any_log(failure_msg, 25) diff --git a/pushed_notifications_test.py b/pushed_notifications_test.py index a3b1bdcd5a..0e25e1b27f 100644 --- a/pushed_notifications_test.py +++ b/pushed_notifications_test.py @@ -388,13 +388,18 @@ def test_tombstone_failure_threshold_message(self): have_v5_protocol = self.supports_v5_protocol(self.cluster.version()) self.fixture_dtest_setup.allow_log_errors = True - self.cluster.set_configuration_options( - values={ - 'tombstone_failure_threshold': 500, - 'read_request_timeout_in_ms': 30000, # 30 seconds - 'range_request_timeout_in_ms': 40000 - } - ) + + if self.supports_guardrails: + config_options = {'guardrails': {'tombstone_warn_threshold': -1, + 'tombstone_failure_threshold': 500}, + 'read_request_timeout_in_ms': 30000, # 30 seconds + 'range_request_timeout_in_ms': 40000} + else: + config_options = {'tombstone_failure_threshold': 500, + 'read_request_timeout_in_ms': 30000, # 30 seconds + 'range_request_timeout_in_ms': 40000} + + self.cluster.set_configuration_options(values=config_options) self.cluster.populate(3).start() node1, node2, node3 = self.cluster.nodelist() proto_version = 5 if have_v5_protocol else None @@ -407,17 +412,17 @@ def test_tombstone_failure_threshold_message(self): "PRIMARY KEY (id, mytext) )" ) - # Add data with tombstones + if self.supports_guardrails: + # cell tombstones are not counted towards the threshold, so we delete rows + query = "delete from test where id = 1 and mytext = '{}'" + else: + # Add data with tombstones + query = "insert into test (id, mytext, col1) values (1, '{}', null)" values = [str(i) for i in range(1000)] for value in values: - session.execute(SimpleStatement( - "insert into test (id, mytext, col1) values (1, '{}', null) ".format( - value - ), - consistency_level=CL.ALL - )) - - failure_msg = ("Scanned over.* tombstones.* query aborted") + session.execute(SimpleStatement(query.format(value),consistency_level=CL.ALL)) + + failure_msg = ("Scanned over.* (tombstones|tombstone rows).* query aborted") @pytest.mark.timeout(25) def read_failure_query(): diff --git a/read_failures_test.py b/read_failures_test.py index 475f27815d..664ca70ff4 100644 --- a/read_failures_test.py +++ b/read_failures_test.py @@ -4,6 +4,7 @@ from cassandra import ConsistencyLevel, ReadFailure, ReadTimeout from cassandra.policies import FallthroughRetryPolicy from cassandra.query import SimpleStatement +from distutils.version import LooseVersion from dtest import Tester @@ -21,7 +22,9 @@ class TestReadFailures(Tester): @pytest.fixture(autouse=True) def fixture_add_additional_log_patterns(self, fixture_dtest_setup): fixture_dtest_setup.ignore_log_patterns = ( - "Scanned over [1-9][0-9]* tombstones", # This is expected when testing read failures due to tombstones + # These are expected when testing read failures due to tombstones, + "Scanned over [1-9][0-9]* tombstones", + "Scanned over [1-9][0-9]* tombstone rows", ) return fixture_dtest_setup @@ -33,9 +36,15 @@ def fixture_dtest_setup_params(self): self.expected_expt = ReadFailure def _prepare_cluster(self): - self.cluster.set_configuration_options( - values={'tombstone_failure_threshold': self.tombstone_failure_threshold} - ) + if self.supports_guardrails: + self.cluster.set_configuration_options( + values={'guardrails': {'tombstone_warn_threshold': -1, + 
'tombstone_failure_threshold': self.tombstone_failure_threshold}} + ) + else: + self.cluster.set_configuration_options( + values={'tombstone_failure_threshold': self.tombstone_failure_threshold} + ) self.cluster.populate(3) self.cluster.start() self.nodes = list(self.cluster.nodes.values()) diff --git a/repair_tests/repair_test.py b/repair_tests/repair_test.py index ddae777343..4ff6d024ea 100644 --- a/repair_tests/repair_test.py +++ b/repair_tests/repair_test.py @@ -1195,6 +1195,40 @@ def run_repair(): else: node1.nodetool('repair keyspace1 standard1 -inc -par') + @since('3.0') + def test_repair_one_node_cluster(self): + options = [] + fix_STAR582 = self.cluster.version() >= "4.0" + if not fix_STAR582: + options = ['--ignore-unreplicated-keyspaces'] + options + self._repair_abort_test(options=options, nodes=1, rf=2) + + @since('3.0') + def test_repair_one_node_in_local_dc(self): + self._repair_abort_test(options=['--ignore-unreplicated-keyspaces', '--in-local-dc'], nodes=[1, 1], rf={'dc1': 1, 'dc2': 1}, no_common_range=True) + + def _repair_abort_test(self, options=[], nodes=1, rf=1, no_common_range=False): + cluster = self.cluster + logger.debug("Starting cluster..") + cluster.populate(nodes).start(wait_for_binary_proto=True) + + node1 = self.cluster.nodelist()[0] + session = self.patient_cql_connection(node1) + create_ks(session, 'ks', rf=rf) + + support_preview = self.cluster.version() >= "4.0" + if support_preview: + logger.debug("Preview repair") + out = node1.repair(["--preview"] + options) + if no_common_range: + assert "Nothing to repair for " in str(out), "Expect 'Nothing to repair for '" + + logger.debug("Full repair") + node1.repair(["--full"] + options) + + logger.debug("Incremental repair") + node1.repair(options) + @since('2.2') def test_dead_sync_initiator(self): """ diff --git a/run_dtests.py b/run_dtests.py index 34dd5af766..3f3e1fda91 100755 --- a/run_dtests.py +++ b/run_dtests.py @@ -1,6 +1,6 @@ #!/usr/bin/env python """ -usage: run_dtests.py [-h] [--use-vnodes] [--use-off-heap-memtables] [--num-tokens=NUM_TOKENS] [--data-dir-count-per-instance=DATA_DIR_COUNT_PER_INSTANCE] +usage: run_dtests.py [-h] [--sstable-format=FORMAT] [--use-vnodes] [--use-off-heap-memtables] [--num-tokens=NUM_TOKENS] [--data-dir-count-per-instance=DATA_DIR_COUNT_PER_INSTANCE] [--force-resource-intensive-tests] [--skip-resource-intensive-tests] [--cassandra-dir=CASSANDRA_DIR] [--cassandra-version=CASSANDRA_VERSION] [--delete-logs] [--execute-upgrade-tests] [--execute-upgrade-tests-only] [--disable-active-log-watching] [--keep-test-dir] [--enable-jacoco-code-coverage] [--dtest-enable-debug-logging] [--dtest-print-tests-only] [--dtest-print-tests-output=DTEST_PRINT_TESTS_OUTPUT] @@ -8,6 +8,7 @@ optional arguments: -h, --help show this help message and exit + --sstable-format SSTable format to be used by default for all newly created SSTables: big or bti (default: bti) --use-vnodes Determines wither or not to setup clusters using vnodes for tests (default: False) --use-off-heap-memtables Enable Off Heap Memtables when creating test clusters for tests (default: False) --num-tokens=NUM_TOKENS Number of tokens to set num_tokens yaml setting to when creating instances with vnodes enabled (default: 256) diff --git a/scrub_test.py b/scrub_test.py index 3d50d70c31..04c09650f4 100644 --- a/scrub_test.py +++ b/scrub_test.py @@ -110,12 +110,15 @@ def launch_nodetool_cmd(self, cmd): if not common.is_win(): # nodetool always prints out on windows assert_length_equal(response, 0) # nodetool does not print 
anything unless there is an error
 
-    def launch_standalone_scrub(self, ks, cf, reinsert_overflowed_ttl=False, no_validate=False):
+    def launch_standalone_scrub(self, ks, cf, reinsert_overflowed_ttl=False, no_validate=False, acceptable_errors=None):
         """
         Launch the standalone scrub
         """
         node1 = self.cluster.nodelist()[0]
         env = common.make_cassandra_env(node1.get_install_cassandra_root(), node1.get_node_cassandra_root())
+
+        self.dtest_config.apply_to_env(env, "JVM_OPTS")
+
         scrub_bin = node1.get_tool('sstablescrub')
         logger.debug(scrub_bin)
 
@@ -131,7 +134,7 @@ def launch_standalone_scrub(self, ks, cf, reinsert_overflowed_ttl=False, no_vali
         # if we have less than 64G free space, we get this warning - ignore it
         if err and "Consider adding more capacity" not in err.decode("utf-8"):
             logger.debug(err.decode("utf-8"))
-            assert_stderr_clean(err.decode("utf-8"))
+            assert_stderr_clean(err.decode("utf-8"), acceptable_errors)
 
     def perform_node_tool_cmd(self, cmd, table, indexes):
         """
@@ -158,12 +161,12 @@ def scrub(self, table, *indexes):
             time.sleep(.1)
         return self.get_sstables(table, indexes)
 
-    def standalonescrub(self, table, *indexes):
+    def standalonescrub(self, table, *indexes, acceptable_errors=None):
         """
         Launch standalone scrub on table and indexes, and then return all sstables
         in a dict keyed by the table or index name.
         """
-        self.launch_standalone_scrub(KEYSPACE, table)
+        self.launch_standalone_scrub(ks=KEYSPACE, cf=table, acceptable_errors=acceptable_errors)
         for index in indexes:
             self.launch_standalone_scrub(KEYSPACE, '{}.{}'.format(table, index))
         return self.get_sstables(table, indexes)
@@ -443,7 +446,7 @@ def test_standalone_scrub_essential_files_only(self):
 
         self.delete_non_essential_sstable_files('users')
 
-        scrubbed_sstables = self.standalonescrub('users')
+        scrubbed_sstables = self.standalonescrub(table='users', acceptable_errors=["WARN.*Could not recreate or deserialize existing bloom filter, continuing with a pass-through bloom filter but this will significantly impact reads performance"])
         self.increase_sstable_generations(initial_sstables)
         assert initial_sstables == scrubbed_sstables
diff --git a/tools/assertions.py b/tools/assertions.py
index 7491a4b5a7..7148d1ec26 100644
--- a/tools/assertions.py
+++ b/tools/assertions.py
@@ -293,12 +293,16 @@ def assert_stderr_clean(err, acceptable_errors=None):
     @param acceptable_errors A list that if used, the user chooses what
                              messages are to be acceptable in stderr.
     """
+    default_acceptable_errors = ["WARN.*JNA link failure.*unavailable.",
+                                 "objc.*Class JavaLaunchHelper.*?Which one is undefined.",
+                                 # Stress tool JMX connection failure, see CASSANDRA-12437
+                                 "Failed to connect over JMX; not collecting these stats",
+                                 "Picked up JAVA_TOOL_OPTIONS:.*"]
+
     if acceptable_errors is None:
-        acceptable_errors = ["WARN.*JNA link failure.*unavailable.",
-                             "objc.*Class JavaLaunchHelper.*?Which one is undefined.",
-                             # Stress tool JMX connection failure, see CASSANDRA-12437
-                             "Failed to connect over JMX; not collecting these stats",
-                             "Picked up JAVA_TOOL_OPTIONS:.*"]
+        acceptable_errors = default_acceptable_errors
+    else:
+        acceptable_errors = default_acceptable_errors + acceptable_errors
 
     regex_str = r"^({}|\s*|\n)*$".format("|".join(acceptable_errors))
     err_str = err.strip()
diff --git a/tools/misc.py b/tools/misc.py
index 542a889a5a..d746a9947e 100644
--- a/tools/misc.py
+++ b/tools/misc.py
@@ -157,3 +157,14 @@ def add_skip(cls, reason=""):
     else:
         cls.pytestmark = [pytest.mark.skip(reason)]
     return cls
+
+
+def restart_cluster_and_update_config(cluster, config):
+    """
+    Takes a new config, and applies it to a cluster. We need to restart
+    for it to take effect. We _could_ take a node here, but we don't want to.
+    If you really want to change the config of just one node, use JMX.
+    """
+    cluster.stop()
+    cluster.set_configuration_options(values=config)
+    cluster.start()
diff --git a/update-history/STAR-801/6-fe23e0b0 STAR-452 add EverywhereStrategy smoke test (#10) b/update-history/STAR-801/6-fe23e0b0 STAR-452 add EverywhereStrategy smoke test (#10)
new file mode 100644
index 0000000000..5f923ebd22
--- /dev/null
+++ b/update-history/STAR-801/6-fe23e0b0 STAR-452 add EverywhereStrategy smoke test (#10)
@@ -0,0 +1,24 @@
+--- a/bootstrap_test.py
++++ b/bootstrap_test.py
+@@ -1018,7 +1018,6 @@
+         assert_bootstrap_state(self, node3, 'COMPLETED', user='cassandra', password='cassandra')
+         node3.wait_for_binary_interface()
+ 
+-<<<<<<<
+     @since('4.0')
+     @pytest.mark.no_vnodes
+     def test_simple_bootstrap_with_everywhere_strategy(self):
+@@ -1069,7 +1068,7 @@
+         assert_almost_equal(size1, size2, error=0.3)
+ 
+         assert_bootstrap_state(self, node2, 'COMPLETED')
+-=======
++
+     @since('4.1')
+     def test_invalid_host_id(self):
+         """
+@@ -1149,4 +1148,3 @@
+         # 3. check host_id in other node's table
+         session1 = self.patient_exclusive_cql_connection(node1)
+         assert_one(session1, "SELECT host_id FROM system.peers_v2 WHERE peer = {}".format(address2), [uuid.UUID(host_id)])
+->>>>>>>