Commit 0feb684

Improve segment serialization for v1 segments (#2183)
#### Reference Issues/PRs

Ref Monday Ticket: 7852509418

#### What does this implement or fix?

This PR fixes a segmentation fault that occurs in the `CopyCompressedInterStoreTask` when used in enterprise. The problem is hard to reproduce because it involves:

1. Writing some version V to a primary store with an older version of ArcticDB (e.g. 4.4.5).
2. Replicating version V to a secondary store with a version of enterprise that uses a newer version of ArcticDB (e.g. 5.2.3), where the newer version has changed the structure of the Segments (e.g. what happened [here](e3b5f53...757b4bd#diff-a17e4a0ad7760267af8c570795b362ec900136e414782a4b883485d36d9a8f84R167)).
3. Because the newer version tries to write more data than was allocated, we get a seg fault.

The bug is that `CopyCompressedInterStoreTask` tries to copy the Segment, but the information about its size is stale, because it was saved/reserialized based on the information from the older version.

The fix involves:

- Adding null pointer checks in `decode_header_and_fields` and `serialize_v1_header_to_buffer`. The check in `decode_header_and_fields` is not needed for the fix, but it is a good change to prevent silly seg faults.
- Simplifying `calculate_size()` by removing an unnecessary conditional.
- Improving the buffer allocation and copying logic in `serialize_v1_header_to_buffer`.
- Adding explicit size and pointer validation checks to prevent potential memory issues.

Technically, only [this change](https://github.com/man-group/ArcticDB/pull/2183/files#diff-d35d87cb4a49c502a6519b27dc94590231057f633894d26c6c34f506fee1980fR154) is needed to fix this bug, but the change in [segment.cpp](https://github.com/man-group/ArcticDB/pull/2183/files#diff-21f07ddd0b2b8ab0b82fd8392af726ab2a0b5ccafedab67d86497177d4f381a2R283) makes the code there easier to read and reason about, and I've added some more checks to make it more robust.

Because the change is quite hard to test here, I've added the relevant test for it in the enterprise repo with [this PR](#2183).

## Change Type (Required)

- [x] **Patch** (Bug fix or non-breaking improvement)
- [ ] **Minor** (New feature, but backward compatible)
- [ ] **Major** (Breaking changes)
- [ ] **Cherry pick**

#### Any other comments?
The fix resolves seg faults like:

```
==40129== Thread 123 IOPool0:
==40129== Invalid write of size 8
==40129==    at 0x4C2E8BE: memcpy@@GLIBC_2.14 (vg_replace_strmem.c:1035)
==40129==    by 0xC22DBCF1: arcticdb::Segment::serialize_v1_header_to_buffer(unsigned long) (segment.cpp:301)
==40129==    by 0xC22DBED6: arcticdb::Segment::serialize_header_v1() (segment.cpp:320)
==40129==    by 0xC22DBF5D: arcticdb::Segment::serialize_header() (segment.cpp:326)
==40129==    by 0xC3BD7A30: arcticdb::storage::s3::S3ClientImpl::put_object(std::string const&, arcticdb::Segment&, std::string const&, arcticdb::storage::s3::PutHeader) (s3_client_impl.cpp:192)
==40129==    by 0xC3BF1289: void arcticdb::storage::s3::detail::do_write_impl<arcticdb::storage::object_store_utils::FlatBucketizer>(arcticdb::storage::KeySegmentPair&, std::string const&, std::string const&, arcticdb::storage::s3::S3ClientInterface&, arcticdb::storage::object_store_utils::FlatBucketizer&&) (detail-inl.hpp:140)
==40129==    by 0xC3BE98A9: arcticdb::storage::s3::S3Storage::do_write(arcticdb::storage::KeySegmentPair&) (s3_storage.cpp:50)
==40129==    by 0xE608111: void arcticdb::storage::Storage::write<arcticdb::storage::KeySegmentPair&>(arcticdb::storage::KeySegmentPair&) (storage.hpp:48)
==40129==    by 0xE5F5C8B: arcticdb::storage::Storages::write(arcticdb::storage::KeySegmentPair&) (storages.hpp:46)
==40129==    by 0xE5F8245: arcticdb::storage::Library::write(arcticdb::storage::KeySegmentPair&) (library.hpp:90)
==40129==    by 0xE698D85: arcticdb::async::AsyncStore<arcticdb::util::SysClock>::write_compressed_sync(arcticdb::storage::KeySegmentPair) (async_store.hpp:180)
==40129==    by 0xE70ABDF: arcticdb::async::CopyCompressedInterStoreTask::copy() (tasks.hpp:402)
==40129==  Address 0x1416413b0 is 0 bytes after a block of size 1,151,856 alloc'd
==40129==    at 0x4C29F73: malloc (vg_replace_malloc.c:309)
==40129==    by 0xC373EE45: arcticdb::AllocatorImpl<arcticdb::NullTracingPolicy, arcticdb::util::LinearClock>::internal_alloc(unsigned long) (allocator.cpp:194)
==40129==    by 0xC373F143: arcticdb::AllocatorImpl<arcticdb::NullTracingPolicy, arcticdb::util::LinearClock>::aligned_alloc(unsigned long, bool) (allocator.cpp:303)
==40129==    by 0xC1DAD983: arcticdb::Buffer::resize(unsigned long) (buffer.hpp:232)
==40129==    by 0xC1DAD78A: arcticdb::Buffer::ensure(unsigned long) (buffer.hpp:177)
==40129==    by 0xC22DBBC5: arcticdb::Segment::serialize_v1_header_to_buffer(unsigned long) (segment.cpp:283)
==40129==    by 0xC22DBED6: arcticdb::Segment::serialize_header_v1() (segment.cpp:320)
==40129==    by 0xC22DBF5D: arcticdb::Segment::serialize_header() (segment.cpp:326)
==40129==    by 0xC3BD7A30: arcticdb::storage::s3::S3ClientImpl::put_object(std::string const&, arcticdb::Segment&, std::string const&, arcticdb::storage::s3::PutHeader) (s3_client_impl.cpp:192)
==40129==    by 0xC3BF1289: void arcticdb::storage::s3::detail::do_write_impl<arcticdb::storage::object_store_utils::FlatBucketizer>(arcticdb::storage::KeySegmentPair&, std::string const&, std::string const&, arcticdb::storage::s3::S3ClientInterface&, arcticdb::storage::object_store_utils::FlatBucketizer&&) (detail-inl.hpp:140)
==40129==    by 0xC3BE98A9: arcticdb::storage::s3::S3Storage::do_write(arcticdb::storage::KeySegmentPair&) (s3_storage.cpp:50)
==40129==    by 0xE608111: void arcticdb::storage::Storage::write<arcticdb::storage::KeySegmentPair&>(arcticdb::storage::KeySegmentPair&) (storage.hpp:48)
==40129==
```

#### Checklist

<details>
<summary>Checklist for code changes...</summary>

- [ ] Have you updated the relevant docstrings, documentation and copyright notice?
- [ ] Is this contribution tested against [all ArcticDB's features](../docs/mkdocs/docs/technical/contributing.md)?
- [ ] Do all exceptions introduced raise appropriate [error messages](https://docs.arcticdb.io/error_messages/)?
- [ ] Are API changes highlighted in the PR description?
- [ ] Is the PR labelled as enhancement or bug so it appears in autogenerated release notes?

</details>
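To make the failure mode concrete, here is a minimal, self-contained sketch of the bug class described above. The `SegmentSketch` type and its members are hypothetical stand-ins, not ArcticDB's actual classes: a memoized size is computed once, the header then grows when a newer version re-serializes it, and the stale size is used to allocate the destination buffer.

```cpp
#include <cstddef>
#include <optional>
#include <vector>

// Hypothetical illustration of the bug class; not ArcticDB's actual code.
struct SegmentSketch {
    std::vector<unsigned char> body;
    std::size_t header_bytes = 16;  // grows when a newer version adds header fields
    std::optional<std::size_t> cached_size;

    // Buggy variant: memoizes the first answer forever, even if the header grows later.
    std::size_t size_stale() {
        if (!cached_size.has_value())
            cached_size = header_bytes + body.size();
        return *cached_size;
    }

    // Fixed variant, in the spirit of the simplified calculate_size(): always recompute.
    std::size_t size_fresh() {
        cached_size = header_bytes + body.size();
        return *cached_size;
    }
};

int main() {
    SegmentSketch s;
    s.body.resize(100);
    s.size_stale();                                   // caches 116

    s.header_bytes = 32;                              // newer version writes a bigger header
    std::vector<unsigned char> dst(s.size_stale());   // still 116: 16 bytes too small
    // Copying header_bytes + body into dst would now write past its end,
    // which is exactly the "Invalid write" Valgrind reports above.
    return s.size_fresh() == dst.size() ? 1 : 0;      // 132 != 116, so this returns 0
}
```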
1 parent: 4eafc74

File tree

8 files changed: +189 -109 lines changed

.gitignore (+3)

```diff
@@ -50,3 +50,6 @@ docs/mkdocs/docs/notebooks/.ipynb_checkpoints/
 # Ignore asv results
 python/.asv/html/
 python/.asv/results/
+
+# Ignore azurite files
+__azurite_*
```

cpp/arcticdb/codec/segment.cpp (+35 -7)

```diff
@@ -139,6 +139,7 @@ struct DeserializedSegmentData {
 };
 
 DeserializedSegmentData decode_header_and_fields(const uint8_t*& src, bool copy_data) {
+    util::check(src != nullptr, "Got null data ptr from segment");
     auto* fixed_hdr = reinterpret_cast<const FixedHeader*>(src);
     ARCTICDB_DEBUG(log::codec(), "Reading header: {} + {} = {}", FIXED_HEADER_SIZE, fixed_hdr->header_bytes, FIXED_HEADER_SIZE + fixed_hdr->header_bytes);
 
@@ -279,14 +280,41 @@ std::pair<uint8_t*, size_t> Segment::serialize_v1_header_in_place(size_t total_h
 
 std::tuple<uint8_t*, size_t, std::unique_ptr<Buffer>> Segment::serialize_v1_header_to_buffer(size_t hdr_size) {
     auto tmp = std::make_unique<Buffer>();
-    ARCTICDB_TRACE(log::storage(), "Header doesn't fit in internal buffer, needed {} bytes but had {}, writing to temp buffer at {:x}", hdr_size, buffer_.preamble_bytes(),uintptr_t(tmp->data()));
-    tmp->ensure(calculate_size());
+    auto bytes_to_copy = buffer().bytes();
+    auto offset = FIXED_HEADER_SIZE + hdr_size;
+
+    auto total_size = offset + bytes_to_copy;
+
+    // Verify we have enough space for everything
+    tmp->ensure(total_size);
+    util::check(tmp->available() >= total_size,
+                "Buffer available space {} is less than required size {}",
+                tmp->available(),
+                total_size);
+
+    // This is both a sanity check and a way to populate the segment with the correct size
+    auto calculated_size = calculate_size();
+    util::check(total_size == calculated_size, "Expected total size {} to be equal to calculated size {}", total_size, calculated_size);
+
     auto* dst = tmp->preamble();
-    write_proto_header(dst);
-    std::memcpy(dst + FIXED_HEADER_SIZE + hdr_size,
-                buffer().data(),
-                buffer().bytes());
-    return std::make_tuple(tmp->preamble(), calculate_size(), std::move(tmp));
+    util::check(dst != nullptr, "Expected dst to be non-null");
+
+    auto written_hdr_size = write_proto_header(dst);
+    util::check(written_hdr_size == hdr_size, "Expected written header size {} to be equal to expected header size {}", written_hdr_size, hdr_size);
+
+    auto *final_dst = dst + offset;
+
+    auto *src = buffer().data();
+    if (src != nullptr) {
+        std::memcpy(final_dst,
+                    src,
+                    bytes_to_copy);
+    } else {
+        util::check(bytes_to_copy == 0, "Expected bytes_to_copy to be 0 when src is nullptr");
+        ARCTICDB_DEBUG(log::codec(), "src is nullptr, skipping memcpy");
+    }
+
+    return std::make_tuple(dst, total_size, std::move(tmp));
 }
 
 std::tuple<uint8_t*, size_t, std::unique_ptr<Buffer>> Segment::serialize_header_v1() {
```
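The rewritten function computes the output layout up front (fixed header, then proto header, then body) and validates each step before copying. A standalone sketch of that layout arithmetic follows; `kFixedHeaderSize` and `serialize_sketch` are hypothetical stand-ins for the real `FIXED_HEADER_SIZE` constant and member function, not the ArcticDB API.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Standalone sketch of the v1 layout: [fixed header | proto header | body].
constexpr std::size_t kFixedHeaderSize = 8;  // stand-in value

std::vector<uint8_t> serialize_sketch(const std::vector<uint8_t>& proto_header,
                                      const uint8_t* body, std::size_t body_bytes) {
    const std::size_t offset = kFixedHeaderSize + proto_header.size();
    const std::size_t total_size = offset + body_bytes;

    std::vector<uint8_t> out(total_size);  // size the full output up front

    // The fixed header would be written into out[0, kFixedHeaderSize);
    // the proto header follows it at the fixed offset.
    if (!proto_header.empty())
        std::memcpy(out.data() + kFixedHeaderSize, proto_header.data(), proto_header.size());

    // Guard the body copy: a null source is only legal when there is nothing to copy.
    if (body != nullptr)
        std::memcpy(out.data() + offset, body, body_bytes);
    else
        assert(body_bytes == 0 && "null body pointer with non-zero size");

    return out;
}
```

The key property of this shape, which the diff above also has, is that `total_size` is derived once from the same quantities used for the copy, so the allocation and the write extent cannot disagree the way a separately cached size can.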

cpp/arcticdb/codec/segment.hpp (+1 -2)

```diff
@@ -151,8 +151,7 @@ class Segment {
     }
 
     [[nodiscard]] std::size_t calculate_size() {
-        if(!size_.has_value())
-            size_ = FIXED_HEADER_SIZE + segment_header_bytes_size() + buffer_bytes();
+        size_ = FIXED_HEADER_SIZE + segment_header_bytes_size() + buffer_bytes();
 
         return *size_;
     }
```

cpp/arcticdb/toolbox/storage_mover.hpp (+12 -1)

```diff
@@ -507,14 +507,25 @@ class ARCTICDB_VISIBILITY_HIDDEN StorageMover {
         size_t copied = 0;
         for (size_t offset = start; offset < end; ++offset) {
             if (VariantKey& key = vkeys[offset]; source_store_->key_exists(key).get() && !target_store_->key_exists(key).get()) {
+                util::check(variant_key_type(key) != KeyType::UNDEFINED, "Key type is undefined");
                 keys_to_copy[copied++] = std::pair{std::move(key), [copied, &segments](storage::KeySegmentPair&& ks) {
                     segments[copied] = std::move(ks);
                     return segments[copied].variant_key();
                 }};
+            } else {
+                log::storage().warn("Key {} not found in source or already exists in target", key);
             }
         }
+        // check that there are no undefined keys due to failed key_exists calls
+        std::erase_if(keys_to_copy, [](const auto& key) { return variant_key_type(key.first) == KeyType::UNDEFINED; });
+        if (keys_to_copy.empty()) {
+            continue;
+        }
+
         total_copied += copied;
-        [[maybe_unused]] auto keys = source_store_->batch_read_compressed(std::move(keys_to_copy), BatchReadArgs{}).get();
+        auto keys = source_store_->batch_read_compressed(std::move(keys_to_copy), BatchReadArgs{}).get();
+        std::erase_if(segments, [](const auto& segment) { return variant_key_type(segment.variant_key()) == KeyType::UNDEFINED; });
+        util::check(keys.size() == segments.size(), "Keys and segments size mismatch, maybe due to parallel deletes");
         write_futs.push_back(target_store_->batch_write_compressed(std::move(segments)));
     }
     folly::collect(write_futs).get();
```
python/tests/compat/conftest.py → python/arcticdb/util/venv.py (+32 -74)

```diff
@@ -1,18 +1,15 @@
-import pytest
-import subprocess
-import os
-import venv
-import tempfile
 import logging
+import os
 import shutil
-from typing import Union, Optional, Dict, List
-from ..util.mark import (
-    AZURE_TESTS_MARK,
-    MONGO_TESTS_MARK,
-    VENV_COMPAT_TESTS_MARK,
-    PANDAS_2_COMPAT_TESTS_MARK
-)
+import subprocess
+import tempfile
+import venv
+
+from typing import Dict, List, Optional, Union
+
 from packaging.version import Version
+from arcticdb_ext import set_config_int, unset_config_int
+from arcticdb.arctic import Arctic
 
 logger = logging.getLogger("Compatibility tests")
 
@@ -45,8 +42,9 @@ def run_shell_command(
         stdin=subprocess.DEVNULL,
     )
     if result.returncode != 0:
-        logger.warning(
-            f"Command failed, stdout: {str(result.stdout)}, stderr: {str(result.stderr)}"
+        logger.error(
+            f"Command '{command_string}' failed with return code {result.returncode}\n"
+            f"stdout:\n{result.stdout.decode('utf-8')}\nstderr:\n{result.stderr.decode('utf-8')}"
         )
     return result
 
@@ -148,6 +146,10 @@ def create_library(self, lib_name: str) -> "VenvLib":
     def get_library(self, lib_name: str) -> "VenvLib":
        return VenvLib(self, lib_name, create_if_not_exists=False)
 
+    def cleanup(self):
+        ac = Arctic(self.uri)
+        for lib in ac.list_libraries():
+            ac.delete_library(lib)
 
 class VenvLib:
     def __init__(self, arctic, lib_name, create_if_not_exists=True):
@@ -177,68 +179,24 @@ def assert_read(self, sym: str, df) -> None:
         return self.execute(python_commands, {"expected_df": df})
 
 
-@pytest.fixture(
-    # scope="session",
-    params=[
-        pytest.param("1.6.2", marks=VENV_COMPAT_TESTS_MARK),
-        pytest.param("4.5.1", marks=VENV_COMPAT_TESTS_MARK),
-        pytest.param("5.0.0", marks=VENV_COMPAT_TESTS_MARK),
-    ], # TODO: Extend this list with other old versions
-)
-def old_venv(request, tmp_path):
-    version = request.param
-    path = os.path.join("venvs", tmp_path, version)
-    compat_dir = os.path.dirname(os.path.abspath(__file__))
-    requirements_file = os.path.join(compat_dir, f"requirements-{version}.txt")
-    with Venv(path, requirements_file, version) as old_venv:
-        yield old_venv
-
-
-@pytest.fixture(
-    params=[
-        pytest.param("tmp_path", marks=PANDAS_2_COMPAT_TESTS_MARK)
-    ]
-)
-def pandas_v1_venv(request):
-    """A venv with Pandas v1 installed (and an old ArcticDB version). To help test compat across Pandas versions."""
-    version = "1.6.2"
-    tmp_path = request.getfixturevalue(request.param)
-    path = os.path.join("venvs", tmp_path, version)
-    compat_dir = os.path.dirname(os.path.abspath(__file__))
-    requirements_file = os.path.join(compat_dir, f"requirements-{version}.txt")
-    with Venv(path, requirements_file, version) as old_venv:
-        yield old_venv
-
-
-@pytest.fixture(
-    params=[
-        "lmdb",
-        "s3_ssl_disabled",
-        pytest.param("azurite", marks=AZURE_TESTS_MARK),
-        pytest.param("mongo", marks=MONGO_TESTS_MARK),
-    ]
-)
-def arctic_uri(request):
+class CurrentVersion:
     """
-    arctic_uri is a fixture which provides uri to all storages to be used for creating both old and current Arctic instances.
+    For many of the compatibility tests we need to maintain a single open connection to the library.
+    For example LMDB on Windows starts to fail if we at the same time we use an old_venv and current connection.
 
-    We use s3_ssl_disabled because which allows running tests for older versions like 1.6.2
+    So we use `with CurrentVersion` construct to ensure we delete all our outstanding references to the library.
     """
-    storage_fixture = request.getfixturevalue(request.param + "_storage")
-    if request.param == "mongo":
-        return storage_fixture.mongo_uri
-    else:
-        return storage_fixture.arctic_uri
-
-
-@pytest.fixture
-def old_venv_and_arctic_uri(old_venv, arctic_uri):
-    if arctic_uri.startswith("mongo") and "1.6.2" in old_venv.version:
-        pytest.skip("Mongo storage backend is not supported in 1.6.2")
+    def __init__(self, uri, lib_name):
+        self.uri = uri
+        self.lib_name = lib_name
 
-    if arctic_uri.startswith("lmdb") and Version(old_venv.version) < Version("5.0.0"):
-        pytest.skip(
-            "LMDB storage backed has a bug in versions before 5.0.0 which leads to flaky segfaults"
-        )
+    def __enter__(self):
+        set_config_int("VersionMap.ReloadInterval", 0) # We disable the cache to be able to read the data written from old_venv
+        self.ac = Arctic(self.uri)
+        self.lib = self.ac.get_library(self.lib_name)
+        return self
 
-    return old_venv, arctic_uri
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        unset_config_int("VersionMap.ReloadInterval")
+        del self.lib
+        del self.ac
```

python/tests/compat/arcticdb/test_compatibility.py (+1 -25)

```diff
@@ -1,41 +1,17 @@
 import pytest
 from packaging import version
 import pandas as pd
-from arcticdb.arctic import Arctic
 from arcticdb.util.test import assert_frame_equal
-from arcticdb_ext import set_config_int, unset_config_int
 from arcticdb.options import ModifiableEnterpriseLibraryOption
 from arcticdb.toolbox.library_tool import LibraryTool
 from tests.util.mark import ARCTICDB_USING_CONDA
 
+from arcticdb.util.venv import CurrentVersion
 
 if ARCTICDB_USING_CONDA:
     pytest.skip("These tests rely on pip based environments", allow_module_level=True)
 
 
-class CurrentVersion:
-    """
-    For many of the compatibility tests we need to maintain a single open connection to the library.
-    For example LMDB on Windows starts to fail if we at the same time we use an old_venv and current connection.
-
-    So we use `with CurrentVersion` construct to ensure we delete all our outstanding references to the library.
-    """
-    def __init__(self, uri, lib_name):
-        self.uri = uri
-        self.lib_name = lib_name
-
-    def __enter__(self):
-        set_config_int("VersionMap.ReloadInterval", 0) # We disable the cache to be able to read the data written from old_venv
-        self.ac = Arctic(self.uri)
-        self.lib = self.ac.get_library(self.lib_name)
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        unset_config_int("VersionMap.ReloadInterval")
-        del self.lib
-        del self.ac
-
-
 def test_compat_write_read(old_venv_and_arctic_uri, lib_name):
     old_venv, arctic_uri = old_venv_and_arctic_uri
     sym = "sym"
```

python/tests/conftest.py (+75)

```diff
@@ -53,8 +53,12 @@
     MONGO_TESTS_MARK,
     REAL_S3_TESTS_MARK,
     SSL_TEST_SUPPORTED,
+    VENV_COMPAT_TESTS_MARK,
+    PANDAS_2_COMPAT_TESTS_MARK
 )
 from arcticdb.storage_fixtures.utils import safer_rmtree
+from packaging.version import Version
+from arcticdb.util.venv import Venv
 
 
 # region =================================== Misc. Constants & Setup ====================================
@@ -1089,3 +1093,74 @@ def in_memory_version_store_tiny_segment(in_memory_store_factory):
 @pytest.fixture(params=["lmdb_version_store_tiny_segment", "in_memory_version_store_tiny_segment"])
 def lmdb_or_in_memory_version_store_tiny_segment(request):
     return request.getfixturevalue(request.param)
+
+
+@pytest.fixture(
+    # scope="session",
+    params=[
+        pytest.param("1.6.2", marks=VENV_COMPAT_TESTS_MARK),
+        pytest.param("4.5.1", marks=VENV_COMPAT_TESTS_MARK),
+        pytest.param("5.0.0", marks=VENV_COMPAT_TESTS_MARK),
+    ], # TODO: Extend this list with other old versions
+)
+def old_venv(request, tmp_path):
+    version = request.param
+    path = os.path.join("venvs", tmp_path, version)
+    test_dir = os.path.dirname(os.path.abspath(__file__))
+    compat_dir = os.path.join(test_dir, "compat")
+    requirements_file = os.path.join(compat_dir, f"requirements-{version}.txt")
+    with Venv(path, requirements_file, version) as old_venv:
+        yield old_venv
+
+
+@pytest.fixture(
+    params=[
+        pytest.param("tmp_path", marks=PANDAS_2_COMPAT_TESTS_MARK)
+    ]
+)
+def pandas_v1_venv(request):
+    """A venv with Pandas v1 installed (and an old ArcticDB version). To help test compat across Pandas versions."""
+    version = "1.6.2"
+    tmp_path = request.getfixturevalue(request.param)
+    path = os.path.join("venvs", tmp_path, version)
+    test_dir = os.path.dirname(os.path.abspath(__file__))
+    compat_dir = os.path.join(test_dir, "compat")
+    requirements_file = os.path.join(compat_dir, f"requirements-{version}.txt")
+    with Venv(path, requirements_file, version) as old_venv:
+        yield old_venv
+
+
+@pytest.fixture(
+    params=[
+        "lmdb",
+        "s3_ssl_disabled",
+        pytest.param("azurite", marks=AZURE_TESTS_MARK),
+        pytest.param("mongo", marks=MONGO_TESTS_MARK),
+    ]
+)
+def arctic_uri(request):
+    """
+    arctic_uri is a fixture which provides uri to all storages to be used for creating both old and current Arctic instances.
+
+    We use s3_ssl_disabled because which allows running tests for older versions like 1.6.2
+    """
+    storage_fixture = request.getfixturevalue(request.param + "_storage")
+    if request.param == "mongo":
+        return storage_fixture.mongo_uri
+    else:
+        return storage_fixture.arctic_uri
+
+
+@pytest.fixture
+def old_venv_and_arctic_uri(old_venv, arctic_uri):
+    if arctic_uri.startswith("mongo") and "1.6.2" in old_venv.version:
+        pytest.skip("Mongo storage backend is not supported in 1.6.2")
+
+    if arctic_uri.startswith("lmdb") and Version(old_venv.version) < Version("5.0.0"):
+        pytest.skip(
+            "LMDB storage backed has a bug in versions before 5.0.0 which leads to flaky segfaults"
+        )
+
+    yield old_venv, arctic_uri
+
+    old_venv.create_arctic(arctic_uri).cleanup()
```
