2 changes: 2 additions & 0 deletions .flake8
@@ -12,3 +12,5 @@ exclude = .tox,.git,*/migrations/*,*/static/CACHE/*,docs,node_modules,.venv,*/cd

# F401 - Unused imports -- this is the only way to have a file-wide rule exception
per-file-ignores =
+    # We utilize * imports on test files here to dynamically collect test cases
+    conftest.py: F401,F403
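For context, this rule exception exists so that a pattern like the following can live in `conftest.py` without tripping flake8. A minimal sketch, assuming test case modules live in a `test_cases` package; the module names are illustrative, not the actual files:

```python
# Star imports pull every test case class into conftest.py so pytest can
# collect them dynamically; F401 (unused import) and F403 (star import)
# are suppressed for this file only by the per-file-ignores entry above.
from test_cases.backfill_tests import *  # hypothetical module name
from test_cases.metadata_tests import *  # hypothetical module name
```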
@@ -192,19 +192,21 @@ public static class DocParams implements TransformerParams {
public String getTransformerConfigParameterArgPrefix() {
return DOC_CONFIG_PARAMETER_ARG_PREFIX;
}
-        private static final String DOC_CONFIG_PARAMETER_ARG_PREFIX = "doc-";
+        private static final String DOC_CONFIG_PARAMETER_ARG_PREFIX = "doc";

@Parameter(
required = false,
names = "--" + DOC_CONFIG_PARAMETER_ARG_PREFIX + "transformer-config-base64",
names = { "--" + DOC_CONFIG_PARAMETER_ARG_PREFIX + "-transformer-config-base64",
"--" + DOC_CONFIG_PARAMETER_ARG_PREFIX + "TransformerConfigBase64" },
arity = 1,
description = "Configuration of doc transformers. The same contents as --doc-transformer-config but " +
"Base64 encoded so that the configuration is easier to pass as a command line parameter.")
private String transformerConfigEncoded;

@Parameter(
required = false,
names = "--" + DOC_CONFIG_PARAMETER_ARG_PREFIX + "transformer-config",
names = { "--" + DOC_CONFIG_PARAMETER_ARG_PREFIX + "-transformer-config",
"--" + DOC_CONFIG_PARAMETER_ARG_PREFIX + "TransformerConfig" },
arity = 1,
description = "Configuration of doc transformers. Either as a string that identifies the "
+ "transformer that should be run (with default settings) or as json to specify options "
@@ -215,7 +217,8 @@ public String getTransformerConfigParameterArgPrefix() {

@Parameter(
required = false,
names = "--" + DOC_CONFIG_PARAMETER_ARG_PREFIX + "transformer-config-file",
names = { "--" + DOC_CONFIG_PARAMETER_ARG_PREFIX + "-transformer-config-file",
"--" + DOC_CONFIG_PARAMETER_ARG_PREFIX + "TransformerConfigFile" },
arity = 1,
description = "Path to the JSON configuration file of doc transformers.")
private String transformerConfigFile;
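With the prefix changed from `doc-` to `doc` and the hyphen moved into the flag names, each option now accepts both a kebab-case and a camelCase spelling. As a minimal sketch of producing the Base64 flag value described above, assuming a JSON-array config shape (the provider name and options are illustrative, not a documented schema):

```python
import base64
import json

# Hypothetical transformer configuration; real option names depend on the
# transformers available to the tool.
config = [{"ExampleTransformerProvider": {"option": "value"}}]
encoded = base64.b64encode(json.dumps(config).encode("utf-8")).decode("ascii")

# Per the change above, either flag spelling works:
print(f"--doc-transformer-config-base64 {encoded}")
print(f"--docTransformerConfigBase64 {encoded}")
```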
@@ -89,6 +89,7 @@ def __init__(self, config: Dict, client_options: Optional[ClientOptions] = None)
raise ValueError("Invalid config file for cluster", v.errors)

self.endpoint = config["endpoint"]
+        self.version = config.get("version", None)
self.allow_insecure = config.get("allow_insecure", False) if self.endpoint.startswith(
"https") else config.get("allow_insecure", True)
if 'no_auth' in config:
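The `allow_insecure` default above flips on the endpoint scheme. An illustrative, standalone restatement of the same logic (not the library's API, just the conditional pulled out for clarity):

```python
# allow_insecure defaults to False for https endpoints and True otherwise;
# an explicit value in the config always wins.
def resolve_allow_insecure(config: dict) -> bool:
    default = not config["endpoint"].startswith("https")
    return config.get("allow_insecure", default)

assert resolve_allow_insecure({"endpoint": "https://cluster:9200"}) is False
assert resolve_allow_insecure({"endpoint": "http://cluster:9200"}) is True
assert resolve_allow_insecure({"endpoint": "https://cluster:9200",
                               "allow_insecure": True}) is True
```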
@@ -1,64 +1,47 @@
-## E2E Integration Testing
-Developers can run a test script which will verify the end-to-end Docker Solution.
-
-### Compatibility
-* Python >= 3.7
-
-### Pre-requisites
-
-* Have all containers from Docker solution running.
-
-To run the test script, users must navigate to this directory,
-install the required packages and then run the script:
+## Migration Assistant E2E Integration Testing
+This library contains E2E integration tests to execute against a Migration Assistant deployment.

+### Installing dependencies
+To install the required dependencies:
```
-pip install -r requirements.txt
-pytest tests.py
+pipenv install
```

-### Running in Docker setup
+### Creating an E2E test case
+Test cases created within the `test_cases` directory are executed by the `ma_workflow_test.py` test structure. The link between the two is created in the pytest
+configuration file, `conftest.py`. Any test class created in an existing file within the `test_cases` directory will automatically be added to the list of test cases
+to attempt when `ma_workflow_test.py` is executed with pytest. The `conftest.py` file achieves this by first collecting all test cases and then filtering out
+any that don't apply to the given source and target cluster versions, as well as applying any filters the user provides, such as `--test_ids`. Once the final
+list is determined, `conftest.py` dynamically parameterizes the `ma_workflow_test.py` test, so the test runs once per test case in the final list. If a new
+test file is created within the `test_cases` directory, it should be imported into `conftest.py` like the existing test files.
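A minimal, self-contained sketch of that collect-then-filter flow; the names and the stand-in compatibility flag are illustrative, and the real `conftest.py` differs:

```python
# Illustrative conftest.py: collect every known test case, filter by cluster
# support and the optional --test_ids flag, then parameterize the workflow test.
class _FakeTestCase:
    def __init__(self, test_id: str, supported: bool):
        self.test_id = test_id
        self.supported = supported  # stands in for the version compatibility check


ALL_TEST_CASES = [_FakeTestCase("0001", True), _FakeTestCase("0002", False),
                  _FakeTestCase("0003", True)]


def pytest_addoption(parser):
    parser.addoption("--test_ids", action="store", default=None,
                     help="Comma-separated test IDs, e.g. '0001,0003'")


def pytest_generate_tests(metafunc):
    if "test_case" not in metafunc.fixturenames:
        return
    raw_ids = metafunc.config.getoption("--test_ids")
    wanted = set(raw_ids.split(",")) if raw_ids else None
    cases = [c for c in ALL_TEST_CASES
             if c.supported and (wanted is None or c.test_id in wanted)]
    # One parameterized execution of the workflow test per surviving case.
    metafunc.parametrize("test_case", cases, ids=[c.test_id for c in cases])
```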

-From the root of this repository bring up the Docker environment
-```shell
-./gradlew -p TrafficCapture dockerSolution:ComposeUp -x test -x spotlessCheck --info --stacktrace
-```
-
-The Docker compose file being used can be found [here](../../../docker-compose.yml)
-* The integ_test `lib` directory can be directly mounted as a volume on the migration console container to speed up development
+### Running tests in K8s setup
+
+Follow the quickstart guide [here](../../../../../../../../deployment/k8s/quickstart.md) to set up a Migration Assistant environment with source and
+target test clusters.

-To run one of the integration test suites a command like below can be used:
+Access the migration console:
```shell
-docker exec $(docker ps --filter "name=migration-console" -q) pipenv run pytest /root/lib/integ_test/integ_test/full_tests.py --unique_id="testindex" -s
+kubectl exec --stdin --tty $(kubectl get pods -l app=ma-migration-console --sort-by=.metadata.creationTimestamp -o jsonpath="{.items[-1].metadata.name}") -- /bin/bash
```

-To teardown, execute the following command at the root of this repository
+Run pytest:
```shell
-./gradlew -p TrafficCapture dockerSolution:ComposeDown
+pytest ~/lib/integ_test/integ_test/ma_workflow_test.py
```

-#### Notes
-
-##### Ports Setup
-The test script, by default, uses the ports assigned to the containers in this
-[docker-compose file](../../../docker-compose.yml), so if the Docker solution in
-its current setup started with no issues, then the test script will run as is. If for any reason
-the user changed the ports in that file, they must also either, provide the following parameters variables:
-`proxy_endpoint`, `source_endpoint`, and `target_endpoint` respectively, or update the default value
-for them in [conftest.py](integ_test/conftest.py).
+To tear down resources, follow the end of the quickstart guide [here](../../../../../../../../deployment/k8s/quickstart.md#cleanup).


-#### Script Parameters
+### Pytest parameters

-This script accepts various parameters to customize its behavior. Below is a list of available parameters along with their default values and acceptable choices:
+Pytest has been configured to accept various parameters to customize its behavior. Below is a list of available parameters along with their default values and acceptable choices:

- `--unique_id`: The unique identifier to apply to created indices/documents.
- Default: Generated uuid
- `--config_file_path`: The services yaml config file path for the console library.
- Default: `/config/migration_services.yaml`


-#### Clean Up
-The test script is implemented with a setup and teardown functions that are ran after
-each and every test where additions made to the endpoints are deleted, *mostly* cleaning up after themselves, however,
-as we log all operations going through the proxy (which is capturing the traffic), those are only being
-deleted after the Docker solution is shut down.
+- `--test_ids`: Specify test IDs like `'0001,0003'` to filter which tests execute.
+  - Default: Attempt to execute all tests
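For illustration, the same options can also be passed programmatically; the path and IDs here simply mirror the shell usage documented above:

```python
import os
import pytest

# Programmatic equivalent of the documented CLI usage; running pytest from
# the shell as shown above behaves the same way.
exit_code = pytest.main([
    os.path.expanduser("~/lib/integ_test/integ_test/ma_workflow_test.py"),
    "--test_ids=0001,0003",   # only attempt these test cases
    "--unique_id=testindex",  # fixed suffix for created indices/documents
])
```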
@@ -8,10 +8,11 @@
from console_link.models.command_result import CommandResult
from console_link.models.metadata import Metadata
from console_link.cli import Context
-from common_operations import (get_document, create_document, create_index, check_doc_counts_match,
-                               EXPECTED_BENCHMARK_DOCS)
+from .common_utils import EXPECTED_BENCHMARK_DOCS
+from .default_operations import DefaultOperationsLibrary

logger = logging.getLogger(__name__)
+ops = DefaultOperationsLibrary()


def preload_data(source_cluster: Cluster, target_cluster: Cluster):
@@ -29,9 +30,9 @@ def preload_data(source_cluster: Cluster, target_cluster: Cluster):
# test_backfill_0001
index_name = f"test_backfill_0001_{pytest.unique_id}"
doc_id = "backfill_0001_doc"
-    create_index(cluster=source_cluster, index_name=index_name)
-    create_document(cluster=source_cluster, index_name=index_name, doc_id=doc_id,
-                    expected_status_code=HTTPStatus.CREATED)
+    ops.create_index(cluster=source_cluster, index_name=index_name)
+    ops.create_document(cluster=source_cluster, index_name=index_name, doc_id=doc_id,
+                        expected_status_code=HTTPStatus.CREATED)

# test_backfill_0002
run_test_benchmarks(source_cluster)
@@ -85,23 +86,23 @@ def test_backfill_0001_single_document(self):
target_cluster: Cluster = pytest.console_env.target_cluster

# Assert preloaded document exists
-        get_document(cluster=source_cluster, index_name=index_name, doc_id=doc_id, test_case=self)
+        ops.get_document(cluster=source_cluster, index_name=index_name, doc_id=doc_id, test_case=self)

# TODO Determine when backfill is completed

-        get_document(cluster=target_cluster, index_name=index_name, doc_id=doc_id, max_attempts=30, delay=30.0,
-                     test_case=self)
+        ops.get_document(cluster=target_cluster, index_name=index_name, doc_id=doc_id, max_attempts=30, delay=30.0,
+                         test_case=self)

def test_backfill_0002_sample_benchmarks(self):
source_cluster: Cluster = pytest.console_env.source_cluster
target_cluster: Cluster = pytest.console_env.target_cluster

# Confirm documents on source
-        check_doc_counts_match(cluster=source_cluster, expected_index_details=EXPECTED_BENCHMARK_DOCS,
-                               test_case=self)
+        ops.check_doc_counts_match(cluster=source_cluster, expected_index_details=EXPECTED_BENCHMARK_DOCS,
+                                   test_case=self)

# TODO Determine when backfill is completed

# Confirm documents on target after backfill
-        check_doc_counts_match(cluster=target_cluster, expected_index_details=EXPECTED_BENCHMARK_DOCS,
-                               max_attempts=30, delay=30.0, test_case=self)
+        ops.check_doc_counts_match(cluster=target_cluster, expected_index_details=EXPECTED_BENCHMARK_DOCS,
+                                   max_attempts=30, delay=30.0, test_case=self)
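The refactor above replaces free helper functions with methods on a shared operations library object. A hedged sketch of why that shape is useful, with illustrative names (the real `DefaultOperationsLibrary` lives in `default_operations.py` and has a different surface):

```python
# Grouping operations on a class means cluster-version-specific subclasses
# can override individual behaviors while tests keep calling ops.<method>.
class OperationsLibrarySketch:
    def index_path(self, index_name: str) -> str:
        return f"/{index_name}"


class LegacyOperationsSketch(OperationsLibrarySketch):
    # Hypothetical override for an older cluster whose API differs.
    def index_path(self, index_name: str) -> str:
        return f"/{index_name.lower()}"


ops = OperationsLibrarySketch()
print(ops.index_path("Test_Index"))  # -> /Test_Index
```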
@@ -0,0 +1,40 @@
import re


class ClusterVersion:
    pattern = re.compile(r"^(ES|OS)_([0-9]+)\.([0-9]+|x|X)$")

    def __init__(self, version_str: str):
        match = self.pattern.match(version_str)
        if not match:
            raise ValueError(f"Invalid version format: {version_str}. Cluster versions must be in format ES_x.y or "
                             f"OS_x.y, where y is a number or 'x' for any minor version.")

        self.cluster_type = match.group(1)
        self.major_version = int(match.group(2))

        minor_version = match.group(3)
        if minor_version.lower() == 'x':
            self.minor_version = 'x'
        else:
            self.minor_version = int(minor_version)

    def __str__(self):
        return f"{self.cluster_type}_{self.major_version}.{self.minor_version}"


ElasticsearchV5_X = ClusterVersion("ES_5.x")
ElasticsearchV6_X = ClusterVersion("ES_6.x")
ElasticsearchV7_X = ClusterVersion("ES_7.x")
OpensearchV1_X = ClusterVersion("OS_1.x")
OpensearchV2_X = ClusterVersion("OS_2.x")


def is_incoming_version_supported(limiting_version: ClusterVersion, incoming_version: ClusterVersion):
    if (limiting_version.cluster_type == incoming_version.cluster_type and
            limiting_version.major_version == incoming_version.major_version):
        if isinstance(limiting_version.minor_version, str):
            return True
        else:
            return limiting_version.minor_version == incoming_version.minor_version
    return False
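A few illustrative checks of the wildcard semantics, using only the definitions above:

```python
# A limiting version with minor 'x' accepts any incoming minor of the same
# cluster type and major version; concrete minors must match exactly.
assert is_incoming_version_supported(ElasticsearchV7_X, ClusterVersion("ES_7.10"))
assert is_incoming_version_supported(OpensearchV2_X, ClusterVersion("OS_2.3"))
assert not is_incoming_version_supported(ElasticsearchV7_X, ClusterVersion("OS_1.3"))
assert not is_incoming_version_supported(ClusterVersion("ES_7.10"), ClusterVersion("ES_7.9"))
```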