Skip to content

Basic check for missing storage system and running pods #12040

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 10 commits into from
May 20, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions ocs_ci/ocs/exceptions.py
Original file line number Diff line number Diff line change
Expand Up @@ -762,3 +762,7 @@ class ActiveMdsValueNotMatch(Exception):

class DistributionStatusError(Exception):
    """Raised when a distribution status check does not yield the expected result."""

    pass


class InvalidPodPresent(Exception):
    """Raised when a pod that should not be deployed (e.g. a pod belonging to a
    disabled component such as NooBaa or Ceph) is found in the cluster."""

    pass
49 changes: 49 additions & 0 deletions ocs_ci/ocs/resources/storage_cluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
from ocs_ci.ocs.exceptions import (
CephHealthRecoveredException,
CommandFailed,
InvalidPodPresent,
ResourceNotFoundError,
UnsupportedFeatureError,
PVNotSufficientException,
Expand All @@ -36,6 +37,7 @@
from ocs_ci.ocs.resources import csv, deployment
from ocs_ci.ocs.resources.ocs import get_ocs_csv
from ocs_ci.ocs.resources.pod import (
get_all_pods,
get_pods_having_label,
get_osd_pods,
get_mon_pods,
Expand Down Expand Up @@ -3309,3 +3311,50 @@ def get_deviceset_sc_name_per_deviceclass():
"""
device_sets = get_all_device_sets()
return {get_deviceset_sc_name(d): get_deviceclass_name(d) for d in device_sets}


def _pods_matching_prefixes(pod_names, prefixes):
    """Return the subset of *pod_names* that start with any prefix in *prefixes*."""
    return [pod_name for pod_name in pod_names if pod_name.startswith(tuple(prefixes))]


def check_unnecessary_pods_present():
    """
    Based on configuration, check that pods that are not necessary are not
    present in the cluster namespace.

    NooBaa pods are forbidden when the ``disable_noobaa`` component flag is
    set; the rook-ceph operator pod is forbidden for external-mode or
    MCG-only deployments.

    Raises:
        InvalidPodPresent: if a pod belonging to a disabled component is found

    """
    no_noobaa = config.COMPONENTS["disable_noobaa"]
    no_ceph = (
        config.DEPLOYMENT["external_mode"] or config.ENV_DATA["mcg_only_deployment"]
    )
    pod_names = [
        pod.name for pod in get_all_pods(namespace=config.ENV_DATA["cluster_namespace"])
    ]
    log.info(f"Checking if only required operator pods are available in : {pod_names}")
    if no_noobaa:
        invalid_pods_found = _pods_matching_prefixes(
            pod_names,
            [
                constants.NOOBAA_OPERATOR_DEPLOYMENT,
                constants.NOOBAA_ENDPOINT_DEPLOYMENT,
                constants.NOOBAA_DB_STATEFULSET,
                constants.NOOBAA_CORE_STATEFULSET,
            ],
        )
        if invalid_pods_found:
            raise InvalidPodPresent(
                f"Pods {invalid_pods_found} should not be present because NooBaa is not available"
            )
    if no_ceph:
        invalid_pods_found = _pods_matching_prefixes(
            pod_names, [constants.ROOK_CEPH_OPERATOR]
        )
        if invalid_pods_found:
            raise InvalidPodPresent(
                f"Pods {invalid_pods_found} should not be present because Ceph is not available"
            )
23 changes: 23 additions & 0 deletions tests/functional/deployment/test_operator.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
import pytest
from ocs_ci.framework.testlib import (
brown_squad,
ManageTest,
tier1,
)
from ocs_ci.ocs.resources.storage_cluster import check_unnecessary_pods_present


@brown_squad
class TestOperator(ManageTest):
    """
    Verify that operator resources are deployed as expected.
    """

    @tier1
    @pytest.mark.polarion_id("OCS-6843")
    def test_unnecessary_pods(self):
        """
        1. Based on deployment type check that there are no unnecessary operator
        pods deployed.

        Fails (InvalidPodPresent raised by check_unnecessary_pods_present)
        when a pod of a disabled component — NooBaa, or Ceph in
        external-mode/MCG-only deployments — is found in the cluster
        namespace.
        """
        check_unnecessary_pods_present()
54 changes: 54 additions & 0 deletions tests/functional/z_cluster/test_storagesystem.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
import logging

import pytest
from ocs_ci.ocs import constants, ocp
from ocs_ci.framework import config
from ocs_ci.framework.testlib import (
brown_squad,
ManageTest,
tier1,
)
from ocs_ci.framework.logger_helper import log_step

logger = logging.getLogger(__name__)


@brown_squad
class TestStorageSystem(ManageTest):
    """
    Verify that the deprecated StorageSystem CR is not present and that the
    StorageCluster is not owned by a StorageSystem.
    """

    @tier1
    @pytest.mark.polarion_id("OCS-6842")
    def test_storagesystem_not_present(self):
        """
        1. Storage System is not present
        2. Storage Cluster owner reference doesn't contain storage system

        """
        log_step("Verify that Storage System is not present")
        storage_system = ocp.OCP(
            kind=constants.STORAGESYSTEM, namespace=config.ENV_DATA["cluster_namespace"]
        )
        storage_system_data = storage_system.get()
        assert not storage_system_data.get("items"), (
            f"Unexpected StorageSystem resources found: "
            f"{storage_system_data.get('items')}"
        )
        log_step(
            "Verify that Storage Cluster owner reference doesn't contain storage system"
        )
        storage_cluster = ocp.OCP(
            kind=constants.STORAGECLUSTER,
            namespace=config.ENV_DATA["cluster_namespace"],
        )
        storage_cluster_data = storage_cluster.get()
        # NOTE(review): get() without resource_name may return a List object
        # whose top-level metadata never carries ownerReferences — confirm
        # this reads the StorageCluster item's metadata, not the list's.
        # ownerReferences is a list in the Kubernetes API, so default to [].
        owner_references = storage_cluster_data.get("metadata", {}).get(
            "ownerReferences", []
        )
        assert not any(
            reference["kind"] == "StorageSystem" for reference in owner_references
        ), f"StorageCluster has a StorageSystem owner reference: {owner_references}"