diff --git a/ocs_ci/deployment/aws.py b/ocs_ci/deployment/aws.py index 25f31466db5..7471cd11291 100644 --- a/ocs_ci/deployment/aws.py +++ b/ocs_ci/deployment/aws.py @@ -16,7 +16,7 @@ from ocs_ci.ocs import constants, exceptions, ocp, machine from ocs_ci.ocs.resources import pod from ocs_ci.ocs.node import drain_nodes -from ocs_ci.utility import cco, templating, version +from ocs_ci.utility import cco, templating from ocs_ci.utility.aws import ( AWS as AWSUtil, create_and_attach_volume_for_all_workers, @@ -56,9 +56,6 @@ class AWSBase(CloudDeploymentBase): - # default storage class for StorageCluster CRD on AWS platform - DEFAULT_STORAGECLASS = "gp2" - def __init__(self): """ This would be base for both IPI and UPI deployment @@ -68,20 +65,9 @@ def __init__(self): # dict of cluster prefixes with special handling rules (for existence # check or during a cluster cleanup) self.cluster_prefixes_special_rules = CLUSTER_PREFIXES_SPECIAL_RULES - ocp_version = version.get_semantic_ocp_version_from_config() - if ocp_version >= version.VERSION_4_12: - self.DEFAULT_STORAGECLASS = "gp2-csi" def deploy_ocp(self, log_cli_level="DEBUG"): super(AWSBase, self).deploy_ocp(log_cli_level) - ocp_version = version.get_semantic_ocp_version_from_config() - ocs_version = version.get_semantic_ocs_version_from_config() - - if ocs_version >= version.VERSION_4_10 and ocp_version >= version.VERSION_4_9: - # If we don't customize the storage class, we will use the default one - self.DEFAULT_STORAGECLASS = config.DEPLOYMENT.get( - "customized_deployment_storage_class", self.DEFAULT_STORAGECLASS - ) def host_network_update(self): """ diff --git a/ocs_ci/deployment/azure.py b/ocs_ci/deployment/azure.py index 11c34d68bb6..82c73c136d9 100644 --- a/ocs_ci/deployment/azure.py +++ b/ocs_ci/deployment/azure.py @@ -14,7 +14,7 @@ from ocs_ci.deployment.cloud import IPIOCPDeployment from ocs_ci.deployment.ocp import OCPDeployment as BaseOCPDeployment from ocs_ci.ocs import constants -from ocs_ci.utility import cco, version +from ocs_ci.utility import cco from ocs_ci.utility.azure_utils import AZURE as AzureUtil, AzureAroUtil from ocs_ci.utility.deployment import get_ocp_release_image_from_installer from ocs_ci.utility.utils import exec_cmd @@ -34,13 +34,6 @@ class AZUREBase(CloudDeploymentBase): comparable with other platforms. """ - # default storage class for StorageCluster CRD on Azure platform - # From OCP 4.11, default storage class is managed-csi - if version.get_semantic_ocp_version_from_config() >= version.VERSION_4_11: - DEFAULT_STORAGECLASS = "managed-csi" - else: - DEFAULT_STORAGECLASS = "managed-premium" - def __init__(self): super(AZUREBase, self).__init__() self.azure_util = AzureUtil() diff --git a/ocs_ci/deployment/deployment.py b/ocs_ci/deployment/deployment.py index 143453055f5..6298a57d939 100644 --- a/ocs_ci/deployment/deployment.py +++ b/ocs_ci/deployment/deployment.py @@ -18,6 +18,7 @@ from botocore.exceptions import EndpointConnectionError, BotoCoreError +from ocs_ci.deployment.helpers import storage_class from ocs_ci.deployment.ocp import OCPDeployment as BaseOCPDeployment from ocs_ci.deployment.helpers.external_cluster_helpers import ( ExternalCluster, @@ -191,32 +192,14 @@ class Deployment(object): Base for all deployment platforms """ - # Default storage class for StorageCluster CRD, - # every platform specific class which extending this base class should - # define it - DEFAULT_STORAGECLASS = None - - # Default storage class for LSO deployments. 
While each platform specific
-    # subclass can redefine it, there is a well established platform
-    # independent default value (based on OCS Installation guide), and it's
-    # redefinition is not necessary in normal cases.
-    DEFAULT_STORAGECLASS_LSO = "localblock"
-
-    CUSTOM_STORAGE_CLASS_PATH = None
-    """str: filepath of yaml file with custom storage class if necessary
-
-    For some platforms, one have to create custom storage class for OCS to make
-    sure ceph uses disks of expected type and parameters (eg. OCS requires
-    ssd). This variable is either None (meaning that such custom storage class
-    is not needed), or point to a yaml file with custom storage class.
-    """
-
     def __init__(self):
         self.platform = config.ENV_DATA["platform"]
         self.ocp_deployment_type = config.ENV_DATA["deployment_type"]
         self.cluster_path = config.ENV_DATA["cluster_path"]
         self.namespace = config.ENV_DATA["cluster_namespace"]
         self.sts_role_arn = None
+        self.storage_class = storage_class.get_storageclass()
+        self.custom_storage_class_path = None
 
     class OCPDeployment(BaseOCPDeployment):
         """
@@ -475,7 +458,6 @@ def do_deploy_mce(self):
         """
         if config.ENV_DATA["skip_ocs_deployment"]:
-
             if config.ENV_DATA.get("deploy_mce"):
                 mce_installer = MCEInstaller()
                 mce_installer.deploy_mce()
@@ -621,7 +603,6 @@ def do_deploy_hyperconverged(self):
         Should run on OCP deployment phase
         """
         if config.ENV_DATA["skip_ocs_deployment"]:
-
             if config.ENV_DATA.get(
                 "deploy_hyperconverged"
             ) and not config.DEPLOYMENT.get("cnv_deployment"):
@@ -712,11 +693,11 @@ def deploy_cluster(self, log_cli_level="DEBUG"):
         perform_lso_standalone_deployment = config.DEPLOYMENT.get(
             "lso_standalone_deployment", False
         ) and not ocp.OCP(kind=constants.STORAGECLASS).is_exist(
-            resource_name=self.DEFAULT_STORAGECLASS_LSO
+            resource_name=constants.DEFAULT_STORAGECLASS_LSO
         )
         if perform_lso_standalone_deployment:
             cleanup_nodes_for_lso_install()
-            setup_local_storage(storageclass=self.DEFAULT_STORAGECLASS_LSO)
+            setup_local_storage(storageclass=constants.DEFAULT_STORAGECLASS_LSO)
         self.do_deploy_lvmo()
         self.do_deploy_submariner()
         self.do_gitops_deploy()
@@ -1130,14 +1111,13 @@ def deploy_ocs_via_operator(self, image=None):
 
         if local_storage:
             log_step("Deploy and setup Local Storage Operator")
-            setup_local_storage(storageclass=self.DEFAULT_STORAGECLASS_LSO)
+            setup_local_storage(storageclass=constants.DEFAULT_STORAGECLASS_LSO)
 
         log_step("Creating namespace and operator group")
         # patch OLM YAML with the namespace
         olm_ns_op_group_data = list(templating.load_yaml(constants.OLM_YAML, True))
         if self.namespace != constants.OPENSHIFT_STORAGE_NAMESPACE:
-
             for cr in olm_ns_op_group_data:
                 if cr["kind"] == "Namespace":
                     cr["metadata"]["name"] = self.namespace
@@ -1372,13 +1352,10 @@ def deploy_ocs_via_operator(self, image=None):
         )
 
         # create custom storage class for StorageCluster CR if necessary
-        if self.CUSTOM_STORAGE_CLASS_PATH is not None:
-            with open(self.CUSTOM_STORAGE_CLASS_PATH, "r") as custom_sc_fo:
-                custom_sc = yaml.load(custom_sc_fo, Loader=yaml.SafeLoader)
-            # set value of DEFAULT_STORAGECLASS to mach the custom storage cls
-            self.DEFAULT_STORAGECLASS = custom_sc["metadata"]["name"]
-            log_step(f"Creating custom storage class {self.DEFAULT_STORAGECLASS}")
-            run_cmd(f"oc create -f {self.CUSTOM_STORAGE_CLASS_PATH}")
+        if self.custom_storage_class_path is not None:
+            self.storage_class = storage_class.create_custom_storageclass(
+                self.custom_storage_class_path
+            )
 
         # Set rook log level
         self.set_rook_log_level()
@@ -1466,7 +1443,7 @@ def deploy_ocs_via_operator(self, 
image=None):
                 constants.HCI_BAREMETAL,
             ]:
                 pv_size_list = helpers.get_pv_size(
-                    storageclass=self.DEFAULT_STORAGECLASS_LSO
+                    storageclass=constants.DEFAULT_STORAGECLASS_LSO
                 )
                 pv_size_list.sort()
                 deviceset_data["dataPVCTemplate"]["spec"]["resources"]["requests"][
@@ -1477,16 +1454,11 @@ def deploy_ocs_via_operator(self, image=None):
                     "storage"
                 ] = f"{device_size}Gi"
 
-        if self.platform.lower() == constants.ROSA_HCP_PLATFORM:
-            self.DEFAULT_STORAGECLASS = config.DEPLOYMENT.get(
-                "customized_deployment_storage_class", self.DEFAULT_STORAGECLASS
-            )
-
         # set storage class to OCS default on current platform
-        if self.DEFAULT_STORAGECLASS:
+        if self.storage_class:
             deviceset_data["dataPVCTemplate"]["spec"][
                 "storageClassName"
-            ] = self.DEFAULT_STORAGECLASS
+            ] = self.storage_class
 
         # StorageCluster tweaks for LSO
         if local_storage:
@@ -1496,7 +1468,7 @@ def deploy_ocs_via_operator(self, image=None):
             deviceset_data["portable"] = False
             deviceset_data["dataPVCTemplate"]["spec"][
                 "storageClassName"
-            ] = self.DEFAULT_STORAGECLASS_LSO
+            ] = constants.DEFAULT_STORAGECLASS_LSO
             lso_type = config.DEPLOYMENT.get("type")
             if (
                 self.platform.lower() == constants.AWS_PLATFORM
@@ -1581,7 +1553,7 @@ def deploy_ocs_via_operator(self, image=None):
                 "spec": {
                     "accessModes": ["ReadWriteOnce"],
                     "resources": {"requests": {"storage": "20Gi"}},
-                    "storageClassName": self.DEFAULT_STORAGECLASS,
+                    "storageClassName": self.storage_class,
                     "volumeMode": "Filesystem",
                 }
             }
@@ -2410,14 +2382,13 @@ def patch_default_sc_to_non_default(self):
         """
         Patch storage class which comes as default with installation to non-default
         """
-        if not self.DEFAULT_STORAGECLASS:
+        if not self.storage_class:
             logger.info(
-                "Default StorageClass is not set for this class: "
-                f"{self.__class__.__name__}"
+                f"Default StorageClass is not set for this class: {self.__class__.__name__}"
             )
             return
-        sc_to_patch = self.DEFAULT_STORAGECLASS
+        sc_to_patch = self.storage_class
         if (
             config.ENV_DATA.get("use_custom_sc_in_deployment")
             and self.platform.lower() == constants.VSPHERE_PLATFORM
diff --git a/ocs_ci/deployment/gcp.py b/ocs_ci/deployment/gcp.py
index 305f902c4fd..de4b968e3ac 100644
--- a/ocs_ci/deployment/gcp.py
+++ b/ocs_ci/deployment/gcp.py
@@ -73,15 +73,14 @@ class GCPIPI(GCPBase):
     A class to handle GCP IPI specific deployment
     """
 
-    # storage class for StorageCluster CRD on Google Cloud platform
-    # uses a custom storageclass, which is created prior creating
-    # StorageCluster CR during OCS installation
-    CUSTOM_STORAGE_CLASS_PATH = os.path.join(
-        TEMPLATE_DEPLOYMENT_DIR, "storageclass.gcp.yaml"
-    )
-
     OCPDeployment = IPIOCPDeployment
 
     def __init__(self):
         self.name = self.__class__.__name__
         super(GCPIPI, self).__init__()
+        # storage class for StorageCluster CRD on Google Cloud platform
+        # uses a custom storageclass, which is created prior to creating the
+        # StorageCluster CR during OCS installation
+        self.custom_storage_class_path = os.path.join(
+            TEMPLATE_DEPLOYMENT_DIR, "storageclass.gcp.yaml"
+        )
diff --git a/ocs_ci/deployment/helpers/storage_class.py b/ocs_ci/deployment/helpers/storage_class.py
new file mode 100644
index 00000000000..82c0f7d8b34
--- /dev/null
+++ b/ocs_ci/deployment/helpers/storage_class.py
@@ -0,0 +1,64 @@
+import logging
+import yaml
+
+from ocs_ci.framework import config
+from ocs_ci.framework.logger_helper import log_step
+from ocs_ci.ocs import constants
+from ocs_ci.utility.utils import run_cmd
+
+
+logger = logging.getLogger(__name__)
+
+DEFAULT_STORAGE_CLASS_MAP = {
+    constants.AWS_PLATFORM: "gp2-csi",
+    constants.IBMCLOUD_PLATFORM: "ibmc-vpc-block-10iops-tier",
+    constants.VSPHERE_PLATFORM: "thin-csi",
+    constants.AZURE_PLATFORM: "managed-csi",
+    constants.GCP_PLATFORM: None,
+    constants.ROSA_HCP_PLATFORM: None,
+    constants.RHV_PLATFORM: "ovirt-csi-sc",
+}
+
+
+def get_storageclass() -> str:
+    """
+    Retrieve the storageclass to use, either from the config or from the platform default
+
+    Returns:
+        str: Name of the storageclass, or None if the platform has no default
+
+    """
+    logger.info("Getting storageclass")
+    platform = config.ENV_DATA.get("platform")
+    customized_deployment_storage_class = config.DEPLOYMENT.get(
+        "customized_deployment_storage_class"
+    )
+
+    if customized_deployment_storage_class:
+        storage_class = customized_deployment_storage_class
+    else:
+        storage_class = DEFAULT_STORAGE_CLASS_MAP.get(platform)
+
+    logger.info(f"Using storage class: {storage_class}")
+    return storage_class
+
+
+def create_custom_storageclass(storage_class_path: str) -> str:
+    """
+    Create a custom storageclass from the yaml file at storage_class_path
+
+    Args:
+        storage_class_path (str): Filepath to storageclass yaml definition
+
+    Returns:
+        str: Name of the created storageclass
+
+    """
+    with open(storage_class_path, "r") as custom_sc_fo:
+        custom_sc = yaml.load(custom_sc_fo, Loader=yaml.SafeLoader)
+
+    storage_class_name = custom_sc["metadata"]["name"]
+    log_step(f"Creating custom storage class {storage_class_name}")
+    run_cmd(f"oc create -f {storage_class_path}")
+
+    return storage_class_name
diff --git a/ocs_ci/deployment/ibmcloud.py b/ocs_ci/deployment/ibmcloud.py
index cebc6503c79..68529ee849f 100644
--- a/ocs_ci/deployment/ibmcloud.py
+++ b/ocs_ci/deployment/ibmcloud.py
@@ -89,8 +89,6 @@ class IBMCloud(CloudDeploymentBase):
     Deployment class for IBM Cloud
     """
 
-    DEFAULT_STORAGECLASS = "ibmc-vpc-block-10iops-tier"
-
     OCPDeployment = IBMCloudOCPDeployment
 
     def __init__(self):
@@ -139,7 +137,6 @@ class IBMCloudIPI(CloudDeploymentBase):
     A class to handle IBM Cloud IPI specific deployment
     """
 
-    DEFAULT_STORAGECLASS = "ibmc-vpc-block-10iops-tier"
     OCPDeployment = IPIOCPDeployment
 
     def __init__(self):
@@ -207,7 +204,7 @@ def destroy_cluster(self, log_level="DEBUG"):
         if resource_group:
             try:
                 self.delete_bucket()
-                scale_down_pods_and_remove_pvcs(self.DEFAULT_STORAGECLASS)
+                scale_down_pods_and_remove_pvcs(self.storage_class)
             except Exception as err:
                 logger.warning(
                     f"Failed to scale down mon/osd pods or failed to remove PVC's. Error: {err}"
diff --git a/ocs_ci/deployment/rhv.py b/ocs_ci/deployment/rhv.py
index ebcc61c391a..272fb32b560 100644
--- a/ocs_ci/deployment/rhv.py
+++ b/ocs_ci/deployment/rhv.py
@@ -21,9 +21,6 @@ class RHVBASE(OnPremDeploymentBase):
     RHV deployment base class, with code common to both IPI and UPI.
     """
 
-    # default storage class for StorageCluster CRD on RHV platform
-    DEFAULT_STORAGECLASS = "ovirt-csi-sc"
-
     def __init__(self):
         super(RHVBASE, self).__init__()
         if config.ENV_DATA.get("default_cluster_name"):
diff --git a/ocs_ci/deployment/tests/test_cloud.py b/ocs_ci/deployment/tests/test_cloud.py
index dd074f5b7d9..4bb1dfeae28 100644
--- a/ocs_ci/deployment/tests/test_cloud.py
+++ b/ocs_ci/deployment/tests/test_cloud.py
@@ -17,9 +17,6 @@ class TestCloudDeployment(CloudDeploymentBase):
     will fail (which is expected for such base class). 
""" - # avoid raising NotImplementedError so that testing base class is possible - DEFAULT_STORAGECLASS = "cloudstorage" - def test_clouddeploymentbase_init(clusterdir): """ diff --git a/ocs_ci/deployment/vmware.py b/ocs_ci/deployment/vmware.py index 8ce59c312b6..25385d96e50 100644 --- a/ocs_ci/deployment/vmware.py +++ b/ocs_ci/deployment/vmware.py @@ -98,17 +98,6 @@ class VSPHEREBASE(Deployment): - # default storage class for StorageCluster CRD on VmWare platform - if version.get_semantic_ocp_version_from_config() >= version.VERSION_4_13: - if config.ENV_DATA.get("use_custom_sc_in_deployment"): - CUSTOM_STORAGE_CLASS_PATH = os.path.join( - constants.TEMPLATE_DEPLOYMENT_DIR, "storageclass_thin-csi-odf.yaml" - ) - else: - DEFAULT_STORAGECLASS = "thin-csi" - else: - DEFAULT_STORAGECLASS = "thin" - def __init__(self): """ This would be base for both IPI and UPI deployment @@ -129,6 +118,10 @@ def __init__(self): self.cluster_launcer_repo_path = os.path.join( constants.EXTERNAL_DIR, "v4-scaleup" ) + if config.ENV_DATA.get("use_custom_sc_in_deployment"): + self.custom_storage_class_path = os.path.join( + constants.TEMPLATE_DEPLOYMENT_DIR, "storageclass_thin-csi-odf.yaml" + ) os.environ["TF_LOG"] = config.ENV_DATA.get("TF_LOG_LEVEL", "TRACE") os.environ["TF_LOG_PATH"] = os.path.join( config.ENV_DATA.get("cluster_path"), config.ENV_DATA.get("TF_LOG_FILE") @@ -1248,7 +1241,7 @@ def deploy_ocp(self, log_cli_level="DEBUG"): sc_data["parameters"]["diskformat"] = "zeroedthick" templating.dump_data_to_temp_yaml(sc_data, sc_data_yaml.name) run_cmd(f"oc create -f {sc_data_yaml.name}") - self.DEFAULT_STORAGECLASS = "thick" + self.storage_class = "thick" def destroy_cluster(self, log_level="DEBUG"): """ diff --git a/ocs_ci/framework/conf/default_config.yaml b/ocs_ci/framework/conf/default_config.yaml index 631a9fe5a92..33c360b728c 100644 --- a/ocs_ci/framework/conf/default_config.yaml +++ b/ocs_ci/framework/conf/default_config.yaml @@ -167,7 +167,7 @@ ENV_DATA: local_storage_namespace: 'openshift-local-storage' monitoring_enabled: true persistent-monitoring: true - platform: 'AWS' + platform: 'aws' deployment_type: 'ipi' region: 'us-east-2' base_domain: 'qe.rh-ocs.com' diff --git a/ocs_ci/ocs/constants.py b/ocs_ci/ocs/constants.py index 79f6cac8cef..13e627351f3 100644 --- a/ocs_ci/ocs/constants.py +++ b/ocs_ci/ocs/constants.py @@ -492,6 +492,11 @@ DEFAULT_STORAGECLASS_RGW = f"{DEFAULT_CLUSTERNAME}-ceph-rgw" DEFAULT_STORAGECLASS_RBD_THICK = f"{DEFAULT_CLUSTERNAME}-ceph-rbd-thick" DEFAULT_OCS_STORAGECLASS = "default-ocs-storage-class" +# Default storage class for LSO deployments. While each platform specific +# subclass can redefine it, there is a well established platform +# independent default value (based on OCS Installation guide), and it's +# redefinition is not necessary in normal cases. +DEFAULT_STORAGECLASS_LSO = "localblock" THIN_CSI_STORAGECLASS = "thin-csi"