Skip to content

Changing pool name from custom pools to the default block pool #12037

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 16 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 28 additions & 8 deletions ocs_ci/helpers/helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -1549,9 +1549,9 @@ def get_provision_time(interface, pvc_name, status="start"):

"""
    # Define the status that needs to be retrieved
operation = "started"
operation = "Started"
if status.lower() == "end":
operation = "succeeded"
operation = "Succeeded"

this_year = str(datetime.datetime.now().year)
# Get the correct provisioner pod based on the interface
Expand All @@ -1563,15 +1563,18 @@ def get_provision_time(interface, pvc_name, status="start"):
logs = logs.split("\n")
# Extract the time for the one PVC provisioning
if isinstance(pvc_name, str):
stat = [i for i in logs if re.search(f"provision.*{pvc_name}.*{operation}", i)]
stat = [i for i in logs if re.search(f'Started.*PVC="[^"]*/{re.escape(pvc_name)}"', i)]
logger.info(f"linee{i}")
mon_day = " ".join(stat[0].split(" ")[0:2])
stat = f"{this_year} {mon_day}"
# Extract the time for the list of PVCs provisioning
if isinstance(pvc_name, list):
all_stats = []
for i in range(0, len(pvc_name)):
name = pvc_name[i].name
stat = [i for i in logs if re.search(f"provision.*{name}.*{operation}", i)]
logger.info(f"nameee{name}")
stat = [logger.info(f"ii{i}") or i for i in logs if re.search(f'Started.*PVC="[^"]*/{re.escape(name)}"', i)]
logger.info(f"lnee{i}")
mon_day = " ".join(stat[0].split(" ")[0:2])
stat = f"{this_year} {mon_day}"
all_stats.append(stat)
Expand Down Expand Up @@ -1762,11 +1765,11 @@ def measure_pv_deletion_time_bulk(
no_data_list = list()
for pv in pv_name_list:
# check if PV data present in CSI logs
start = [i for i in logs if re.search(f'delete "{pv}": started', i)]
start = [i for i in logs if re.search(f'"shouldDelete is true".*PV="{re.escape(pv)}"', i)]
end = [
i
for i in logs
if re.search(f'delete "{pv}": {delete_suffix_to_search}', i)
if re.search(f'deleted succeeded.*PV="{re.escape(pv)}"', i)
]
if not start or not end:
no_data_list.append(pv)
Expand All @@ -1793,15 +1796,15 @@ def measure_pv_deletion_time_bulk(
this_year = str(datetime.datetime.now().year)
for pv_name in pv_name_list:
# Extract the deletion start time for the PV
start = [i for i in logs if re.search(f'delete "{pv_name}": started', i)]
start = [i for i in logs if re.search(f'"shouldDelete is true".*PV="{re.escape(pv_name)}"', i)]
mon_day = " ".join(start[0].split(" ")[0:2])
start_tm = f"{this_year} {mon_day}"
start_time = datetime.datetime.strptime(start_tm, DATE_TIME_FORMAT)
# Extract the deletion end time for the PV
end = [
i
for i in logs
if re.search(f'delete "{pv_name}": {delete_suffix_to_search}', i)
if re.search(f'deleted succeeded.*PV="{re.escape(pv_name)}"', i)
]
mon_day = " ".join(end[0].split(" ")[0:2])
end_tm = f"{this_year} {mon_day}"
Expand Down Expand Up @@ -6001,3 +6004,20 @@ def find_cephfilesystemsubvolumegroup(storageclient_uid=None):
cephbfssubvolumegroup = storage_consumer.get_cephfs_subvolumegroup()

return cephbfssubvolumegroup

def set_configmap_log_level_csi_sidecar(value):
    """
    Set the CSI_SIDECAR_LOG_LEVEL key on the rook-ceph-operator configmap,
    controlling the verbosity of the CSI sidecar container logs.

    Args:
        value (int): log level to set for the CSI sidecar containers
            (callers in this repo use 5 to enable verbose logging for a
            test run and 1 to restore the default afterwards)

    """
    # Local import so the helper stays self-contained; json is stdlib.
    import json

    configmap_obj = OCP(
        kind=constants.CONFIGMAP,
        namespace=config.ENV_DATA["cluster_namespace"],
        resource_name=constants.ROOK_OPERATOR_CONFIGMAP,
    )
    logger.info(f"Setting CSI_SIDECAR log level to: {value}")
    # Build the merge-patch payload with json.dumps instead of manual
    # f-string interpolation so the value is always correctly quoted
    # and escaped, regardless of what is passed in. The configmap value
    # must be a string, so coerce explicitly.
    params = json.dumps({"data": {"CSI_SIDECAR_LOG_LEVEL": str(value)}})
    configmap_obj.patch(params=params, format_type="merge")
21 changes: 12 additions & 9 deletions ocs_ci/helpers/performance_lib.py
Original file line number Diff line number Diff line change
Expand Up @@ -248,14 +248,14 @@ def measure_pvc_creation_time(interface, pvc_name, start_time):
# time), the earliest start time and the latest end time are taken
for sublog in logs:
for line in sublog:
logger.info(f"lynn{line}")
if (
st is None
and "provision" in line
and "Started" in line
and pvc_name in line
and "started" in line
):
st = string_to_time(line.split(" ")[1])
elif "provision" in line and pvc_name in line and "succeeded" in line:
elif pvc_name in line and "Succeeded" in line:
et = string_to_time(line.split(" ")[1])
del logs
if st is None:
Expand Down Expand Up @@ -658,16 +658,18 @@ def get_pvc_provision_times(interface, pvc_name, start_time, time_type="all", op
if prov_logs:
for sublog in prov_logs:
for line in sublog:
logger.info(f"sublogg : {line}")
logger.info(f"opp{op}")
for i in range(0, len(pvc_name)):
name = pvc_name[i].name
pv_name = pvc_name[i].backed_pv
if op in ["all", "create"]:
if re.search(f"provision.*{name}.*started", line):
if re.search(f'Started.*PVC="[^"]*/{re.escape(name)}"', line):
if results[name]["create"]["start"] is None:
results[name]["create"]["start"] = (
extruct_timestamp_from_log(line)
)
if re.search(f"provision.*{name}.*succeeded", line):
if re.search(f'Succeeded.*{re.escape(name)}', line, re.IGNORECASE):
if results[name]["create"]["end"] is None:
results[name]["create"]["end"] = (
extruct_timestamp_from_log(line)
Expand All @@ -678,21 +680,22 @@ def get_pvc_provision_times(interface, pvc_name, start_time, time_type="all", op
)
)
if op in ["all", "delete"]:
if re.search(f'delete "{pv_name}": started', line):
if re.search(f'"shouldDelete is true".*PV="{re.escape(pv_name)}"', line):
logger.info(f"del true")
if results[name]["delete"]["start"] is None:
results[name]["delete"]["start"] = (
extruct_timestamp_from_log(line)
)
if (
re.search(f'delete "{pv_name}": succeeded', line)
re.search(f'deleted succeeded.*PV="{re.escape(pv_name)}"', line)
and (
version.get_semantic_ocs_version_from_config()
<= version.VERSION_4_13
)
) or re.search(
f'delete "{pv_name}": persistentvolume deleted succeeded',
line,
f'deleted succeeded.*PV="{re.escape(pv_name)}"', line
):
logger.info(f"dell true")
if results[name]["delete"]["end"] is None:
results[name]["delete"]["end"] = (
extruct_timestamp_from_log(line)
Expand Down
29 changes: 28 additions & 1 deletion ocs_ci/ocs/benchmark_operator.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,9 @@ def __init__(self, **kwargs):
"""
log.info("Initialize the benchmark-operator object")
self.args = kwargs
self.kubeconfig = os.path.join(
self.cluster_path, config.RUN.get("kubeconfig_location")
)
self.repo = self.args.get("repo", BMO_REPO)
self.branch = self.args.get("branch", "master")
# the namespace is a constant for the benchmark-operator
Expand Down Expand Up @@ -149,7 +152,20 @@ def deploy(self):
Deploy the benchmark-operator

"""
_env = kwargs.pop("env", os.environ.copy())
kubeconfig_path = config.RUN.get("kubeconfig")
if kubeconfig_path:
_env["KUBECONFIG"] = kubeconfig_path
log.info("Deploy the benchmark-operator project")
try:
a = cmd(b)
except Exception as ex:
print(f"First attempt failed with error: {ex}")
try:
a = cmd(b) # Retry once
except Exception as ex2:
print(f"Second attempt also failed with error: {ex2}")
a = None # Or handle the failure accordingly
try:
bo_image = "quay.io/ocsci/benchmark-operator:testing"
if config.DEPLOYMENT.get("disconnected"):
Expand All @@ -159,9 +175,20 @@ def deploy(self):
shell=True,
check=True,
cwd=self.dir,
env=_env,
)
except Exception as ex:
log.error(f"Failed to deploy benchmark operator : {ex}")
print(f"First attempt failed with error: {ex}")
OCP.set_kubeconfig(self.kubeconfig)
try:
run(
f"make deploy IMG={bo_image}",
shell=True,
check=True,
cwd=self.dir,
)
except Exception as ex2:
log.error(f"Failed to deploy benchmark operator : {ex2}")

log.info("Wait for the benchmark-operator deployment be available")
try:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,10 @@
from ocs_ci.helpers import helpers, performance_lib
from ocs_ci.utility.utils import convert_device_size
from ocs_ci.ocs.perfresult import ResultsAnalyse
from ocs_ci.helpers.helpers import get_full_test_logs_path
from ocs_ci.helpers.helpers import (
get_full_test_logs_path,
set_configmap_log_level_csi_sidecar,
)
from ocs_ci.ocs.perftests import PASTest
from ocs_ci.ocs.resources import pvc, ocs
from ocs_ci.ocs.exceptions import PVCNotCreated, PodNotCreated
Expand Down Expand Up @@ -118,6 +121,10 @@ def setup(self):
# Collecting environment information
self.get_env_info()

        # Set CSI sidecar log level

set_configmap_log_level_csi_sidecar(value=5)

self.number_of_clones = 11
if self.dev_mode:
self.number_of_clones = 3
Expand All @@ -129,60 +136,20 @@ def teardown(self):
Cleanup the test environment
"""
logger.info("Starting the test environment cleanup")
try:
logger.info(f"Deleting the test StorageClass : {self.sc_obj.name}")
self.sc_obj.delete()
logger.info("Wait until the SC is deleted.")
self.sc_obj.ocp.wait_for_delete(resource_name=self.sc_obj.name)
except Exception as ex:
logger.warning(f"Can not delete the test sc : {ex}")
# Delete the test project (namespace)
self.delete_test_project()

logger.info(f"Try to delete the Storage pool {self.pool_name}")
try:
self.delete_ceph_pool(self.pool_name)
except Exception:
pass
finally:
# Verify deletion by checking the backend CEPH pools using the toolbox
if self.interface == constants.CEPHBLOCKPOOL:
results = self.ceph_cluster.toolbox.exec_cmd_on_pod("ceph osd pool ls")
logger.debug(f"Existing pools are : {results}")
if self.pool_name in results.split():
logger.warning(
"The pool did not deleted by CSI, forcing delete it manually"
)
self.ceph_cluster.toolbox.exec_cmd_on_pod(
f"ceph osd pool delete {self.pool_name} {self.pool_name} "
"--yes-i-really-really-mean-it"
)
else:
logger.info(f"The pool {self.pool_name} was deleted successfully")
helpers.set_configmap_log_level_csi_sidecar(value=1)

self.delete_test_project()

super(TestPVCClonePerformance, self).teardown()

def create_new_pool_and_sc(self, secret_factory):
self.pool_name = (
f"pas-test-pool-{Interfaces_info[self.interface]['name'].lower()}"
)
secret = secret_factory(interface=self.interface)
self.create_new_pool(self.pool_name)
# Creating new StorageClass (pool) for the test.
self.sc_obj = helpers.create_storage_class(
interface_type=self.interface,
interface_name=self.pool_name,
secret_name=secret.name,
sc_name=self.pool_name,
fs_name=self.pool_name,
)
logger.info(f"The new SC is : {self.sc_obj.name}")

def create_pvc_and_wait_for_bound(self):
logger.info("Creating PVC to be cloned")
try:
self.pvc_obj = helpers.create_pvc(
sc_name=self.sc_obj.name,
sc_name=Interfaces_info[self.interface]["sc"],
pvc_name="pvc-pas-test",
size=f"{self.pvc_size}Gi",
namespace=self.namespace,
Expand Down Expand Up @@ -343,17 +310,16 @@ def test_clone_create_delete_performance(
test_start_time = self.get_time()

        # Create new pool and sc only for RBD; for CephFS use the default
self.sc_obj = ocs.OCS(
kind="StorageCluster",
metadata={
"namespace": self.namespace,
"name": Interfaces_info[self.interface]["sc"],
},
)
if self.interface == constants.CEPHBLOCKPOOL:
# Creating new pool to run the test on it
self.create_new_pool_and_sc(secret_factory)
self.pool_name = "ocs-storagecluster-cephblockpool"
else:
self.sc_obj = ocs.OCS(
kind="StorageCluster",
metadata={
"namespace": self.namespace,
"name": Interfaces_info[self.interface]["sc"],
},
)
self.pool_name = "ocs-storagecluster-cephfilesystem"
# Create a PVC
self.create_pvc_and_wait_for_bound()
Expand Down Expand Up @@ -464,17 +430,16 @@ def test_pvc_clone_performance_multiple_files(
test_start_time = self.get_time()

        # Create new pool and sc only for RBD; for CephFS use the default
self.sc_obj = ocs.OCS(
kind="StorageCluster",
metadata={
"namespace": self.namespace,
"name": Interfaces_info[self.interface]["sc"],
},
)
if self.interface == constants.CEPHBLOCKPOOL:
# Creating new pool to run the test on it
self.create_new_pool_and_sc(secret_factory)
self.pool_name = "ocs-storagecluster-cephblockpool"
else:
self.sc_obj = ocs.OCS(
kind="StorageCluster",
metadata={
"namespace": self.namespace,
"name": Interfaces_info[self.interface]["sc"],
},
)
self.pool_name = "ocs-storagecluster-cephfilesystem"
# Create a PVC
self.create_pvc_and_wait_for_bound()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@ def setup(self):
super(TestPVCCreationDeletionPerformance, self).setup()
self.benchmark_name = "PVC_Creation-Deletion"
self.create_test_project()
set_configmap_log_level_csi_sidecar(value=5)

def teardown(self):
"""
Expand All @@ -61,6 +62,7 @@ def teardown(self):
log.info("Starting the test environment celanup")
# Delete the test project (namespace)
self.delete_test_project()
set_configmap_log_level_csi_sidecar(value=1)
super(TestPVCCreationDeletionPerformance, self).teardown()

def create_fio_pod_yaml(self, pvc_size=1):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
from ocs_ci.framework.pytest_customization.marks import grey_squad
from ocs_ci.ocs.ocp import OCP, switch_to_project
from ocs_ci.utility import templating
from ocs_ci.helpers.helpers import set_configmap_log_level_csi_sidecar
from ocs_ci.ocs import constants
from ocs_ci.ocs.elasticsearch import ElasticSearch
from ocs_ci.ocs.version import get_environment_info
Expand Down Expand Up @@ -101,6 +102,7 @@ def setup(self):
super(TestPvcSnapshotPerformance, self).setup()
self.benchmark_name = "pvc_snaspshot_performance"
self.tests_numbers = 3 # number of tests to run
set_configmap_log_level_csi_sidecar(value=5)

def init_full_results(self, full_results):
"""
Expand Down Expand Up @@ -403,6 +405,7 @@ def test_pvc_snapshot_performance(self, pvc_size):
self.pod_object.delete()
self.pvc_obj.delete()
self.delete_test_project()
set_configmap_log_level_csi_sidecar(value=1)

# logging the test summary, all info in one place for easy log reading
c_speed, c_runtime, c_csi_runtime, r_speed, r_runtime, r_csi_runtime = (
Expand Down Expand Up @@ -676,6 +679,8 @@ def test_pvc_snapshot_performance_multiple_files(
log.info("Deleting the elastic-search instance")
self.es.cleanup()

set_configmap_log_level_csi_sidecar(value=1)

creation_times = [t["creation_time"] for t in all_results]
avg_c_time = statistics.mean(creation_times)
csi_creation_times = [t["csi_creation_time"] for t in all_results]
Expand Down