From 4d8429da1ccbc95d677c39d7b0f708d1001f1853 Mon Sep 17 00:00:00 2001 From: ThisIsClark Date: Mon, 2 Nov 2020 11:16:06 +0800 Subject: [PATCH 01/15] Code improvement (#363) --- delfin/alert_manager/snmp_validator.py | 1 - delfin/drivers/api.py | 7 +- .../dell_emc/vmax/alert_handler/oid_mapper.py | 5 +- .../vmax/alert_handler/snmp_alerts.py | 12 ++- .../vmax/alert_handler/unisphere_alerts.py | 2 +- delfin/drivers/dell_emc/vmax/client.py | 3 +- delfin/drivers/dell_emc/vmax/rest.py | 36 ++------- delfin/drivers/dell_emc/vmax/vmax.py | 3 +- delfin/drivers/driver.py | 4 +- delfin/drivers/fake_storage/__init__.py | 3 +- delfin/drivers/hpe/hpe_3par/alert_handler.py | 28 ++++--- .../drivers/hpe/hpe_3par/component_handler.py | 4 +- delfin/drivers/hpe/hpe_3par/hpe_3parstor.py | 12 ++- delfin/drivers/hpe/hpe_3par/rest_handler.py | 3 +- delfin/drivers/hpe/hpe_3par/ssh_handler.py | 2 + .../drivers/huawei/oceanstor/alert_handler.py | 24 +++--- delfin/drivers/huawei/oceanstor/oceanstor.py | 3 +- delfin/drivers/huawei/oceanstor/oid_mapper.py | 5 +- .../drivers/huawei/oceanstor/rest_client.py | 13 ++-- delfin/drivers/manager.py | 3 + delfin/drivers/utils/rest_client.py | 36 ++------- delfin/drivers/utils/ssh_client.py | 4 +- delfin/exception.py | 8 +- delfin/ssl_utils.py | 9 ++- delfin/task_manager/manager.py | 11 ++- delfin/task_manager/rpcapi.py | 7 ++ delfin/task_manager/tasks/alerts.py | 17 +++++ delfin/test.py | 2 +- .../drivers/hpe/hpe_3par/test_hpe_3parstor.py | 24 ++---- delfin/tests/unit/drivers/test_api.py | 73 ++++++------------- 30 files changed, 160 insertions(+), 204 deletions(-) diff --git a/delfin/alert_manager/snmp_validator.py b/delfin/alert_manager/snmp_validator.py index b3b565e96..1c51f419e 100644 --- a/delfin/alert_manager/snmp_validator.py +++ b/delfin/alert_manager/snmp_validator.py @@ -37,7 +37,6 @@ def __init__(self): self.snmp_error_flag = {} def validate(self, ctxt, alert_source): - alert_source = dict(alert_source) engine_id = 
alert_source.get('engine_id') try: alert_source = self.validate_connectivity(alert_source) diff --git a/delfin/drivers/api.py b/delfin/drivers/api.py index 5b88e375f..e7f9c06fe 100644 --- a/delfin/drivers/api.py +++ b/delfin/drivers/api.py @@ -45,7 +45,6 @@ def discover_storage(self, context, access_info): access_info = db.access_info_create(context, access_info) storage['id'] = access_info['storage_id'] storage = db.storage_create(context, storage) - self.driver_manager.update_driver(storage['id'], driver) LOG.info("Storage found successfully.") return storage @@ -63,7 +62,6 @@ def update_access_info(self, context, access_info): helper.check_storage_consistency(context, storage_id, storage_new) access_info = db.access_info_update(context, storage_id, access_info) db.storage_update(context, storage_id, storage_new) - self.driver_manager.update_driver(storage_id, driver) LOG.info("Access information updated successfully.") return access_info @@ -97,7 +95,10 @@ def remove_trap_config(self, context, storage_id, trap_config): def parse_alert(self, context, storage_id, alert): """Parse alert data got from snmp trap server.""" - driver = self.driver_manager.get_driver(context, storage_id=storage_id) + access_info = db.access_info_get(context, storage_id) + driver = self.driver_manager.get_driver(context, + invoke_on_load=False, + **access_info) return driver.parse_alert(context, alert) def clear_alert(self, context, storage_id, sequence_number): diff --git a/delfin/drivers/dell_emc/vmax/alert_handler/oid_mapper.py b/delfin/drivers/dell_emc/vmax/alert_handler/oid_mapper.py index 55a94627b..9db7e33f9 100644 --- a/delfin/drivers/dell_emc/vmax/alert_handler/oid_mapper.py +++ b/delfin/drivers/dell_emc/vmax/alert_handler/oid_mapper.py @@ -32,14 +32,15 @@ class OidMapper(object): def __init__(self): pass - def map_oids(self, alert): + @staticmethod + def map_oids(alert): """Translate oids using static map.""" alert_model = dict() for attr in alert: # Remove the instance number 
at the end of oid before mapping oid_str = attr.rsplit('.', 1)[0] - key = self.OID_MAP.get(oid_str, None) + key = OidMapper.OID_MAP.get(oid_str, None) alert_model[key] = alert[attr] return alert_model diff --git a/delfin/drivers/dell_emc/vmax/alert_handler/snmp_alerts.py b/delfin/drivers/dell_emc/vmax/alert_handler/snmp_alerts.py index d344b2f6b..00a36b0e6 100644 --- a/delfin/drivers/dell_emc/vmax/alert_handler/snmp_alerts.py +++ b/delfin/drivers/dell_emc/vmax/alert_handler/snmp_alerts.py @@ -27,9 +27,6 @@ class AlertHandler(object): """Alert handling functions for vmax snmp traps""" - def __init__(self): - self.oid_mapper = oid_mapper.OidMapper() - # Translation of trap severity to alert model severity # Values are: # unknown 1, emergency 2, alert 3, critical 4, error 5, @@ -53,12 +50,13 @@ def __init__(self): 'emcAsyncEventComponentName', 'emcAsyncEventSource') - def parse_alert(self, context, alert): + @staticmethod + def parse_alert(context, alert): """Parse alert data got from alert manager and fill the alert model.""" - alert = self.oid_mapper.map_oids(alert) + alert = oid_mapper.OidMapper.map_oids(alert) # Check for mandatory alert attributes - for attr in self._mandatory_alert_attributes: + for attr in AlertHandler._mandatory_alert_attributes: if not alert.get(attr): msg = "Mandatory information %s missing in alert message. 
" \ % attr @@ -71,7 +69,7 @@ def parse_alert(self, context, alert): alert_model['alert_name'] = alert_mapper.alarm_id_name_mapping.get( alert_model['alert_id'], alert_model['alert_id']) - alert_model['severity'] = self.SEVERITY_MAP.get( + alert_model['severity'] = AlertHandler.SEVERITY_MAP.get( alert['connUnitEventSeverity'], constants.Severity.INFORMATIONAL) diff --git a/delfin/drivers/dell_emc/vmax/alert_handler/unisphere_alerts.py b/delfin/drivers/dell_emc/vmax/alert_handler/unisphere_alerts.py index b3379710d..c1beaf532 100644 --- a/delfin/drivers/dell_emc/vmax/alert_handler/unisphere_alerts.py +++ b/delfin/drivers/dell_emc/vmax/alert_handler/unisphere_alerts.py @@ -51,7 +51,7 @@ def parse_queried_alerts(self, alert_list): alert['severity'], constants.Severity.NOT_SPECIFIED) # category and type are not part of queried alerts - alert_model['category'] = constants.Category.EVENT + alert_model['category'] = constants.Category.FAULT alert_model['type'] = constants.EventType.EQUIPMENT_ALARM alert_model['sequence_number'] = alert['alertId'] diff --git a/delfin/drivers/dell_emc/vmax/client.py b/delfin/drivers/dell_emc/vmax/client.py index 15c2551e0..05aa76bae 100644 --- a/delfin/drivers/dell_emc/vmax/client.py +++ b/delfin/drivers/dell_emc/vmax/client.py @@ -52,8 +52,7 @@ def init_connection(self, access_info): LOG.error(msg) raise e except (exception.SSLCertificateFailed, - exception.WrongTlsVersion, - exception.CipherNotMatch) as e: + exception.SSLHandshakeFailed) as e: msg = ("Failed to connect to VMAX: {}".format(e)) LOG.error(msg) raise diff --git a/delfin/drivers/dell_emc/vmax/rest.py b/delfin/drivers/dell_emc/vmax/rest.py index c91001c38..ef8be6ab9 100644 --- a/delfin/drivers/dell_emc/vmax/rest.py +++ b/delfin/drivers/dell_emc/vmax/rest.py @@ -15,23 +15,20 @@ # under the License. 
import json -import ssl import sys -from oslo_log import log as logging import requests import requests.auth import requests.exceptions as r_exc import six import urllib3 -from requests.adapters import HTTPAdapter -from requests.packages.urllib3.poolmanager import PoolManager +from oslo_log import log as logging from delfin import cryptor from delfin import exception +from delfin import ssl_utils from delfin.common import alert_util from delfin.i18n import _ -from delfin import ssl_utils LOG = logging.getLogger(__name__) SLOPROVISIONING = 'sloprovisioning' @@ -52,24 +49,6 @@ VERSION_GET_TIME_OUT = 10 -class HostNameIgnoringAdapter(HTTPAdapter): - - def cert_verify(self, conn, url, verify, cert): - conn.assert_hostname = False - return super(HostNameIgnoringAdapter, self).cert_verify( - conn, url, verify, cert) - - def init_poolmanager(self, connections, maxsize, block=False, - **pool_kwargs): - self._pool_connections = connections - self._pool_maxsize = maxsize - self._pool_block = block - self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, - block=block, strict=True, - ssl_version=ssl.PROTOCOL_TLSv1, - **pool_kwargs) - - class VMaxRest(object): """Rest class based on Unisphere for VMax Rest API.""" @@ -114,7 +93,7 @@ def establish_rest_session(self): LOG.debug("Enable certificate verification, ca_path: {0}".format( self.verify)) session.verify = self.verify - session.mount("https://", ssl_utils.HostNameIgnoreAdapter()) + session.mount("https://", ssl_utils.get_host_name_ignore_adapter()) self.session = session return session @@ -175,15 +154,10 @@ def request(self, target_uri, method, params=None, request_object=None, "message: %(e)s") % {'base_uri': self.base_uri, 'e': e} LOG.error(msg) err_str = six.text_type(e) - if 'wrong ssl version' in err_str or \ - 'sslv3 alert handshake failure' in err_str: - raise exception.WrongTlsVersion() - elif 'no cipher match' in err_str: - raise exception.CipherNotMatch() - elif 'certificate verify failed' in 
err_str: + if 'certificate verify failed' in err_str: raise exception.SSLCertificateFailed() else: - raise e + raise exception.SSLHandshakeFailed() except (r_exc.Timeout, r_exc.ConnectionError, r_exc.HTTPError) as e: diff --git a/delfin/drivers/dell_emc/vmax/vmax.py b/delfin/drivers/dell_emc/vmax/vmax.py index 88e23330c..8ee904cd6 100644 --- a/delfin/drivers/dell_emc/vmax/vmax.py +++ b/delfin/drivers/dell_emc/vmax/vmax.py @@ -84,7 +84,8 @@ def add_trap_config(self, context, trap_config): def remove_trap_config(self, context, trap_config): pass - def parse_alert(self, context, alert): + @staticmethod + def parse_alert(context, alert): return snmp_alerts.AlertHandler().parse_alert(context, alert) def clear_alert(self, context, sequence_number): diff --git a/delfin/drivers/driver.py b/delfin/drivers/driver.py index b0f02f93d..77b6b4f80 100644 --- a/delfin/drivers/driver.py +++ b/delfin/drivers/driver.py @@ -58,8 +58,8 @@ def remove_trap_config(self, context, trap_config): """Remove trap receiver configuration from storage system.""" pass - @abc.abstractmethod - def parse_alert(self, context, alert): + @staticmethod + def parse_alert(context, alert): """Parse alert data got from snmp trap server.""" """ diff --git a/delfin/drivers/fake_storage/__init__.py b/delfin/drivers/fake_storage/__init__.py index b5b643b2a..fbe5454a6 100644 --- a/delfin/drivers/fake_storage/__init__.py +++ b/delfin/drivers/fake_storage/__init__.py @@ -164,7 +164,8 @@ def add_trap_config(self, context, trap_config): def remove_trap_config(self, context, trap_config): pass - def parse_alert(self, context, alert): + @staticmethod + def parse_alert(context, alert): pass def clear_alert(self, context, alert): diff --git a/delfin/drivers/hpe/hpe_3par/alert_handler.py b/delfin/drivers/hpe/hpe_3par/alert_handler.py index f5e728bb4..52df714f9 100644 --- a/delfin/drivers/hpe/hpe_3par/alert_handler.py +++ b/delfin/drivers/hpe/hpe_3par/alert_handler.py @@ -90,10 +90,11 @@ def __init__(self, 
rest_handler=None, ssh_handler=None): self.rest_handler = rest_handler self.ssh_handler = ssh_handler - def parse_alert(self, context, alert): + @staticmethod + def parse_alert(context, alert): """Parse alert data got from alert manager and fill the alert model.""" # Check for mandatory alert attributes - for attr in self._mandatory_alert_attributes: + for attr in AlertHandler._mandatory_alert_attributes: if not alert.get(attr): msg = "Mandatory information %s missing in alert message. " \ % attr @@ -103,17 +104,17 @@ def parse_alert(self, context, alert): alert_model = dict() # These information are sourced from device registration info alert_model['alert_id'] = alert.get(AlertHandler.OID_MESSAGECODE) - alert_model['alert_name'] = self.get_alert_type(alert.get( + alert_model['alert_name'] = AlertHandler.get_alert_type(alert.get( AlertHandler.OID_MESSAGECODE)) - alert_model['severity'] = self.SEVERITY_MAP.get( + alert_model['severity'] = AlertHandler.SEVERITY_MAP.get( alert.get(AlertHandler.OID_SEVERITY), constants.Severity.NOT_SPECIFIED) - alert_model['category'] = self.CATEGORY_MAP.get( + alert_model['category'] = AlertHandler.CATEGORY_MAP.get( alert.get(AlertHandler.OID_STATE), constants.Category.NOT_SPECIFIED) alert_model['type'] = constants.EventType.EQUIPMENT_ALARM alert_model['sequence_number'] = alert.get(AlertHandler.OID_ID) - alert_model['occur_time'] = self.get_time_stamp( + alert_model['occur_time'] = AlertHandler.get_time_stamp( alert.get(AlertHandler.OID_TIMEOCCURRED)) alert_model['description'] = alert.get(AlertHandler.OID_DETAILS) alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE @@ -155,12 +156,13 @@ def clear_alert(self, context, alert): LOG.error(err_msg) raise exception.InvalidResults(err_msg) - def judge_alert_time(self, map, query_para): + @staticmethod + def judge_alert_time(map, query_para): if len(map) <= 1: return False if query_para is None and len(map) > 1: return True - occur_time = 
self.get_time_stamp(map.get('occur_time')) + occur_time = AlertHandler.get_time_stamp(map.get('occur_time')) if query_para.get('begin_time') and query_para.get('end_time'): if occur_time >= int(query_para.get('begin_time')) and \ occur_time <= int(query_para.get('end_time')): @@ -187,10 +189,10 @@ def handle_alters(self, alertlist, query_para): value = self.ALERT_KEY_MAP.get( strinfo[0]) and strinfo[1] or '' map[key] = value - elif self.judge_alert_time(map, query_para): + elif AlertHandler.judge_alert_time(map, query_para): severity = self.ALERT_LEVEL_MAP.get(map.get('severity')) category = map.get('category') == 'New' and 'Fault' or '' - occur_time = self.get_time_stamp(map.get('occur_time')) + occur_time = AlertHandler.get_time_stamp(map.get('occur_time')) alert_id = map.get('message_code') and str(int(map.get( 'message_code'), 16)) or '' alert_model = { @@ -232,7 +234,8 @@ def list_alerts(self, context, query_para): LOG.error(err_msg) raise exception.InvalidResults(err_msg) - def get_time_stamp(self, time_str): + @staticmethod + def get_time_stamp(time_str): """ Time stamp to time conversion """ time_stamp = '' @@ -247,7 +250,8 @@ def get_time_stamp(self, time_str): return time_stamp - def get_alert_type(self, message_code): + @staticmethod + def get_alert_type(message_code): """ Get alert type diff --git a/delfin/drivers/hpe/hpe_3par/component_handler.py b/delfin/drivers/hpe/hpe_3par/component_handler.py index 7341fc9e5..7f50e05e8 100644 --- a/delfin/drivers/hpe/hpe_3par/component_handler.py +++ b/delfin/drivers/hpe/hpe_3par/component_handler.py @@ -68,9 +68,9 @@ def get_storage(self, context): status = constants.StorageStatus.ABNORMAL LOG.error('SSH check health Failed!') - used_cap = int(storage.get('totalCapacityMiB')) * units.Mi free_cap = int(storage.get('freeCapacityMiB')) * units.Mi - total_cap = used_cap + free_cap + used_cap = int(storage.get('allocatedCapacityMiB')) * units.Mi + total_cap = free_cap + used_cap raw_cap = 
int(storage.get('totalCapacityMiB')) * units.Mi result = { 'name': storage.get('name'), diff --git a/delfin/drivers/hpe/hpe_3par/hpe_3parstor.py b/delfin/drivers/hpe/hpe_3par/hpe_3parstor.py index 8869e1927..45b3f4296 100644 --- a/delfin/drivers/hpe/hpe_3par/hpe_3parstor.py +++ b/delfin/drivers/hpe/hpe_3par/hpe_3parstor.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import six from oslo_log import log from delfin import context @@ -48,7 +49,11 @@ def __init__(self, **kwargs): rest_handler=self.rest_handler, ssh_handler=self.ssh_handler) def reset_connection(self, context, **kwargs): - self.rest_handler.logout() + try: + self.rest_handler.logout() + except Exception as e: + LOG.warning('logout failed when resetting connection, ' + 'reason is %s' % six.text_type(e)) self.rest_client.verify = kwargs.get('verify', False) self.rest_handler.login() @@ -75,8 +80,9 @@ def add_trap_config(self, context, trap_config): def remove_trap_config(self, context, trap_config): pass - def parse_alert(self, context, alert): - return self.alert_handler.parse_alert(context, alert) + @staticmethod + def parse_alert(context, alert): + return alert_handler.AlertHandler().parse_alert(context, alert) def clear_alert(self, context, alert): return self.alert_handler.clear_alert(context, alert) diff --git a/delfin/drivers/hpe/hpe_3par/rest_handler.py b/delfin/drivers/hpe/hpe_3par/rest_handler.py index 14a0628c6..97f4cbb2f 100644 --- a/delfin/drivers/hpe/hpe_3par/rest_handler.py +++ b/delfin/drivers/hpe/hpe_3par/rest_handler.py @@ -88,7 +88,8 @@ def call(self, url, data=None, method=None): LOG.error('Rest exec failed') return res - + except exception.SSLCertificateFailed: + raise except Exception as e: err_msg = "Get RestHandler.call failed: %s" % (six.text_type(e)) LOG.error(err_msg) diff --git a/delfin/drivers/hpe/hpe_3par/ssh_handler.py b/delfin/drivers/hpe/hpe_3par/ssh_handler.py index d55d130b4..d5c69e4f4 100644 
--- a/delfin/drivers/hpe/hpe_3par/ssh_handler.py +++ b/delfin/drivers/hpe/hpe_3par/ssh_handler.py @@ -18,6 +18,7 @@ from oslo_log import log as logging from delfin import exception +from delfin import utils from delfin.drivers.utils.ssh_client import SSHClient @@ -98,6 +99,7 @@ def remove_alerts(self, alert_id): Currently not implemented removes command : removealert """ ssh_client = SSHClient(**self.kwargs) + utils.check_ssh_injection([alert_id]) command_str = SSHHandler.HPE3PAR_COMMAND_REMOVEALERT % alert_id res = ssh_client.do_exec(command_str) if res: diff --git a/delfin/drivers/huawei/oceanstor/alert_handler.py b/delfin/drivers/huawei/oceanstor/alert_handler.py index 42bea4630..d2ad6e378 100644 --- a/delfin/drivers/huawei/oceanstor/alert_handler.py +++ b/delfin/drivers/huawei/oceanstor/alert_handler.py @@ -27,8 +27,6 @@ class AlertHandler(object): """Alert handling functions for huawei oceanstor driver""" - def __init__(self): - self.oid_mapper = oid_mapper.OidMapper() TIME_PATTERN = "%Y-%m-%d,%H:%M:%S.%f" @@ -76,13 +74,14 @@ def __init__(self): 'hwIsmReportingAlarmFaultTime' ) - def parse_alert(self, context, alert): + @staticmethod + def parse_alert(context, alert): """Parse alert data and fill the alert model.""" # Check for mandatory alert attributes - alert = self.oid_mapper.map_oids(alert) + alert = oid_mapper.OidMapper.map_oids(alert) LOG.info("Get alert from storage: %s", alert) - for attr in self._mandatory_alert_attributes: + for attr in AlertHandler._mandatory_alert_attributes: if not alert.get(attr): msg = "Mandatory information %s missing in alert message. 
" \ % attr @@ -93,29 +92,29 @@ def parse_alert(self, context, alert): # These information are sourced from device registration info alert_model['alert_id'] = alert['hwIsmReportingAlarmAlarmID'] alert_model['alert_name'] = alert['hwIsmReportingAlarmFaultTitle'] - alert_model['severity'] = self.SEVERITY_MAP.get( + alert_model['severity'] = AlertHandler.SEVERITY_MAP.get( alert['hwIsmReportingAlarmFaultLevel'], constants.Severity.NOT_SPECIFIED) - alert_model['category'] = self.CATEGORY_MAP.get( + alert_model['category'] = AlertHandler.CATEGORY_MAP.get( alert['hwIsmReportingAlarmFaultCategory'], constants.Category.NOT_SPECIFIED) - alert_model['type'] = self.TYPE_MAP.get( + alert_model['type'] = AlertHandler.TYPE_MAP.get( alert['hwIsmReportingAlarmFaultType'], constants.EventType.NOT_SPECIFIED) alert_model['sequence_number'] \ = alert['hwIsmReportingAlarmSerialNo'] occur_time = datetime.strptime( alert['hwIsmReportingAlarmFaultTime'], - self.TIME_PATTERN) + AlertHandler.TIME_PATTERN) alert_model['occur_time'] = int(occur_time.timestamp() * 1000) description = alert['hwIsmReportingAlarmAdditionInfo'] - if self._is_hex(description): + if AlertHandler._is_hex(description): description = bytes.fromhex(description[2:]).decode('ascii') alert_model['description'] = description recovery_advice = alert['hwIsmReportingAlarmRestoreAdvice'] - if self._is_hex(recovery_advice): + if AlertHandler._is_hex(recovery_advice): recovery_advice = bytes.fromhex( recovery_advice[2:]).decode('ascii') @@ -189,7 +188,8 @@ def clear_alert(self, context, storage_id, alert): """Clear alert from storage system.""" pass - def _is_hex(self, value): + @staticmethod + def _is_hex(value): try: int(value, 16) except ValueError: diff --git a/delfin/drivers/huawei/oceanstor/oceanstor.py b/delfin/drivers/huawei/oceanstor/oceanstor.py index 3fb387006..76e92d92d 100644 --- a/delfin/drivers/huawei/oceanstor/oceanstor.py +++ b/delfin/drivers/huawei/oceanstor/oceanstor.py @@ -184,7 +184,8 @@ def 
add_trap_config(self, context, trap_config): def remove_trap_config(self, context, trap_config): pass - def parse_alert(self, context, alert): + @staticmethod + def parse_alert(context, alert): return alert_handler.AlertHandler().parse_alert(context, alert) def clear_alert(self, context, sequence_number): diff --git a/delfin/drivers/huawei/oceanstor/oid_mapper.py b/delfin/drivers/huawei/oceanstor/oid_mapper.py index 99d464554..2ee3ff64d 100644 --- a/delfin/drivers/huawei/oceanstor/oid_mapper.py +++ b/delfin/drivers/huawei/oceanstor/oid_mapper.py @@ -34,14 +34,15 @@ class OidMapper(object): def __init__(self): pass - def map_oids(self, alert): + @staticmethod + def map_oids(alert): """Translate oids using static map.""" alert_model = dict() for attr in alert: # Remove the instance number at the end of oid before mapping oid_str = attr.rsplit('.', 1)[0] - key = self.OID_MAP.get(oid_str, None) + key = OidMapper.OID_MAP.get(oid_str, None) alert_model[key] = alert[attr] return alert_model diff --git a/delfin/drivers/huawei/oceanstor/rest_client.py b/delfin/drivers/huawei/oceanstor/rest_client.py index bf3cd1010..c76fae3ad 100644 --- a/delfin/drivers/huawei/oceanstor/rest_client.py +++ b/delfin/drivers/huawei/oceanstor/rest_client.py @@ -103,11 +103,14 @@ def do_call(self, url, data, method, try: res = func(url, **kwargs) - except requests.exceptions.SSLError as ssl_exc: - LOG.exception('SSLError exception from server: %(url)s.' - ' Error: %(err)s', {'url': url, 'err': ssl_exc}) - return {"error": {"code": consts.ERROR_CONNECT_TO_SERVER, - "description": "Retry with valid certificate."}} + except requests.exceptions.SSLError as e: + LOG.error('SSLError exception from server: %(url)s.' + ' Error: %(err)s', {'url': url, 'err': e}) + err_str = six.text_type(e) + if 'certificate verify failed' in err_str: + raise exception.SSLCertificateFailed() + else: + raise exception.SSLHandshakeFailed() except Exception as err: LOG.exception('Bad response from server: %(url)s.' 
' Error: %(err)s', {'url': url, 'err': err}) diff --git a/delfin/drivers/manager.py b/delfin/drivers/manager.py index a85261ba1..be665ce52 100644 --- a/delfin/drivers/manager.py +++ b/delfin/drivers/manager.py @@ -80,6 +80,9 @@ def _get_driver_obj(self, context, cache_on_load=True, **kwargs): cls = self._get_driver_cls(**kwargs) return cls(**kwargs) + if kwargs['storage_id'] in self.driver_factory: + return self.driver_factory[kwargs['storage_id']] + with self._instance_lock: if kwargs['storage_id'] in self.driver_factory: return self.driver_factory[kwargs['storage_id']] diff --git a/delfin/drivers/utils/rest_client.py b/delfin/drivers/utils/rest_client.py index fd73741e1..9dc69ffe9 100644 --- a/delfin/drivers/utils/rest_client.py +++ b/delfin/drivers/utils/rest_client.py @@ -15,12 +15,10 @@ # under the License. import json -import ssl -import six + import requests +import six from oslo_log import log as logging -from requests.adapters import HTTPAdapter -from requests.packages.urllib3.poolmanager import PoolManager from delfin import exception from delfin import ssl_utils @@ -30,24 +28,6 @@ LOG = logging.getLogger(__name__) -class HostNameIgnoringAdapter(HTTPAdapter): - - def cert_verify(self, conn, url, verify, cert): - conn.assert_hostname = False - return super(HostNameIgnoringAdapter, self).cert_verify( - conn, url, verify, cert) - - def init_poolmanager(self, connections, maxsize, block=False, - **pool_kwargs): - self._pool_connections = connections - self._pool_maxsize = maxsize - self._pool_block = block - self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, - block=block, strict=False, - ssl_version=ssl.PROTOCOL_TLSv1, - **pool_kwargs) - - class RestClient(object): def __init__(self, **kwargs): @@ -81,7 +61,8 @@ def init_http_head(self): self.verify)) self.session.verify = self.verify self.session.trust_env = False - self.session.mount("https://", ssl_utils.HostNameIgnoreAdapter()) + self.session.mount("https://", + 
ssl_utils.get_host_name_ignore_adapter()) def do_call(self, url, data, method, calltimeout=consts.SOCKET_TIMEOUT): @@ -108,15 +89,10 @@ def do_call(self, url, data, method, except requests.exceptions.SSLError as e: LOG.error('SSLError for %s %s' % (method, url)) err_str = six.text_type(e) - if 'wrong ssl version' in err_str or \ - 'sslv3 alert handshake failure' in err_str: - raise exception.WrongTlsVersion() - elif 'no cipher match' in err_str: - raise exception.CipherNotMatch() - elif 'certificate verify failed' in err_str: + if 'certificate verify failed' in err_str: raise exception.SSLCertificateFailed() else: - raise e + raise exception.SSLHandshakeFailed() except Exception as err: LOG.exception('Bad response from server: %(url)s.' ' Error: %(err)s', {'url': url, 'err': err}) diff --git a/delfin/drivers/utils/ssh_client.py b/delfin/drivers/utils/ssh_client.py index bb7fbca3d..25a89b908 100644 --- a/delfin/drivers/utils/ssh_client.py +++ b/delfin/drivers/utils/ssh_client.py @@ -14,11 +14,11 @@ # License for the specific language governing permissions and limitations # under the License. 
-import paramiko as paramiko +import paramiko from oslo_log import log as logging from paramiko.hostkeys import HostKeyEntry -from delfin import cryptor +from delfin import cryptor from delfin import exception LOG = logging.getLogger(__name__) diff --git a/delfin/exception.py b/delfin/exception.py index 2ae7b40a7..9a03d4b48 100644 --- a/delfin/exception.py +++ b/delfin/exception.py @@ -267,12 +267,8 @@ class SSLCertificateFailed(DelfinException): code = 400 -class CipherNotMatch(Invalid): - msg_fmt = _("Cipher Not Match.") - - -class WrongTlsVersion(Invalid): - msg_fmt = _("Wrong TLS Version.") +class SSLHandshakeFailed(Invalid): + msg_fmt = _("SSL handshake failure.") class StorageIsSyncing(Invalid): diff --git a/delfin/ssl_utils.py b/delfin/ssl_utils.py index 260711e1a..a17078bf1 100644 --- a/delfin/ssl_utils.py +++ b/delfin/ssl_utils.py @@ -13,7 +13,6 @@ # limitations under the License. import os -import ssl import requests from oslo_config import cfg from oslo_log import log @@ -77,6 +76,10 @@ def reload_certificate(ca_path): _load_cert(fpath, file, ca_path) +def get_host_name_ignore_adapter(): + return HostNameIgnoreAdapter() + + class HostNameIgnoreAdapter(requests.adapters.HTTPAdapter): def cert_verify(self, conn, url, verify, cert): conn.assert_hostname = False @@ -89,6 +92,4 @@ def init_poolmanager(self, connections, maxsize, block=False, self._pool_maxsize = maxsize self._pool_block = block self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, - block=block, strict=True, - ssl_version=ssl.PROTOCOL_TLSv1_2, - **pool_kwargs) + block=block, strict=True, **pool_kwargs) diff --git a/delfin/task_manager/manager.py b/delfin/task_manager/manager.py index 45a5c965c..71e5c872a 100644 --- a/delfin/task_manager/manager.py +++ b/delfin/task_manager/manager.py @@ -36,7 +36,7 @@ class TaskManager(manager.Manager): RPC_API_VERSION = '1.0' def __init__(self, service_name=None, *args, **kwargs): - self.alert_sync = alerts.AlertSyncTask() + self.alert_task 
= alerts.AlertSyncTask() super(TaskManager, self).__init__(*args, **kwargs) def sync_storage_resource(self, context, storage_id, resource_task): @@ -60,4 +60,11 @@ def remove_storage_in_cache(self, context, storage_id): def sync_storage_alerts(self, context, storage_id, query_para): LOG.info('Alert sync called for storage id:{0}' .format(storage_id)) - self.alert_sync.sync_alerts(context, storage_id, query_para) + self.alert_task.sync_alerts(context, storage_id, query_para) + + def clear_storage_alerts(self, context, storage_id, sequence_number_list): + LOG.info('Clear alerts called for storage id: {0}' + .format(storage_id)) + return self.alert_task.clear_alerts(context, + storage_id, + sequence_number_list) diff --git a/delfin/task_manager/rpcapi.py b/delfin/task_manager/rpcapi.py index 399b934f5..dc9231f29 100644 --- a/delfin/task_manager/rpcapi.py +++ b/delfin/task_manager/rpcapi.py @@ -66,3 +66,10 @@ def sync_storage_alerts(self, context, storage_id, query_para): 'sync_storage_alerts', storage_id=storage_id, query_para=query_para) + + def clear_storage_alerts(self, context, storage_id, sequence_number_list): + call_context = self.client.prepare(version='1.0') + return call_context.call(context, + 'clear_storage_alerts', + storage_id=storage_id, + sequence_number_list=sequence_number_list) diff --git a/delfin/task_manager/tasks/alerts.py b/delfin/task_manager/tasks/alerts.py index dc39a39e0..318b95644 100644 --- a/delfin/task_manager/tasks/alerts.py +++ b/delfin/task_manager/tasks/alerts.py @@ -55,3 +55,20 @@ def sync_alerts(self, ctx, storage_id, query_para): msg = _('Failed to sync alerts from storage device: {0}' .format(six.text_type(e))) LOG.error(msg) + + def clear_alerts(self, ctx, storage_id, sequence_number_list): + """ Clear alert from storage """ + + LOG.info('Clear alert for storage id:{0}'.format(storage_id)) + sequence_number_list = sequence_number_list or [] + failure_list = [] + for sequence_number in sequence_number_list: + try: + 
self.driver_manager.clear_alert(ctx, storage_id, + sequence_number) + except Exception as e: + LOG.error("Failed to clear alert with sequence number: %s " + "for storage: %s, reason: %s.", + sequence_number, storage_id, six.text_type(e)) + failure_list.append(sequence_number) + return failure_list diff --git a/delfin/test.py b/delfin/test.py index 16bba9fff..39e6bc5b3 100644 --- a/delfin/test.py +++ b/delfin/test.py @@ -90,7 +90,7 @@ def setUp(self): self.injected = [] self._services = [] # This will be cleaned up by the NestedTempfile fixture - lock_path = self.useFixture(fixtures.TempDir()).path + lock_path = '/' + self.useFixture(fixtures.TempDir()).path self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF)) self.fixture.config(lock_path=lock_path, group='oslo_concurrency') self.fixture.config( diff --git a/delfin/tests/unit/drivers/hpe/hpe_3par/test_hpe_3parstor.py b/delfin/tests/unit/drivers/hpe/hpe_3par/test_hpe_3parstor.py index a77f770f4..933f5cc91 100644 --- a/delfin/tests/unit/drivers/hpe/hpe_3par/test_hpe_3parstor.py +++ b/delfin/tests/unit/drivers/hpe/hpe_3par/test_hpe_3parstor.py @@ -114,9 +114,9 @@ def test_d_get_storage(self): 'serial_number': '1307327', 'firmware_version': '3.1.2.484', 'location': None, - 'total_capacity': 11300595826688, + 'total_capacity': 7793486594048, 'raw_capacity': 9594956939264, - 'used_capacity': 9594956939264, + 'used_capacity': 6087847706624, 'free_capacity': 1705638887424 } @@ -577,26 +577,12 @@ def test_h_parse_alert(self): # Verify that all other fields are matching self.assertDictEqual(expected_alert_model, alert_model) - def test_i_clear_alert(self): + def test_clear_alert(self): driver = create_driver() - alert = {'storage_id': 'abcd-1234-56789', - 'storage_name': 'storage1', 'vendor': 'fake vendor', - 'model': 'fake model', - 'hwIsmReportingAlarmLocationInfo': 'location1', - 'hwIsmReportingAlarmFaultTitle': 'Trap Test Alarm', - 'hwIsmReportingAlarmFaultType': 'equipmentFault', - 
'hwIsmReportingAlarmFaultLevel': 'criticalAlarm', - 'hwIsmReportingAlarmAlarmID': '4294967294', - 'hwIsmReportingAlarmSerialNo': '4294967295', - 'hwIsmReportingAlarmAdditionInfo': 'This is just for ' - 'testing.Please ' - 'ignore it', - 'hwIsmReportingAlarmLocationAlarmID': '230584300921369', - 'hwIsmReportingAlarmFaultTime': '2020-6-25,1:42:26.0' - } + alert_id = '230584300921369' with self.assertRaises(Exception) as exc: - driver.clear_alert(context, alert) + driver.clear_alert(context, alert_id) self.assertIn('Exception in SSH protocol', str(exc.exception)) """ diff --git a/delfin/tests/unit/drivers/test_api.py b/delfin/tests/unit/drivers/test_api.py index 483821159..7212ae721 100644 --- a/delfin/tests/unit/drivers/test_api.py +++ b/delfin/tests/unit/drivers/test_api.py @@ -202,8 +202,10 @@ def test_remove_storage(self, mock_storage, mock_access_info, api.discover_storage(context, ACCESS_INFO) storage_id = '12345' + + # Verify that driver instance not added to factory driver = api.driver_manager.driver_factory.get(storage_id, None) - self.assertIsNotNone(driver) + self.assertIsNone(driver) api.remove_storage(context, storage_id) @@ -211,89 +213,58 @@ def test_remove_storage(self, mock_storage, mock_access_info, self.assertIsNone(driver) @mock.patch.object(FakeStorageDriver, 'get_storage') - @mock.patch('delfin.db.storage_create') - @mock.patch('delfin.db.access_info_create') - @mock.patch('delfin.db.storage_get_all') - def test_get_storage(self, mock_storage, mock_access_info, - mock_storage_create, mock_fake): + @mock.patch('delfin.drivers.manager.DriverManager.get_driver') + def test_get_storage(self, driver_manager, mock_fake): + driver_manager.return_value = FakeStorageDriver() storage = copy.deepcopy(STORAGE) storage['id'] = '12345' - mock_storage.return_value = None - mock_access_info.return_value = ACCESS_INFO - mock_storage_create.return_value = storage mock_fake.return_value = storage api = API() - api.discover_storage(context, ACCESS_INFO) storage_id = 
'12345' - driver = api.driver_manager.driver_factory.get(storage_id, None) - self.assertIsNotNone(driver) api.get_storage(context, storage_id) + driver_manager.assert_called_once() mock_fake.assert_called() @mock.patch.object(FakeStorageDriver, 'list_storage_pools') - @mock.patch('delfin.db.storage_create') - @mock.patch('delfin.db.access_info_create') - @mock.patch('delfin.db.storage_get_all') - def test_list_storage_pools(self, mock_storage, mock_access_info, - mock_storage_create, mock_fake): - storage = copy.deepcopy(STORAGE) - storage['id'] = '12345' - mock_storage.return_value = None - mock_access_info.return_value = ACCESS_INFO - mock_storage_create.return_value = storage + @mock.patch('delfin.drivers.manager.DriverManager.get_driver') + def test_list_storage_pools(self, driver_manager, mock_fake): + driver_manager.return_value = FakeStorageDriver() mock_fake.return_value = [] api = API() - api.discover_storage(context, ACCESS_INFO) storage_id = '12345' - driver = api.driver_manager.driver_factory.get(storage_id, None) - self.assertIsNotNone(driver) api.list_storage_pools(context, storage_id) + driver_manager.assert_called_once() mock_fake.assert_called_once() @mock.patch.object(FakeStorageDriver, 'list_volumes') - @mock.patch('delfin.db.storage_create') - @mock.patch('delfin.db.access_info_create') - @mock.patch('delfin.db.storage_get_all') - def test_list_volumes(self, mock_storage, mock_access_info, - mock_storage_create, mock_fake): - storage = copy.deepcopy(STORAGE) - storage['id'] = '12345' - mock_storage.return_value = None - mock_access_info.return_value = ACCESS_INFO - mock_storage_create.return_value = storage + @mock.patch('delfin.drivers.manager.DriverManager.get_driver') + def test_list_volumes(self, driver_manager, mock_fake): + driver_manager.return_value = FakeStorageDriver() mock_fake.return_value = [] api = API() - api.discover_storage(context, ACCESS_INFO) - storage_id = '12345' - driver = api.driver_manager.driver_factory.get(storage_id, 
None) - self.assertIsNotNone(driver) api.list_volumes(context, storage_id) + driver_manager.assert_called_once() mock_fake.assert_called_once() @mock.patch.object(FakeStorageDriver, 'parse_alert') - @mock.patch('delfin.db.storage_create') - @mock.patch('delfin.db.access_info_create') - @mock.patch('delfin.db.storage_get_all') - def test_parse_alert(self, mock_storage, mock_access_info, - mock_storage_create, mock_fake): - storage = copy.deepcopy(STORAGE) - storage['id'] = '12345' - mock_storage.return_value = None + @mock.patch('delfin.drivers.manager.DriverManager.get_driver') + @mock.patch('delfin.db.access_info_get') + def test_parse_alert(self, mock_access_info, + driver_manager, mock_fake): mock_access_info.return_value = ACCESS_INFO - mock_storage_create.return_value = storage + driver_manager.return_value = FakeStorageDriver() mock_fake.return_value = [] api = API() - api.discover_storage(context, ACCESS_INFO) storage_id = '12345' - driver = api.driver_manager.driver_factory.get(storage_id, None) - self.assertIsNotNone(driver) api.parse_alert(context, storage_id, 'alert') + mock_access_info.assert_called_once() + driver_manager.assert_called_once() mock_fake.assert_called_once() From 068fcc84f12b9d7314d9820952d3c599e2b543d9 Mon Sep 17 00:00:00 2001 From: jiangyutan <69443713+jiangyutan@users.noreply.github.com> Date: Sat, 21 Nov 2020 15:44:31 +0800 Subject: [PATCH 02/15] ibm storwize_svc add sshpool and fixed some issue (#381) Add storage driver for IBM Storwize and svc series --- delfin/drivers/ibm/__init__.py | 0 delfin/drivers/ibm/storwize_svc/__init__.py | 0 .../drivers/ibm/storwize_svc/ssh_handler.py | 401 +++++++++++++++++ .../drivers/ibm/storwize_svc/storwize_svc.py | 52 +++ delfin/drivers/utils/ssh_client.py | 116 ++++- delfin/tests/unit/drivers/ibm/__init__.py | 0 .../unit/drivers/ibm/storwize_svc/__init__.py | 0 .../ibm/storwize_svc/test_ibm_storwize_svc.py | 423 ++++++++++++++++++ setup.py | 1 + 9 files changed, 991 insertions(+), 2 deletions(-) 
create mode 100644 delfin/drivers/ibm/__init__.py create mode 100644 delfin/drivers/ibm/storwize_svc/__init__.py create mode 100644 delfin/drivers/ibm/storwize_svc/ssh_handler.py create mode 100644 delfin/drivers/ibm/storwize_svc/storwize_svc.py create mode 100644 delfin/tests/unit/drivers/ibm/__init__.py create mode 100644 delfin/tests/unit/drivers/ibm/storwize_svc/__init__.py create mode 100644 delfin/tests/unit/drivers/ibm/storwize_svc/test_ibm_storwize_svc.py diff --git a/delfin/drivers/ibm/__init__.py b/delfin/drivers/ibm/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/delfin/drivers/ibm/storwize_svc/__init__.py b/delfin/drivers/ibm/storwize_svc/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/delfin/drivers/ibm/storwize_svc/ssh_handler.py b/delfin/drivers/ibm/storwize_svc/ssh_handler.py new file mode 100644 index 000000000..f07eba692 --- /dev/null +++ b/delfin/drivers/ibm/storwize_svc/ssh_handler.py @@ -0,0 +1,401 @@ +# Copyright 2020 The SODA Authors. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import time + +import paramiko +import six +from oslo_log import log as logging +from oslo_utils import units + +from delfin import exception, utils +from delfin.common import constants +from delfin.drivers.utils.ssh_client import SSHPool + +LOG = logging.getLogger(__name__) + + +class SSHHandler(object): + OID_ERR_ID = '1.3.6.1.4.1.2.6.190.4.3' + OID_SEQ_NUMBER = '1.3.6.1.4.1.2.6.190.4.9' + OID_LAST_TIME = '1.3.6.1.4.1.2.6.190.4.10' + OID_OBJ_TYPE = '1.3.6.1.4.1.2.6.190.4.11' + OID_OBJ_NAME = '1.3.6.1.4.1.2.6.190.4.17' + OID_SEVERITY = '1.3.6.1.6.3.1.1.4.1.0' + + TRAP_SEVERITY_MAP = { + '1.3.6.1.4.1.2.6.190.1': constants.Severity.CRITICAL, + '1.3.6.1.4.1.2.6.190.2': constants.Severity.WARNING, + '1.3.6.1.4.1.2.6.190.3': constants.Severity.INFORMATIONAL, + } + + SEVERITY_MAP = {"warning": "Warning", + "informational": "Informational", + "error": "Major" + } + + SECONDS_TO_MS = 1000 + + def __init__(self, **kwargs): + self.ssh_pool = SSHPool(**kwargs) + + @staticmethod + def handle_split(split_str, split_char, arr_number): + split_value = '' + if split_str is not None and split_str != '': + tmp_value = split_str.split(split_char, 1) + if arr_number == 1 and len(tmp_value) > 1: + split_value = tmp_value[arr_number].strip() + elif arr_number == 0: + split_value = tmp_value[arr_number].strip() + return split_value + + @staticmethod + def parse_alert(alert): + try: + alert_model = dict() + alert_name = SSHHandler.handle_split(alert.get( + SSHHandler.OID_ERR_ID), ':', 1) + error_info = SSHHandler.handle_split(alert.get( + SSHHandler.OID_ERR_ID), ':', 0) + alert_id = SSHHandler.handle_split(error_info, '=', 1) + severity = SSHHandler.TRAP_SEVERITY_MAP.get( + alert.get(SSHHandler.OID_SEVERITY), + constants.Severity.INFORMATIONAL + ) + alert_model['alert_id'] = str(alert_id) + alert_model['alert_name'] = alert_name + alert_model['severity'] = severity + alert_model['category'] = 'Fault' + alert_model['type'] = constants.EventType.EQUIPMENT_ALARM + 
alert_model['sequence_number'] = SSHHandler. \ + handle_split(alert.get(SSHHandler.OID_SEQ_NUMBER), '=', 1) + timestamp = SSHHandler. \ + handle_split(alert.get(SSHHandler.OID_LAST_TIME), '=', 1) + time_type = '%a %b %d %H:%M:%S %Y' + occur_time = int(time.mktime(time.strptime( + timestamp, + time_type))) + alert_model['occur_time'] = int(occur_time * SSHHandler. + SECONDS_TO_MS) + alert_model['description'] = alert_name + alert_model['resource_type'] = SSHHandler.handle_split( + alert.get(SSHHandler.OID_OBJ_TYPE), '=', 1) + alert_model['location'] = SSHHandler.handle_split(alert.get( + SSHHandler.OID_OBJ_NAME), '=', 1) + return alert_model + except Exception as e: + LOG.error(e) + msg = ("Failed to build alert model as some attributes missing " + "in alert message:%s.") % (six.text_type(e)) + raise exception.InvalidResults(msg) + + def login(self): + try: + with self.ssh_pool.item() as ssh: + SSHHandler.do_exec('lssystem', ssh) + except Exception as e: + LOG.error("Failed to login ibm storwize_svc %s" % + (six.text_type(e))) + raise e + + @staticmethod + def do_exec(command_str, ssh): + """Execute command""" + try: + utils.check_ssh_injection(command_str) + if command_str is not None and ssh is not None: + stdin, stdout, stderr = ssh.exec_command(command_str) + res, err = stdout.read(), stderr.read() + re = res if res else err + result = re.decode() + except paramiko.AuthenticationException as ae: + LOG.error('doexec Authentication error:{}'.format(ae)) + raise exception.InvalidUsernameOrPassword() + except Exception as e: + err = six.text_type(e) + LOG.error('doexec InvalidUsernameOrPassword error') + if 'timed out' in err: + raise exception.SSHConnectTimeout() + elif 'No authentication methods available' in err \ + or 'Authentication failed' in err: + raise exception.SSHInvalidUsernameOrPassword() + elif 'not a valid RSA private key file' in err: + raise exception.InvalidPrivateKey() + else: + raise exception.SSHException(err) + return result + + def 
exec_ssh_command(self, command): + try: + with self.ssh_pool.item() as ssh: + ssh_info = SSHHandler.do_exec(command, ssh) + return ssh_info + except Exception as e: + msg = "Failed to ssh ibm storwize_svc %s: %s" % \ + (command, six.text_type(e)) + raise exception.SSHException(msg) + + def change_capacity_to_bytes(self, unit): + unit = unit.upper() + if unit == 'TB': + result = units.Ti + elif unit == 'GB': + result = units.Gi + elif unit == 'MB': + result = units.Mi + elif unit == 'KB': + result = units.Ki + else: + result = 1 + return int(result) + + def parse_string(self, value): + capacity = 0 + if value: + if value.isdigit(): + capacity = float(value) + else: + unit = value[-2:] + capacity = float(value[:-2]) * int( + self.change_capacity_to_bytes(unit)) + return capacity + + def get_storage(self): + try: + system_info = self.exec_ssh_command('lssystem') + enclosure_info = self.exec_ssh_command('lsenclosure -delim :') + enclosure_res = enclosure_info.split('\n') + enclosure = enclosure_res[1].split(':') + serial_number = enclosure[7] + storage_map = {} + self.handle_detail(system_info, storage_map, split=' ') + + status = 'normal' if storage_map.get('statistics_status') == 'on' \ + else 'offline' + location = storage_map.get('location') + free_capacity = self.parse_string(storage_map.get( + 'total_free_space')) + used_capacity = self.parse_string(storage_map.get( + 'total_used_capacity')) + raw_capacity = self.parse_string(storage_map.get( + 'total_drive_raw_capacity')) + subscribed_capacity = self.parse_string(storage_map.get( + 'total_allocated_extent_capacity')) + s = { + 'name': storage_map.get('name'), + 'vendor': 'IBM', + 'model': storage_map.get('product_name'), + 'status': status, + 'serial_number': serial_number, + 'firmware_version': storage_map.get('code_level'), + 'location': location, + 'total_capacity': int(free_capacity + used_capacity), + 'raw_capacity': int(raw_capacity), + 'subscribed_capacity': int(subscribed_capacity), + 'used_capacity': 
int(used_capacity), + 'free_capacity': int(free_capacity) + } + return s + except exception.DelfinException as e: + err_msg = "Failed to get storage: %s" % (six.text_type(e.msg)) + LOG.error(err_msg) + raise e + except Exception as err: + err_msg = "Failed to get storage: %s" % (six.text_type(err)) + LOG.error(err_msg) + raise exception.InvalidResults(err_msg) + + def handle_detail(self, deltail_info, detail_map, split): + detail_arr = deltail_info.split('\n') + for detail in detail_arr: + if detail is not None and detail != '': + strinfo = detail.split(split, 1) + key = strinfo[0] + value = '' + if len(strinfo) > 1: + value = strinfo[1] + detail_map[key] = value + + def list_storage_pools(self, storage_id): + try: + pool_list = [] + pool_info = self.exec_ssh_command('lsmdiskgrp') + pool_res = pool_info.split('\n') + for i in range(1, len(pool_res)): + if pool_res[i] is None or pool_res[i] == '': + continue + + pool_str = ' '.join(pool_res[i].split()) + strinfo = pool_str.split(' ') + detail_command = 'lsmdiskgrp %s' % strinfo[0] + deltail_info = self.exec_ssh_command(detail_command) + pool_map = {} + self.handle_detail(deltail_info, pool_map, split=' ') + status = 'normal' if pool_map.get('status') == 'online' \ + else 'offline' + total_cap = self.parse_string(pool_map.get('capacity')) + free_cap = self.parse_string(pool_map.get('free_capacity')) + used_cap = self.parse_string(pool_map.get('used_capacity')) + subscribed_cap = self.parse_string(pool_map. 
+ get('real_capacity')) + p = { + 'name': pool_map.get('name'), + 'storage_id': storage_id, + 'native_storage_pool_id': pool_map.get('id'), + 'description': '', + 'status': status, + 'storage_type': constants.StorageType.BLOCK, + 'total_capacity': int(total_cap), + 'subscribed_capacity': int(subscribed_cap), + 'used_capacity': int(used_cap), + 'free_capacity': int(free_cap) + } + pool_list.append(p) + + return pool_list + except exception.DelfinException as e: + err_msg = "Failed to get storage pool: %s" % (six.text_type(e)) + LOG.error(err_msg) + raise e + except Exception as err: + err_msg = "Failed to get storage pool: %s" % (six.text_type(err)) + LOG.error(err_msg) + raise exception.InvalidResults(err_msg) + + def list_volumes(self, storage_id): + try: + volume_list = [] + volume_info = self.exec_ssh_command('lsvdisk') + volume_res = volume_info.split('\n') + for i in range(1, len(volume_res)): + if volume_res[i] is None or volume_res[i] == '': + continue + volume_str = ' '.join(volume_res[i].split()) + strinfo = volume_str.split(' ') + volume_name = strinfo[1] + detail_command = 'lsvdisk -delim : %s' % volume_name + deltail_info = self.exec_ssh_command(detail_command) + volume_map = {} + self.handle_detail(deltail_info, volume_map, split=':') + status = 'normal' if volume_map.get('status') == 'online' \ + else 'offline' + volume_type = 'thin' if volume_map.get('se_copy') == 'yes' \ + else 'thick' + total_capacity = self.parse_string(volume_map.get('capacity')) + free_capacity = self.parse_string(volume_map. + get('free_capacity')) + used_capacity = self.parse_string(volume_map. 
+ get('used_capacity')) + compressed = True + deduplicated = True + if volume_map.get('compressed_copy') == 'no': + compressed = False + if volume_map.get('deduplicated_copy') == 'no': + deduplicated = False + + v = { + 'name': volume_map.get('name'), + 'storage_id': storage_id, + 'description': '', + 'status': status, + 'native_volume_id': str(volume_map.get('id')), + 'native_storage_pool_id': volume_map.get('mdisk_grp_id'), + 'wwn': str(volume_map.get('vdisk_UID')), + 'type': volume_type, + 'total_capacity': int(total_capacity), + 'used_capacity': int(used_capacity), + 'free_capacity': int(free_capacity), + 'compressed': compressed, + 'deduplicated': deduplicated + } + volume_list.append(v) + + return volume_list + except exception.DelfinException as e: + err_msg = "Failed to get storage volume: %s" % (six.text_type(e)) + LOG.error(err_msg) + raise e + except Exception as err: + err_msg = "Failed to get storage volume: %s" % (six.text_type(err)) + LOG.error(err_msg) + raise exception.InvalidResults(err_msg) + + def judge_alert_time(self, alert_map, query_para): + if len(alert_map) <= 1: + return False + if query_para is None and len(alert_map) > 1: + return True + occur_time = int(alert_map.get('last_timestamp_epoch')) * \ + self.SECONDS_TO_MS + if query_para.get('begin_time') and query_para.get('end_time'): + if occur_time >= int(query_para.get('begin_time')) and \ + occur_time <= int(query_para.get('end_time')): + return True + if query_para.get('begin_time'): + if occur_time >= int(query_para.get('begin_time')): + return True + if query_para.get('end_time'): + if occur_time <= int(query_para.get('end_time')): + return True + return False + + def list_alerts(self, query_para): + try: + alert_list = [] + alert_info = self.exec_ssh_command('lseventlog -monitoring yes') + alert_res = alert_info.split('\n') + for i in range(1, len(alert_res)): + if alert_res[i] is None or alert_res[i] == '': + continue + alert_str = ' '.join(alert_res[i].split()) + strinfo = 
alert_str.split(' ', 9) + detail_command = 'lseventlog %s' % strinfo[0] + deltail_info = self.exec_ssh_command(detail_command) + alert_map = {} + self.handle_detail(deltail_info, alert_map, split=' ') + if self.judge_alert_time(alert_map, query_para) is False: + continue + time_stamp = int(alert_map.get('last_timestamp_epoch')) * \ + self.SECONDS_TO_MS + alert_name = alert_map.get('event_id_text', '') + event_id = alert_map.get('event_id') + location = alert_map.get('object_name', '') + resource_type = alert_map.get('object_type', '') + severity = self.SEVERITY_MAP.get(alert_map. + get('notification_type')) + + alert_model = { + 'alert_id': event_id, + 'alert_name': alert_name, + 'severity': severity, + 'category': 'Fault', + 'type': 'EquipmentAlarm', + 'sequence_number': alert_map.get('sequence_number'), + 'occur_time': time_stamp, + 'description': alert_name, + 'resource_type': resource_type, + 'location': location + } + alert_list.append(alert_model) + + return alert_list + except exception.DelfinException as e: + err_msg = "Failed to get storage alert: %s" % (six.text_type(e)) + LOG.error(err_msg) + raise e + except Exception as err: + err_msg = "Failed to get storage alert: %s" % (six.text_type(err)) + LOG.error(err_msg) + raise exception.InvalidResults(err_msg) diff --git a/delfin/drivers/ibm/storwize_svc/storwize_svc.py b/delfin/drivers/ibm/storwize_svc/storwize_svc.py new file mode 100644 index 000000000..e0350cca3 --- /dev/null +++ b/delfin/drivers/ibm/storwize_svc/storwize_svc.py @@ -0,0 +1,52 @@ +# Copyright 2020 The SODA Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from delfin.drivers import driver +from delfin.drivers.ibm.storwize_svc import ssh_handler + + +class StorwizeSVCDriver(driver.StorageDriver): + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.ssh_hanlder = ssh_handler.SSHHandler(**kwargs) + self.ssh_hanlder.login() + + def reset_connection(self, context, **kwargs): + self.ssh_hanlder.login() + + def get_storage(self, context): + return self.ssh_hanlder.get_storage() + + def list_storage_pools(self, context): + return self.ssh_hanlder.list_storage_pools(self.storage_id) + + def list_volumes(self, context): + return self.ssh_hanlder.list_volumes(self.storage_id) + + def list_alerts(self, context, query_para=None): + return self.ssh_hanlder.list_alerts(query_para) + + def add_trap_config(self, context, trap_config): + pass + + def remove_trap_config(self, context, trap_config): + pass + + @staticmethod + def parse_alert(context, alert): + return ssh_handler.SSHHandler().parse_alert(alert) + + def clear_alert(self, context, alert): + pass diff --git a/delfin/drivers/utils/ssh_client.py b/delfin/drivers/utils/ssh_client.py index 25a89b908..64d0573a8 100644 --- a/delfin/drivers/utils/ssh_client.py +++ b/delfin/drivers/utils/ssh_client.py @@ -1,5 +1,5 @@ # Copyright 2020 The SODA Authors. -# Copyright (c) 2016 Huawei Technologies Co., Ltd. +# Copyright 2011 OpenStack LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,8 +13,9 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. - import paramiko +import six +from eventlet import pools from oslo_log import log as logging from paramiko.hostkeys import HostKeyEntry @@ -127,3 +128,114 @@ def do_exec(self, command_str): finally: self.close() return re + + +class SSHPool(pools.Pool): + SOCKET_TIMEOUT = 10 + + def __init__(self, **kwargs): + ssh_access = kwargs.get('ssh') + if ssh_access is None: + raise exception.InvalidInput('Input ssh_access is missing') + self.ssh_host = ssh_access.get('host') + self.ssh_port = ssh_access.get('port') + self.ssh_username = ssh_access.get('username') + self.ssh_password = ssh_access.get('password') + self.ssh_pub_key_type = ssh_access.get('pub_key_type') + self.ssh_pub_key = ssh_access.get('pub_key') + self.ssh_conn_timeout = ssh_access.get('conn_timeout') + self.conn_timeout = self.SOCKET_TIMEOUT + if self.ssh_conn_timeout is None: + self.ssh_conn_timeout = SSHPool.SOCKET_TIMEOUT + super(SSHPool, self).__init__(min_size=0, max_size=3) + + def set_host_key(self, host_key, ssh): + """ + Set public key,because input kwargs parameter host_key is string, + not a file path,we can not use load file to get public key,so we set + it as a string. 
+ :param str host_key: the public key which as a string + """ + if (len(host_key) == 0) or (host_key[0] == "#"): + return + try: + e = HostKeyEntry.from_line(host_key) + except exception.SSHException: + return + if e is not None: + host_names = e.hostnames + for h in host_names: + if ssh._host_keys.check(h, e.key): + e.hostnames.remove(h) + if len(e.hostnames): + ssh._host_keys._entries.append(e) + + def create(self): + ssh = paramiko.SSHClient() + try: + if self.ssh_pub_key is None: + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + else: + host_key = '%s %s %s' % \ + (self.ssh_host, self.ssh_pub_key_type, + self.ssh_pub_key) + self.set_host_key(host_key, ssh) + + ssh.connect(hostname=self.ssh_host, port=self.ssh_port, + username=self.ssh_username, + password=cryptor.decode(self.ssh_password), + timeout=self.ssh_conn_timeout) + if self.conn_timeout: + transport = ssh.get_transport() + transport.set_keepalive(self.SOCKET_TIMEOUT) + return ssh + except Exception as e: + err = six.text_type(e) + LOG.error('doexec InvalidUsernameOrPassword error') + if 'timed out' in err: + raise exception.SSHConnectTimeout() + elif 'No authentication methods available' in err \ + or 'Authentication failed' in err: + raise exception.SSHInvalidUsernameOrPassword() + elif 'not a valid RSA private key file' in err: + raise exception.InvalidPrivateKey() + elif 'not found in known_hosts' in err: + raise exception.SSHNotFoundKnownHosts(self.ssh_host) + else: + raise exception.SSHException(err) + + def get(self): + """Return an item from the pool, when one is available. + + This may cause the calling greenthread to block. Check if a + connection is active before returning it. For dead connections + create and return a new connection. 
+ """ + if self.free_items: + conn = self.free_items.popleft() + if conn: + if conn.get_transport().is_active(): + return conn + else: + conn.close() + return self.create() + if self.current_size < self.max_size: + created = self.create() + self.current_size += 1 + return created + return self.channel.get() + + def remove(self, ssh): + """Close an ssh client and remove it from free_items.""" + ssh.close() + if ssh in self.free_items: + self.free_items.remove(ssh) + if self.current_size > 0: + self.current_size -= 1 + + def put(self, conn): + if self.current_size > self.max_size: + conn.close() + self.current_size -= 1 + return + super(SSHPool, self).put(conn) diff --git a/delfin/tests/unit/drivers/ibm/__init__.py b/delfin/tests/unit/drivers/ibm/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/delfin/tests/unit/drivers/ibm/storwize_svc/__init__.py b/delfin/tests/unit/drivers/ibm/storwize_svc/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/delfin/tests/unit/drivers/ibm/storwize_svc/test_ibm_storwize_svc.py b/delfin/tests/unit/drivers/ibm/storwize_svc/test_ibm_storwize_svc.py new file mode 100644 index 000000000..3078c482e --- /dev/null +++ b/delfin/tests/unit/drivers/ibm/storwize_svc/test_ibm_storwize_svc.py @@ -0,0 +1,423 @@ +# Copyright 2020 The SODA Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from unittest import TestCase, mock + +import paramiko + +from delfin import context +from delfin.drivers.ibm.storwize_svc.ssh_handler import SSHHandler +from delfin.drivers.ibm.storwize_svc.storwize_svc import StorwizeSVCDriver +from delfin.drivers.utils.ssh_client import SSHPool + + +class Request: + def __init__(self): + self.environ = {'delfin.context': context.RequestContext()} + pass + + +ACCESS_INFO = { + "storage_id": "12345", + "vendor": "hpe", + "model": "3par", + "rest": { + "host": "10.0.0.1", + "port": 8443, + "username": "user", + "password": "pass" + }, + "ssh": { + "host": "110.143.132.231", + "port": 22, + "username": "user", + "password": "pass", + "pub_key": "ddddddddddddddddddddddddd" + } +} + +system_info = """id 00000200A1207E1F +name Cluster_192.168.70.125 +location local +partnership +total_mdisk_capacity 8.1TB +space_in_mdisk_grps 8.1TB +space_allocated_to_vdisks 5.06TB +total_free_space 3.1TB +total_vdiskcopy_capacity 5.51TB +total_used_capacity 5.05TB +total_overallocation 67 +total_vdisk_capacity 5.51TB +total_allocated_extent_capacity 5.07TB +statistics_status on +statistics_frequency 5 +cluster_locale en_US +time_zone 246 Asia/Shanghai +code_level 7.4.0.11 (build 103.29.1609070000) +console_IP 51.10.58.200:443 +id_alias 00000200A1007E1F +gm_link_tolerance 300 +gm_inter_cluster_delay_simulation 0 +gm_intra_cluster_delay_simulation 0 +gm_max_host_delay 5 +email_reply +email_contact +email_contact_primary +email_contact_alternate +email_contact_location +email_contact2 +email_contact2_primary +email_contact2_alternate +email_state stopped +inventory_mail_interval 0 +cluster_ntp_IP_address +cluster_isns_IP_address +iscsi_auth_method none +iscsi_chap_secret +auth_service_configured no +auth_service_enabled no +auth_service_url +auth_service_user_name +auth_service_pwd_set no +auth_service_cert_set no +auth_service_type tip +relationship_bandwidth_limit 25 +tier ssd +tier_capacity 0.00MB +tier_free_capacity 0.00MB +tier enterprise 
+tier_capacity 0.00MB +tier_free_capacity 0.00MB +tier nearline +tier_capacity 8.13TB +tier_free_capacity 3.06TB +has_nas_key no +layer storage +rc_buffer_size 48 +compression_active no +compression_virtual_capacity 0.00MB +compression_compressed_capacity 0.00MB +compression_uncompressed_capacity 0.00MB +cache_prefetch on +email_organization +email_machine_address +email_machine_city +email_machine_state XX +email_machine_zip +email_machine_country +total_drive_raw_capacity 10.92TB +compression_destage_mode off +local_fc_port_mask 1111111111111111111111111111111 +partner_fc_port_mask 11111111111111111111111111111 +high_temp_mode off +topology standard +topology_status +rc_auth_method none +vdisk_protection_time 15 +vdisk_protection_enabled no +product_name IBM Storwize V7000 +max_replication_delay 0 +partnership_exclusion_threshold 315 +""" + +enclosure_info = """id:status:type:managed:IO_id:IO_group_name:product_MTM +1:online:control:yes:0:io_grp0:2076-124:78N16G4:2:2:2:2:24:0:0 +""" + +pools_info = """id name status mdisk_count vdisk_count capacity +1 mdiskgrp0 online 1 101 8.13TB 1024 3.06TB +""" + +pool_info = """id 1 +name mdiskgrp0 +status online +mdisk_count 1 +vdisk_count 101 +capacity 8.13TB +extent_size 1024 +free_capacity 3.06TB +virtual_capacity 5.51TB +used_capacity 5.05TB +real_capacity 5.06TB +overallocation 67 +warning 80 +easy_tier auto +easy_tier_status balanced +tier ssd +tier_mdisk_count 0 +tier_capacity 0.00MB +tier_free_capacity 0.00MB +tier enterprise +tier_mdisk_count 0 +tier_capacity 0.00MB +tier_free_capacity 0.00MB +tier nearline +tier_mdisk_count 1 +tier_capacity 8.13TB +tier_free_capacity 3.06TB +compression_active no +compression_virtual_capacity 0.00MB +compression_compressed_capacity 0.00MB +compression_uncompressed_capacity 0.00MB +site_id +site_name +parent_mdisk_grp_id 1 +parent_mdisk_grp_name mdiskgrp0 +child_mdisk_grp_count 0 +child_mdisk_grp_capacity 0.00MB +type parent +encrypt no +""" + +volumes_info = """id name IO_group_id 
IO_group_name status +0 V7000LUN_Mig 0 io_grp0 online 1 +""" + +volume_info = """id:0 +name:V7000LUN_Mig +IO_group_id:0 +IO_group_name:io_grp0 +status:online +mdisk_grp_id:1 +mdisk_grp_name:mdiskgrp0 +capacity:50.00GB +type:striped +formatted:no +mdisk_id: +mdisk_name: +FC_id: +FC_name: +RC_id: +RC_name: +vdisk_UID:60050768028401F87C00000000000000 +throttling:0 +preferred_node_id:3 +fast_write_state:empty +cache:readwrite +udid: +fc_map_count:0 +sync_rate:50 +copy_count:1 +se_copy_count:0 +filesystem: +mirror_write_priority:latency +RC_change:no +compressed_copy_count:0 +access_IO_group_count:1 +last_access_time:190531130236 +parent_mdisk_grp_id:1 +parent_mdisk_grp_name:mdiskgrp0 + +copy_id:0 +status:online +sync:yes +primary:yes +mdisk_grp_id:1 +mdisk_grp_name:mdiskgrp0 +type:striped +mdisk_id: +mdisk_name: +fast_write_state:empty +used_capacity:50.00GB +real_capacity:50.00GB +free_capacity:0.00MB +overallocation:100 +autoexpand: +warning: +grainsize: +se_copy:no +easy_tier:on +easy_tier_status:balanced +tier:ssd +tier_capacity:0.00MB +tier:enterprise +tier_capacity:0.00MB +tier:nearline +tier_capacity:50.00GB +compressed_copy:no +uncompressed_used_capacity:50.00GB +parent_mdisk_grp_id:1 +parent_mdisk_grp_name:mdiskgrp0 +""" + +alerts_info = """sequence_number last_timestamp object_type object_id +101 201111165750 node 3 node1 +""" + +alert_info = """sequence_number 101 +first_timestamp 201111165750 +first_timestamp_epoch 1605085070 +last_timestamp 201111165750 +last_timestamp_epoch 1605085070 +object_type node +object_id 3 +object_name node1 +copy_id +reporting_node_id 3 +reporting_node_name node1 +root_sequence_number +event_count 1 +status message +fixed no +auto_fixed no +notification_type informational +event_id 980221 +event_id_text Error log cleared +error_code +error_code_text +machine_type 2076124 +serial_number 78N16G4 +FRU None +fixed_timestamp +fixed_timestamp_epoch +callhome_type none +sense1 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +sense2 00 
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +sense3 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +sense4 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +sense5 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +sense6 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +sense7 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +sense8 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +""" + +trap_info = { + '1.3.6.1.2.1.1.3.0': '0', + '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.2.6.190.3', + '1.3.6.1.4.1.2.6.190.4.1': '# Machine Type = 2076124', + '1.3.6.1.4.1.2.6.190.4.2': '# Serial Number = 78N16G4', + '1.3.6.1.4.1.2.6.190.4.3': '# Error ID = 981004 : FC discovery occurred, ' + 'no configuration changes were detected', + '1.3.6.1.4.1.2.6.190.4.4': '# Error Code = ', + '1.3.6.1.4.1.2.6.190.4.5': '# System Version = 7.4.0.11 (build 103.29.' + '1609070000)', + '1.3.6.1.4.1.2.6.190.4.6': '# FRU = None ', + '1.3.6.1.4.1.2.6.190.4.7': '# System Name = Cluster_192.168.70.125', + '1.3.6.1.4.1.2.6.190.4.8': '# Node ID = 3', + '1.3.6.1.4.1.2.6.190.4.9': '# Error Sequence Number = 165', + '1.3.6.1.4.1.2.6.190.4.10': '# Timestamp = Tue Nov 10 09:08:27 2020', + '1.3.6.1.4.1.2.6.190.4.11': '# Object Type = cluster', + '1.3.6.1.4.1.2.6.190.4.12': '# Object ID = 0', + '1.3.6.1.4.1.2.6.190.4.17': '# Object Name = Cluster_192.168.70.125', + '1.3.6.1.4.1.2.6.190.4.15': '# Copy ID = ', + '1.3.6.1.4.1.2.6.190.4.16': '# Machine Part Number = ', + '1.3.6.1.4.1.2.6.190.4.13': '# Additional Data (0 -> 63) = 01080000018A0', + '1.3.6.1.4.1.2.6.190.4.14': '# Additional Data (64 -> 127) = 00000000000', + 'transport_address': '51.10.58.200', + 'storage_id': '4992d7f5-4f73-4123-a27b-6e27889f3852' +} + + +def create_driver(): + + SSHHandler.login = mock.Mock( + return_value={""}) + + return StorwizeSVCDriver(**ACCESS_INFO) + + +class TestStorwizeSvcStorageDriver(TestCase): + driver = create_driver() + + def test_init(self): + SSHHandler.login = mock.Mock( + return_value={""}) + 
StorwizeSVCDriver(**ACCESS_INFO) + + def test_list_storage(self): + SSHPool.get = mock.Mock( + return_value={paramiko.SSHClient()}) + SSHHandler.do_exec = mock.Mock( + side_effect=[system_info, enclosure_info]) + self.driver.get_storage(context) + + def test_list_storage_pools(self): + SSHPool.get = mock.Mock( + return_value={paramiko.SSHClient()}) + SSHHandler.do_exec = mock.Mock( + side_effect=[pools_info, pool_info]) + self.driver.list_storage_pools(context) + + def test_list_volumes(self): + SSHPool.get = mock.Mock( + return_value={paramiko.SSHClient()}) + SSHHandler.do_exec = mock.Mock( + side_effect=[volumes_info, volume_info]) + self.driver.list_volumes(context) + + def test_list_alerts(self): + query_para = { + "begin_time": 160508506000, + "end_time": 160508507000 + } + SSHPool.get = mock.Mock( + return_value={paramiko.SSHClient()}) + SSHHandler.do_exec = mock.Mock( + side_effect=[alerts_info, alert_info]) + self.driver.list_alerts(context, query_para) + + def test_list_storage_with_error(self): + with self.assertRaises(Exception) as exc: + self.driver.get_storage(context) + self.assertIn('Exception in SSH protocol negotiation or logic', + str(exc.exception)) + + def test_list_pool_with_error(self): + with self.assertRaises(Exception) as exc: + self.driver.list_storage_pools(context) + self.assertIn('Exception in SSH protocol negotiation or logic', + str(exc.exception)) + + def test_list_volume_with_error(self): + with self.assertRaises(Exception) as exc: + self.driver.list_volumes(context) + self.assertIn('Exception in SSH protocol negotiation or logic', + str(exc.exception)) + + def test_init_ssh_exec(self): + with self.assertRaises(Exception) as exc: + ssh = paramiko.SSHClient() + SSHHandler.do_exec('lssystem', ssh) + self.assertIn('', str(exc.exception)) + + def test_ssh_pool_create(self): + with self.assertRaises(Exception) as exc: + kwargs = ACCESS_INFO + ssh_pool = SSHPool(**kwargs) + ssh_pool.create() + self.assertIn('Exception in SSH protocol 
negotiation or logic', + str(exc.exception)) + + def test_ssh_pool_put(self): + ssh_pool = SSHPool(**ACCESS_INFO) + ssh = paramiko.SSHClient() + ssh_pool.put(ssh) + ssh_pool.remove(ssh) + + def test_parse_alert(self): + SSHHandler.parse_alert(trap_info) + + def test_reset_connection(self): + self.driver.reset_connection(context, **ACCESS_INFO) + + def test_add_trap_config(self): + trap_config = '' + self.driver.add_trap_config(context, trap_config) + + def test_remove_trap_config(self): + trap_config = '' + self.driver.remove_trap_config(context, trap_config) + + def test_clear_alert(self): + alert = '' + self.driver.clear_alert(context, alert) diff --git a/setup.py b/setup.py index 211101549..001c54ef1 100644 --- a/setup.py +++ b/setup.py @@ -36,6 +36,7 @@ 'dellemc vmax = delfin.drivers.dell_emc.vmax.vmax:VMAXStorageDriver', 'hpe 3par = delfin.drivers.hpe.hpe_3par.hpe_3parstor:Hpe3parStorDriver', 'huawei oceanstor = delfin.drivers.huawei.oceanstor.oceanstor:OceanStorDriver' + 'ibm storwize_svc = delfin.drivers.ibm.storwize_svc.storwize_svc:StorwizeSVCDriver' ] }, ) From ae88070b1d0c4b460f3a493aaf848b0cff28b541 Mon Sep 17 00:00:00 2001 From: jiangyutan <69443713+jiangyutan@users.noreply.github.com> Date: Wed, 25 Nov 2020 15:26:54 +0800 Subject: [PATCH 03/15] add hitachi vsp storage driver to community (#388) Add hitachi vsp driver --- delfin/drivers/{ibm => hitachi}/__init__.py | 0 delfin/drivers/hitachi/vsp/__init__.py | 0 delfin/drivers/hitachi/vsp/consts.py | 20 + delfin/drivers/hitachi/vsp/rest_handler.py | 203 +++++++++ delfin/drivers/hitachi/vsp/vsp_stor.py | 297 +++++++++++++ delfin/tests/unit/drivers/hitachi/__init__.py | 0 .../unit/drivers/hitachi/vsp/__init__.py | 0 .../hitachi/vsp/test_hitachi_vspstor.py | 402 ++++++++++++++++++ setup.py | 3 +- 9 files changed, 924 insertions(+), 1 deletion(-) rename delfin/drivers/{ibm => hitachi}/__init__.py (100%) create mode 100644 delfin/drivers/hitachi/vsp/__init__.py create mode 100644 
delfin/drivers/hitachi/vsp/consts.py create mode 100644 delfin/drivers/hitachi/vsp/rest_handler.py create mode 100644 delfin/drivers/hitachi/vsp/vsp_stor.py create mode 100644 delfin/tests/unit/drivers/hitachi/__init__.py create mode 100644 delfin/tests/unit/drivers/hitachi/vsp/__init__.py create mode 100644 delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py diff --git a/delfin/drivers/ibm/__init__.py b/delfin/drivers/hitachi/__init__.py similarity index 100% rename from delfin/drivers/ibm/__init__.py rename to delfin/drivers/hitachi/__init__.py diff --git a/delfin/drivers/hitachi/vsp/__init__.py b/delfin/drivers/hitachi/vsp/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/delfin/drivers/hitachi/vsp/consts.py b/delfin/drivers/hitachi/vsp/consts.py new file mode 100644 index 000000000..fccd24983 --- /dev/null +++ b/delfin/drivers/hitachi/vsp/consts.py @@ -0,0 +1,20 @@ +# Copyright 2020 The SODA Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+SOCKET_TIMEOUT = 52 +ERROR_SESSION_INVALID_CODE = 403 +ERROR_SESSION_IS_BEING_USED_CODE = 409 +BLOCK_SIZE = 512 + +SUPPORTED_VSP_SERIES = ('VSP G350', 'VSP G370', 'VSP G700', 'VSP G900', + 'VSP F350', 'VSP F370', 'VSP F700', 'VSP F900') diff --git a/delfin/drivers/hitachi/vsp/rest_handler.py b/delfin/drivers/hitachi/vsp/rest_handler.py new file mode 100644 index 000000000..f4771f893 --- /dev/null +++ b/delfin/drivers/hitachi/vsp/rest_handler.py @@ -0,0 +1,203 @@ +# Copyright 2020 The SODA Authors. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import threading + +import requests +import six +from oslo_log import log as logging + +from delfin import cryptor +from delfin import exception +from delfin.drivers.hitachi.vsp import consts +from delfin.drivers.utils.rest_client import RestClient + +LOG = logging.getLogger(__name__) + + +class RestHandler(RestClient): + COMM_URL = '/ConfigurationManager/v1/objects/storages' + LOGOUT_URL = '/ConfigurationManager/v1/objects/sessions/' + + AUTH_KEY = 'Authorization' + + def __init__(self, **kwargs): + super(RestHandler, self).__init__(**kwargs) + self.session_lock = threading.Lock() + self.session_id = None + self.storage_device_id = None + self.device_model = None + self.serial_number = None + + def call(self, url, data=None, method=None): + try: + res = self.do_call(url, data, method, + calltimeout=consts.SOCKET_TIMEOUT) + if (res.status_code == consts.ERROR_SESSION_INVALID_CODE + or res.status_code == + consts.ERROR_SESSION_IS_BEING_USED_CODE): + LOG.error("Failed to get token=={0}=={1},get token again" + .format(res.status_code, res.text)) + # if method is logout,return immediately + if method == 'DELETE' and RestHandler. \ + LOGOUT_URL in url: + return res + self.rest_auth_token = None + access_session = self.login() + if access_session is not None: + res = self. 
\ + do_call(url, data, method, + calltimeout=consts.SOCKET_TIMEOUT) + else: + LOG.error('Login error,get access_session failed') + elif res.status_code == 503: + raise exception.InvalidResults(res.text) + + return res + + except Exception as e: + err_msg = "Get RestHandler.call failed: %s" % (six.text_type(e)) + LOG.error(err_msg) + raise exception.InvalidResults(err_msg) + + def get_rest_info(self, url, data=None): + result_json = None + res = self.call(url, data, 'GET') + if res.status_code == 200: + result_json = res.json() + return result_json + + def login(self): + try: + self.get_device_id() + access_session = self.rest_auth_token + if self.san_address: + url = '%s/%s/sessions' % \ + (RestHandler.COMM_URL, + self.storage_device_id) + data = {} + + with self.session_lock: + if self.session is None: + self.init_http_head() + self.session.auth = \ + requests.auth.HTTPBasicAuth( + self.rest_username, + cryptor.decode(self.rest_password)) + res = self. \ + do_call(url, data, 'POST', + calltimeout=consts.SOCKET_TIMEOUT) + if res.status_code == 200: + result = res.json() + self.session_id = result.get('sessionId') + access_session = 'Session %s' % result.get('token') + self.rest_auth_token = access_session + self.session.headers[ + RestHandler.AUTH_KEY] = access_session + else: + LOG.error("Login error. 
URL: %(url)s\n" + "Reason: %(reason)s.", + {"url": url, "reason": res.text}) + if 'invalid username or password' in res.text: + raise exception.InvalidUsernameOrPassword() + else: + raise exception.BadResponse(res.text) + else: + LOG.error('Login Parameter error') + + return access_session + except Exception as e: + LOG.error("Login error: %s", six.text_type(e)) + raise e + + def logout(self): + try: + url = RestHandler.LOGOUT_URL + if self.session_id is not None: + url = '%s/%s/sessions/%s' % \ + (RestHandler.COMM_URL, + self.storage_device_id, + self.session_id) + if self.san_address: + self.call(url, method='DELETE') + self.session_id = None + self.storage_device_id = None + self.device_model = None + self.serial_number = None + self.session = None + self.rest_auth_token = None + else: + LOG.error('logout error:session id not found') + except Exception as err: + LOG.error('logout error:{}'.format(err)) + raise exception.StorageBackendException( + reason='Failed to Logout from restful') + + def get_device_id(self): + try: + if self.session is None: + self.init_http_head() + storage_systems = self.get_system_info() + system_info = storage_systems.get('data') + for system in system_info: + if system.get('model') in consts.SUPPORTED_VSP_SERIES: + if system.get('ctl1Ip') == self.rest_host or \ + system.get('ctl2Ip') == self.rest_host: + self.storage_device_id = system.get('storageDeviceId') + self.device_model = system.get('model') + self.serial_number = system.get('serialNumber') + break + elif system.get('svpIp') == self.rest_host: + self.storage_device_id = system.get('storageDeviceId') + self.device_model = system.get('model') + self.serial_number = system.get('serialNumber') + break + if self.storage_device_id is None: + LOG.error("Get device id fail,model or something is wrong") + except Exception as e: + LOG.error("Get device id error: %s", six.text_type(e)) + raise e + + def get_firmware_version(self): + url = '%s/%s' % \ + (RestHandler.COMM_URL, 
self.storage_device_id) + result_json = self.get_rest_info(url) + if result_json is None: + return None + firmware_version = result_json.get('dkcMicroVersion') + + return firmware_version + + def get_capacity(self): + url = '%s/%s/total-capacities/instance' % \ + (RestHandler.COMM_URL, self.storage_device_id) + result_json = self.get_rest_info(url) + return result_json + + def get_all_pools(self): + url = '%s/%s/pools' % \ + (RestHandler.COMM_URL, self.storage_device_id) + result_json = self.get_rest_info(url) + return result_json + + def get_all_volumes(self): + url = '%s/%s/ldevs' % \ + (RestHandler.COMM_URL, self.storage_device_id) + result_json = self.get_rest_info(url) + return result_json + + def get_system_info(self): + result_json = self.get_rest_info(RestHandler.COMM_URL) + + return result_json diff --git a/delfin/drivers/hitachi/vsp/vsp_stor.py b/delfin/drivers/hitachi/vsp/vsp_stor.py new file mode 100644 index 000000000..540765ee6 --- /dev/null +++ b/delfin/drivers/hitachi/vsp/vsp_stor.py @@ -0,0 +1,297 @@ +# Copyright 2020 The SODA Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import time + +import six +from oslo_log import log +from oslo_utils import units + +from delfin import exception +from delfin.common import alert_util +from delfin.common import constants +from delfin.drivers import driver +from delfin.drivers.hitachi.vsp import consts +from delfin.drivers.hitachi.vsp import rest_handler + +LOG = log.getLogger(__name__) + + +class HitachiVspDriver(driver.StorageDriver): + POOL_STATUS_MAP = {"POLN": constants.StoragePoolStatus.NORMAL, + "POLF": constants.StoragePoolStatus.ABNORMAL, + "POLS": constants.StoragePoolStatus.ABNORMAL, + "POLE": constants.StoragePoolStatus.OFFLINE + } + ALERT_LEVEL_MAP = {"Acute": constants.Severity.CRITICAL, + "Serious": constants.Severity.MAJOR, + "Moderate": constants.Severity.WARNING, + "Service": constants.Severity.INFORMATIONAL + } + + TIME_PATTERN = '%Y-%m-%dT%H:%M:%S' + + REFCODE_OID = '1.3.6.1.4.1.116.5.11.4.2.3' + DESC_OID = '1.3.6.1.4.1.116.5.11.4.2.7' + TRAP_TIME_OID = '1.3.6.1.4.1.116.5.11.4.2.6' + TRAP_DATE_OID = '1.3.6.1.4.1.116.5.11.4.2.5' + TRAP_NICKNAME_OID = '1.3.6.1.4.1.116.5.11.4.2.2' + LOCATION_OID = '1.3.6.1.4.1.116.5.11.4.2.4' + SECONDS_TO_MS = 1000 + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.rest_handler = rest_handler.RestHandler(**kwargs) + self.rest_handler.login() + + def reset_connection(self, context, **kwargs): + self.rest_handler.logout() + self.rest_handler.verify = kwargs.get('verify', False) + self.rest_handler.login() + + def close_connection(self): + self.rest_handler.logout() + + def get_storage(self, context): + self.rest_handler.get_device_id() + if self.rest_handler.device_model in consts.SUPPORTED_VSP_SERIES: + capacity_json = self.rest_handler.get_capacity() + free_capacity = capacity_json.get("total").get("freeSpace") * \ + units.Ki + total_capacity = \ + capacity_json.get("total").get("totalCapacity") * units.Ki + else: + free_capacity = 0 + total_capacity = 0 + pools_info = self.rest_handler.get_all_pools() + if pools_info is not 
None: + pools = pools_info.get('data') + for pool in pools: + total_cap = \ + int(pool.get( + 'totalPoolCapacity')) * units.Mi + free_cap = int( + pool.get( + 'availableVolumeCapacity')) * units.Mi + free_capacity = free_capacity + free_cap + total_capacity = total_capacity + total_cap + firmware_version = self.rest_handler.get_firmware_version() + status = constants.StorageStatus.OFFLINE + if firmware_version is not None: + status = constants.StorageStatus.NORMAL + system_name = '%s_%s' % (self.rest_handler.device_model, + self.rest_handler.rest_host) + + s = { + 'name': system_name, + 'vendor': 'Hitachi', + 'description': 'Hitachi VSP Storage', + 'model': str(self.rest_handler.device_model), + 'status': status, + 'serial_number': str(self.rest_handler.serial_number), + 'firmware_version': str(firmware_version), + 'location': '', + 'raw_capacity': int(total_capacity), + 'total_capacity': int(total_capacity), + 'used_capacity': int(total_capacity - free_capacity), + 'free_capacity': int(free_capacity) + } + return s + + def list_storage_pools(self, context): + try: + pools_info = self.rest_handler.get_all_pools() + pool_list = [] + pools = pools_info.get('data') + for pool in pools: + status = self.POOL_STATUS_MAP.get( + pool.get('poolStatus'), + constants.StoragePoolStatus.ABNORMAL + ) + storage_type = constants.StorageType.BLOCK + total_cap = \ + int(pool.get('totalPoolCapacity')) * units.Mi + free_cap = int( + pool.get('availableVolumeCapacity')) * units.Mi + used_cap = total_cap - free_cap + p = { + 'name': pool.get('poolName'), + 'storage_id': self.storage_id, + 'native_storage_pool_id': str(pool.get('poolId')), + 'description': 'Hitachi VSP Pool', + 'status': status, + 'storage_type': storage_type, + 'subscribed_capacity': int(total_cap), + 'total_capacity': int(total_cap), + 'used_capacity': int(used_cap), + 'free_capacity': int(free_cap), + } + pool_list.append(p) + + return pool_list + except exception.DelfinException as err: + err_msg = "Failed to get 
pool metrics from hitachi vsp: %s" % \ + (six.text_type(err)) + LOG.error(err_msg) + raise err + except Exception as e: + err_msg = "Failed to get pool metrics from hitachi vsp: %s" % \ + (six.text_type(e)) + LOG.error(err_msg) + raise exception.InvalidResults(err_msg) + + def list_volumes(self, context): + try: + volumes_info = self.rest_handler.get_all_volumes() + + volume_list = [] + volumes = volumes_info.get('data') + for volume in volumes: + if volume.get('emulationType') == 'NOT DEFINED': + continue + orig_pool_id = volume.get('poolId') + compressed = False + deduplicated = False + if volume.get('dataReductionMode') == \ + 'compression_deduplication': + deduplicated = True + compressed = True + if volume.get('dataReductionMode') == 'compression': + compressed = True + if volume.get('status') == 'NML': + status = 'normal' + else: + status = 'abnormal' + + vol_type = constants.VolumeType.THICK + for voltype in volume.get('attributes'): + if voltype == 'HTI': + vol_type = constants.VolumeType.THIN + + total_cap = \ + int(volume.get('blockCapacity')) * consts.BLOCK_SIZE + used_cap = \ + int(volume.get('blockCapacity')) * consts.BLOCK_SIZE + # Because there is only subscribed capacity in device,so free + # capacity always 0 + free_cap = 0 + if volume.get('label'): + name = volume.get('label') + else: + name = 'ldev_%s' % str(volume.get('ldevId')) + + v = { + 'name': name, + 'storage_id': self.storage_id, + 'description': 'Hitachi VSP volume', + 'status': status, + 'native_volume_id': str(volume.get('ldevId')), + 'native_storage_pool_id': orig_pool_id, + 'type': vol_type, + 'total_capacity': total_cap, + 'used_capacity': used_cap, + 'free_capacity': free_cap, + 'compressed': compressed, + 'deduplicated': deduplicated, + } + + volume_list.append(v) + + return volume_list + except exception.DelfinException as err: + err_msg = "Failed to get volumes metrics from hitachi vsp: %s" % \ + (six.text_type(err)) + LOG.error(err_msg) + raise err + except Exception as e: + 
err_msg = "Failed to get volumes metrics from hitachi vsp: %s" % \ + (six.text_type(e)) + LOG.error(err_msg) + raise exception.InvalidResults(err_msg) + + @staticmethod + def parse_queried_alerts(alerts, alert_list, query_para=None): + for alert in alerts: + occur_time = int(time.mktime(time.strptime( + alert.get('occurenceTime'), + HitachiVspDriver.TIME_PATTERN))) * \ + HitachiVspDriver.SECONDS_TO_MS + if not alert_util.is_alert_in_time_range(query_para, + occur_time): + continue + a = { + 'location': alert.get('location'), + 'alarm_id': alert.get('alertId'), + 'sequence_number': alert.get('alertIndex'), + 'description': alert.get('errorDetail'), + 'alert_name': alert.get('errorSection'), + 'resource_type': constants.DEFAULT_RESOURCE_TYPE, + 'occur_time': occur_time, + 'category': 'Fault', + 'type': constants.EventType.EQUIPMENT_ALARM, + 'severity': HitachiVspDriver.ALERT_LEVEL_MAP.get( + alert.get('errorLevel'), + constants.Severity.INFORMATIONAL + ), + } + alert_list.append(a) + + def list_alerts(self, context, query_para=None): + alert_list = [] + if self.rest_handler.device_model in consts.SUPPORTED_VSP_SERIES: + alerts_info_ctl1 = self.resthanlder.get_alerts('type=CTL1') + alerts_info_ctl2 = self.resthanlder.get_alerts('type=CTL2') + alerts_info_dkc = self.resthanlder.get_alerts('type=DKC') + HitachiVspDriver.parse_queried_alerts(alerts_info_ctl1, + alert_list, query_para) + HitachiVspDriver.parse_queried_alerts(alerts_info_ctl2, + alert_list, query_para) + HitachiVspDriver.parse_queried_alerts(alerts_info_dkc, + alert_list, query_para) + + return alert_list + + def add_trap_config(self, context, trap_config): + pass + + def remove_trap_config(self, context, trap_config): + pass + + @staticmethod + def parse_alert(context, alert): + try: + alert_model = dict() + alert_model['alert_id'] = alert.get(HitachiVspDriver.REFCODE_OID) + alert_model['alert_name'] = alert.get(HitachiVspDriver.DESC_OID) + alert_model['severity'] = constants.Severity.INFORMATIONAL + 
alert_model['category'] = constants.Category.NOT_SPECIFIED + alert_model['type'] = constants.EventType.EQUIPMENT_ALARM + aler_time = '%s %s' % (alert.get(HitachiVspDriver.TRAP_DATE_OID), + alert.get(HitachiVspDriver.TRAP_TIME_OID)) + pattern = '%Y-%m-%d %H:%M:%S' + occur_time = time.strptime(aler_time, pattern) + alert_model['occur_time'] = int(time.mktime(occur_time) * + HitachiVspDriver.SECONDS_TO_MS) + alert_model['description'] = alert.get(HitachiVspDriver.DESC_OID) + alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE + alert_model['location'] = alert.get(HitachiVspDriver.LOCATION_OID) + + return alert_model + except Exception as e: + LOG.error(e) + msg = ("Failed to build alert model as some attributes missing in" + " alert message:%s") % (six.text_type(e)) + raise exception.InvalidResults(msg) + + def clear_alert(self, context, alert): + pass diff --git a/delfin/tests/unit/drivers/hitachi/__init__.py b/delfin/tests/unit/drivers/hitachi/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/delfin/tests/unit/drivers/hitachi/vsp/__init__.py b/delfin/tests/unit/drivers/hitachi/vsp/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py b/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py new file mode 100644 index 000000000..10c15ebf0 --- /dev/null +++ b/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py @@ -0,0 +1,402 @@ +# Copyright 2020 The SODA Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +from unittest import TestCase, mock + +from requests import Session + +from delfin import context +from delfin.drivers.hitachi.vsp.rest_handler import RestHandler +from delfin.drivers.hitachi.vsp.vsp_stor import HitachiVspDriver + + +class Request: + def __init__(self): + self.environ = {'delfin.context': context.RequestContext()} + pass + + +ACCESS_INFO = { + "storage_id": "12345", + "rest": { + "host": "110.143.132.231", + "port": "8443", + "username": "username", + "password": "cGFzc3dvcmQ=" + }, + "ssh": { + "host": "110.143.132.231", + "port": "22", + "username": "username", + "password": "password", + "host_key": "weqewrerwerwerwe" + }, + "vendor": "hitachi", + "model": "vsp", + "extra_attributes": { + "array_id": "00112233" + } +} +GET_DEVICE_ID = { + "data": [ + { + "storageDeviceId": "800000011633", + "model": "VSP G350", + "serialNumber": 11633, + "svpIp": "110.143.132.231" + } + ] +} +GET_ALL_POOLS = { + "data": [ + { + "poolId": 0, + "poolStatus": "POLN", + "usedCapacityRate": 56, + "snapshotCount": 0, + "poolName": "p3-1", + "availableVolumeCapacity": 7796586, + "totalPoolCapacity": 17821524, + "numOfLdevs": 8, + "firstLdevId": 4, + "warningThreshold": 70, + "depletionThreshold": 80, + "virtualVolumeCapacityRate": -1, + "isMainframe": False, + "isShrinking": False, + "locatedVolumeCount": 65, + "totalLocatedCapacity": 15694896, + "blockingMode": "NB", + "totalReservedCapacity": 0, + "reservedVolumeCount": 0, + "poolType": "HDP", + "duplicationNumber": 0, + "dataReductionAccelerateCompCapacity": 0, + "dataReductionCapacity": 0, + "dataReductionBeforeCapacity": 0, + "dataReductionAccelerateCompRate": 0, + "duplicationRate": 0, + "compressionRate": 0, + "dataReductionRate": 0, + "snapshotUsedCapacity": 0, + "suspendSnapshot": True + }, + { + "poolId": 1, + "poolStatus": "POLF", + "usedCapacityRate": 78, + "snapshotCount": 0, + "poolName": "hjw_test", 
+ "availableVolumeCapacity": 3530184, + "totalPoolCapacity": 16221576, + "numOfLdevs": 6, + "firstLdevId": 0, + "warningThreshold": 70, + "depletionThreshold": 80, + "virtualVolumeCapacityRate": -1, + "isMainframe": False, + "isShrinking": False, + "locatedVolumeCount": 24, + "totalLocatedCapacity": 12702144, + "blockingMode": "NB", + "totalReservedCapacity": 0, + "reservedVolumeCount": 0, + "poolType": "HDP", + "duplicationNumber": 0, + "dataReductionAccelerateCompCapacity": 0, + "dataReductionCapacity": 0, + "dataReductionBeforeCapacity": 0, + "dataReductionAccelerateCompRate": 0, + "duplicationRate": 0, + "compressionRate": 0, + "dataReductionRate": 0, + "snapshotUsedCapacity": 0, + "suspendSnapshot": True + } + ] +} +GET_SPECIFIC_STORAGE = { + "storageDeviceId": "800000011633", + "model": "VSP G350", + "serialNumber": 11633, + "svpIp": "51.10.192.90", + "rmiPort": 1099, + "dkcMicroVersion": "80-06-70/00", + "communicationModes": [ + { + "communicationMode": "lanConnectionMode" + } + ], + "isSecure": False +} +GET_ALL_VOLUMES = { + "data": [ + { + "ldevId": 0, + "clprId": 0, + "emulationType": "OPEN-V", + "byteFormatCapacity": "2.57 T", + "blockCapacity": 5538459648, + "composingPoolId": 1, + "attributes": [ + "POOL" + ], + "raidLevel": "RAID5", + "raidType": "3D+1P", + "numOfParityGroups": 1, + "parityGroupIds": [ + "5-1" + ], + "driveType": "SLB5E-M1R9SS", + "driveByteFormatCapacity": "1.74 T", + "driveBlockCapacity": 3750000030, + "status": "NML", + "mpBladeId": 1, + "ssid": "0004", + "resourceGroupId": 0, + "isAluaEnabled": False + }, + { + "ldevId": 1, + "clprId": 0, + "emulationType": "OPEN-V", + "byteFormatCapacity": "2.57 T", + "blockCapacity": 5538459648, + "composingPoolId": 1, + "attributes": [ + "POOL" + ], + "raidLevel": "RAID5", + "raidType": "3D+1P", + "numOfParityGroups": 1, + "parityGroupIds": [ + "5-1" + ], + "driveType": "SLB5E-M1R9SS", + "driveByteFormatCapacity": "1.74 T", + "driveBlockCapacity": 3750000030, + "status": "NML", + "mpBladeId": 
4, + "ssid": "0004", + "resourceGroupId": 0, + "isAluaEnabled": False + }, + { + "ldevId": 2, + "clprId": 0, + "emulationType": "OPEN-V-CVS", + "byteFormatCapacity": "500.00 G", + "blockCapacity": 1048576000, + "numOfPorts": 4, + "ports": [ + { + "portId": "CL3-A", + "hostGroupNumber": 1, + "hostGroupName": "3A84", + "lun": 0 + }, + { + "portId": "CL2-B", + "hostGroupNumber": 0, + "hostGroupName": "2B-G00", + "lun": 0 + }, + { + "portId": "CL4-A", + "hostGroupNumber": 1, + "hostGroupName": "75_197b", + "lun": 0 + }, + { + "portId": "CL2-A", + "hostGroupNumber": 1, + "hostGroupName": "198_126b", + "lun": 0 + } + ], + "attributes": [ + "CVS", + "HDP" + ], + "label": "hjw_test_lun0", + "status": "NML", + "mpBladeId": 0, + "ssid": "0004", + "poolId": 1, + "numOfUsedBlock": 1048621056, + "isFullAllocationEnabled": False, + "resourceGroupId": 0, + "dataReductionStatus": "DISABLED", + "dataReductionMode": "disabled", + "isAluaEnabled": False + }, + { + "ldevId": 99, + "clprId": 0, + "emulationType": "OPEN-V-CVS", + "byteFormatCapacity": "500.00 G", + "blockCapacity": 1048576000, + "attributes": [ + "CVS", + "HDP" + ], + "label": "AIX_performance_test_zj", + "status": "NML", + "mpBladeId": 5, + "ssid": "0004", + "poolId": 0, + "numOfUsedBlock": 1048621056, + "isFullAllocationEnabled": False, + "resourceGroupId": 0, + "dataReductionStatus": "DISABLED", + "dataReductionMode": "disabled", + "isAluaEnabled": False + } + ] +} +TRAP_INFO = { + "1.3.6.1.2.1.1.3.0": "0", + '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.2.6.190.3', + '1.3.6.1.4.1.116.5.11.4.2.3': 'eeeeeeeee', + '1.3.6.1.4.1.116.5.11.4.2.7': 'ddddddd', + '1.3.6.1.4.1.116.5.11.4.2.6': '14:10:10', + '1.3.6.1.4.1.116.5.11.4.2.5': '2020-11-20', + '1.3.6.1.4.1.116.5.11.4.2.2': ' System Version = 7.4.0.11 ', + '1.3.6.1.4.1.116.5.11.4.2.4': '# FRU = None ' +} +ALERT_INFO = [ + { + 'location': "test", + 'alertId': '223232', + 'alertIndex': '1111111', + 'errorDetail': 'test alert', + 'errorSection': 'someting wrong', + 
'occurenceTime': '2020-11-20T10:10:10', + 'errorLevel': 'Serious' + } +] + + +def create_driver(): + kwargs = ACCESS_INFO + + RestHandler.get_system_info = mock.Mock(return_value=GET_DEVICE_ID) + + m = mock.MagicMock(status_code=200) + with mock.patch.object(Session, 'post', return_value=m): + m.raise_for_status.return_value = 201 + m.json.return_value = { + "token": "97c13b8082444b36bc2103026205fa64", + "sessionId": 9 + } + return HitachiVspDriver(**kwargs) + + +class TestHitachiVspStorStorageDriver(TestCase): + driver = create_driver() + + def test_initrest(self): + m = mock.MagicMock(status_code=200) + with mock.patch.object(Session, 'get', return_value=m): + m.raise_for_status.return_value = 200 + m.json.return_value = GET_DEVICE_ID + kwargs = ACCESS_INFO + rh = RestHandler(**kwargs) + rh.get_device_id() + + def test_get_storage(self): + RestHandler.get_system_info = mock.Mock(return_value=GET_DEVICE_ID) + RestHandler.get_rest_info = mock.Mock( + side_effect=[GET_ALL_POOLS, GET_SPECIFIC_STORAGE]) + self.driver.get_storage(context) + + def test_list_storage_pools(self): + RestHandler.get_rest_info = mock.Mock(return_value=GET_ALL_POOLS) + self.driver.list_storage_pools(context) + + def test_list_volumes(self): + RestHandler.get_rest_info = mock.Mock(return_value=GET_ALL_VOLUMES) + self.driver.list_volumes(context) + + def test_list_alerts(self): + RestHandler.get_rest_info = mock.Mock(return_value=ALERT_INFO) + RestHandler.get_rest_info = mock.Mock(return_value=ALERT_INFO) + RestHandler.get_rest_info = mock.Mock(return_value=ALERT_INFO) + self.driver.list_alerts(context) + + def test_parse_queried_alerts(self): + alert_list = [] + HitachiVspDriver.parse_queried_alerts(ALERT_INFO, alert_list) + + def test_parse_alert(self): + self.driver.parse_alert(context, TRAP_INFO) + + def test_rest_close_connection(self): + m = mock.MagicMock(status_code=200) + with mock.patch.object(Session, 'delete', return_value=m): + m.raise_for_status.return_value = 200 + 
m.json.return_value = None + re = self.driver.close_connection() + self.assertIsNone(re) + + def test_rest_handler_cal(self): + m = mock.MagicMock(status_code=403) + with self.assertRaises(Exception) as exc: + with mock.patch.object(Session, 'get', return_value=m): + m.raise_for_status.return_value = 403 + m.json.return_value = None + url = 'http://test' + self.driver.rest_handler.call(url, '', 'GET') + self.assertIn('Invalid ip or port', str(exc.exception)) + + def test_reset_connection(self): + RestHandler.logout = mock.Mock(return_value={}) + RestHandler.get_system_info = mock.Mock(return_value=GET_DEVICE_ID) + m = mock.MagicMock(status_code=200) + with mock.patch.object(Session, 'post', return_value=m): + m.raise_for_status.return_value = 201 + m.json.return_value = { + "token": "97c13b8082444b36bc2103026205fa64", + "sessionId": 9 + } + kwargs = ACCESS_INFO + re = self.driver.reset_connection(context, **kwargs) + self.assertIsNone(re) + + def test_err_storage_pools_err(self): + with self.assertRaises(Exception) as exc: + self.driver.list_storage_pools(context) + self.assertIn('Invalid ip or port', + str(exc.exception)) + + def test_err_volumes(self): + with self.assertRaises(Exception) as exc: + self.driver.list_volumes(context) + self.assertIn('Invalid ip or port', + str(exc.exception)) + + def test_list_volumes_call(self): + m = mock.MagicMock(status_code=200) + with mock.patch.object(Session, 'get', return_value=m): + m.raise_for_status.return_value = 200 + m.json.return_value = GET_ALL_VOLUMES + self.driver.list_volumes(context) + + def test_add_trap_config(self): + self.driver.add_trap_config(context, None) + + def test_remove_trap_config(self): + self.driver.remove_trap_config(context, None) + + def test_clear_alert(self): + self.driver.clear_alert(context, None) diff --git a/setup.py b/setup.py index 001c54ef1..903437f16 100644 --- a/setup.py +++ b/setup.py @@ -34,8 +34,9 @@ 'delfin.storage.drivers': [ 'fake_storage fake_driver = 
delfin.drivers.fake_storage:FakeStorageDriver', 'dellemc vmax = delfin.drivers.dell_emc.vmax.vmax:VMAXStorageDriver', + 'hitachi vsp = delfin.drivers.hitachi.vsp.vsp_stor:HitachiVspDriver', 'hpe 3par = delfin.drivers.hpe.hpe_3par.hpe_3parstor:Hpe3parStorDriver', - 'huawei oceanstor = delfin.drivers.huawei.oceanstor.oceanstor:OceanStorDriver' + 'huawei oceanstor = delfin.drivers.huawei.oceanstor.oceanstor:OceanStorDriver', 'ibm storwize_svc = delfin.drivers.ibm.storwize_svc.storwize_svc:StorwizeSVCDriver' ] }, From deb23be5c493e799b30b40bfecbe85daeba13128 Mon Sep 17 00:00:00 2001 From: jiangyutan <69443713+jiangyutan@users.noreply.github.com> Date: Fri, 27 Nov 2020 18:11:57 +0800 Subject: [PATCH 04/15] Fix vsp driver issue --- delfin/drivers/ibm/storwize_svc/ssh_handler.py | 11 ++++------- delfin/drivers/ibm/storwize_svc/storwize_svc.py | 3 ++- .../drivers/ibm/storwize_svc/test_ibm_storwize_svc.py | 2 +- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/delfin/drivers/ibm/storwize_svc/ssh_handler.py b/delfin/drivers/ibm/storwize_svc/ssh_handler.py index f07eba692..a7f0aab2c 100644 --- a/delfin/drivers/ibm/storwize_svc/ssh_handler.py +++ b/delfin/drivers/ibm/storwize_svc/ssh_handler.py @@ -77,7 +77,7 @@ def parse_alert(alert): alert_model['alert_id'] = str(alert_id) alert_model['alert_name'] = alert_name alert_model['severity'] = severity - alert_model['category'] = 'Fault' + alert_model['category'] = constants.Category.FAULT alert_model['type'] = constants.EventType.EQUIPMENT_ALARM alert_model['sequence_number'] = SSHHandler. 
\ handle_split(alert.get(SSHHandler.OID_SEQ_NUMBER), '=', 1) @@ -130,7 +130,7 @@ def do_exec(command_str, ssh): raise exception.SSHConnectTimeout() elif 'No authentication methods available' in err \ or 'Authentication failed' in err: - raise exception.SSHInvalidUsernameOrPassword() + raise exception.InvalidUsernameOrPassword() elif 'not a valid RSA private key file' in err: raise exception.InvalidPrivateKey() else: @@ -192,7 +192,7 @@ def get_storage(self): raw_capacity = self.parse_string(storage_map.get( 'total_drive_raw_capacity')) subscribed_capacity = self.parse_string(storage_map.get( - 'total_allocated_extent_capacity')) + 'virtual_capacity')) s = { 'name': storage_map.get('name'), 'vendor': 'IBM', @@ -248,8 +248,6 @@ def list_storage_pools(self, storage_id): total_cap = self.parse_string(pool_map.get('capacity')) free_cap = self.parse_string(pool_map.get('free_capacity')) used_cap = self.parse_string(pool_map.get('used_capacity')) - subscribed_cap = self.parse_string(pool_map. - get('real_capacity')) p = { 'name': pool_map.get('name'), 'storage_id': storage_id, @@ -258,7 +256,6 @@ def list_storage_pools(self, storage_id): 'status': status, 'storage_type': constants.StorageType.BLOCK, 'total_capacity': int(total_cap), - 'subscribed_capacity': int(subscribed_cap), 'used_capacity': int(used_cap), 'free_capacity': int(free_cap) } @@ -380,7 +377,7 @@ def list_alerts(self, query_para): 'alert_id': event_id, 'alert_name': alert_name, 'severity': severity, - 'category': 'Fault', + 'category': constants.Category.FAULT, 'type': 'EquipmentAlarm', 'sequence_number': alert_map.get('sequence_number'), 'occur_time': time_stamp, diff --git a/delfin/drivers/ibm/storwize_svc/storwize_svc.py b/delfin/drivers/ibm/storwize_svc/storwize_svc.py index e0350cca3..98ed7b5e7 100644 --- a/delfin/drivers/ibm/storwize_svc/storwize_svc.py +++ b/delfin/drivers/ibm/storwize_svc/storwize_svc.py @@ -14,6 +14,7 @@ from delfin.drivers import driver from delfin.drivers.ibm.storwize_svc import 
ssh_handler +from delfin.drivers.ibm.storwize_svc.ssh_handler import SSHHandler class StorwizeSVCDriver(driver.StorageDriver): @@ -46,7 +47,7 @@ def remove_trap_config(self, context, trap_config): @staticmethod def parse_alert(context, alert): - return ssh_handler.SSHHandler().parse_alert(alert) + return SSHHandler.parse_alert(alert) def clear_alert(self, context, alert): pass diff --git a/delfin/tests/unit/drivers/ibm/storwize_svc/test_ibm_storwize_svc.py b/delfin/tests/unit/drivers/ibm/storwize_svc/test_ibm_storwize_svc.py index 3078c482e..864721450 100644 --- a/delfin/tests/unit/drivers/ibm/storwize_svc/test_ibm_storwize_svc.py +++ b/delfin/tests/unit/drivers/ibm/storwize_svc/test_ibm_storwize_svc.py @@ -405,7 +405,7 @@ def test_ssh_pool_put(self): ssh_pool.remove(ssh) def test_parse_alert(self): - SSHHandler.parse_alert(trap_info) + self.driver.parse_alert(context, trap_info) def test_reset_connection(self): self.driver.reset_connection(context, **ACCESS_INFO) From a8ca53b82f622c04bcc969dda58e3a95ec9aa82e Mon Sep 17 00:00:00 2001 From: jiangyutan <69443713+jiangyutan@users.noreply.github.com> Date: Sat, 28 Nov 2020 10:12:53 +0800 Subject: [PATCH 05/15] change pool status and some optimize for vsp (#395) --- delfin/drivers/hitachi/vsp/consts.py | 2 +- delfin/drivers/hitachi/vsp/rest_handler.py | 9 ++++--- delfin/drivers/hitachi/vsp/vsp_stor.py | 26 +++++++++++++------ delfin/drivers/utils/ssh_client.py | 4 +-- delfin/exception.py | 5 ---- .../hitachi/vsp/test_hitachi_vspstor.py | 4 +-- 6 files changed, 28 insertions(+), 22 deletions(-) diff --git a/delfin/drivers/hitachi/vsp/consts.py b/delfin/drivers/hitachi/vsp/consts.py index fccd24983..268c390c5 100644 --- a/delfin/drivers/hitachi/vsp/consts.py +++ b/delfin/drivers/hitachi/vsp/consts.py @@ -15,6 +15,6 @@ ERROR_SESSION_INVALID_CODE = 403 ERROR_SESSION_IS_BEING_USED_CODE = 409 BLOCK_SIZE = 512 - +MAX_LDEV_NUMBER_OF_RESTAPI = 16383 SUPPORTED_VSP_SERIES = ('VSP G350', 'VSP G370', 'VSP G700', 'VSP G900', 'VSP 
F350', 'VSP F370', 'VSP F700', 'VSP F900') diff --git a/delfin/drivers/hitachi/vsp/rest_handler.py b/delfin/drivers/hitachi/vsp/rest_handler.py index f4771f893..6537dff01 100644 --- a/delfin/drivers/hitachi/vsp/rest_handler.py +++ b/delfin/drivers/hitachi/vsp/rest_handler.py @@ -69,7 +69,7 @@ def call(self, url, data=None, method=None): except Exception as e: err_msg = "Get RestHandler.call failed: %s" % (six.text_type(e)) LOG.error(err_msg) - raise exception.InvalidResults(err_msg) + raise e def get_rest_info(self, url, data=None): result_json = None @@ -109,7 +109,7 @@ def login(self): LOG.error("Login error. URL: %(url)s\n" "Reason: %(reason)s.", {"url": url, "reason": res.text}) - if 'invalid username or password' in res.text: + if 'authentication failed' in res.text: raise exception.InvalidUsernameOrPassword() else: raise exception.BadResponse(res.text) @@ -192,8 +192,9 @@ def get_all_pools(self): return result_json def get_all_volumes(self): - url = '%s/%s/ldevs' % \ - (RestHandler.COMM_URL, self.storage_device_id) + url = '%s/%s/ldevs?ldevOption=defined&count=%s' % \ + (RestHandler.COMM_URL, self.storage_device_id, + consts.MAX_LDEV_NUMBER_OF_RESTAPI) result_json = self.get_rest_info(url) return result_json diff --git a/delfin/drivers/hitachi/vsp/vsp_stor.py b/delfin/drivers/hitachi/vsp/vsp_stor.py index 540765ee6..3c59b2a91 100644 --- a/delfin/drivers/hitachi/vsp/vsp_stor.py +++ b/delfin/drivers/hitachi/vsp/vsp_stor.py @@ -29,7 +29,7 @@ class HitachiVspDriver(driver.StorageDriver): POOL_STATUS_MAP = {"POLN": constants.StoragePoolStatus.NORMAL, - "POLF": constants.StoragePoolStatus.ABNORMAL, + "POLF": constants.StoragePoolStatus.NORMAL, "POLS": constants.StoragePoolStatus.ABNORMAL, "POLE": constants.StoragePoolStatus.OFFLINE } @@ -38,6 +38,12 @@ class HitachiVspDriver(driver.StorageDriver): "Moderate": constants.Severity.WARNING, "Service": constants.Severity.INFORMATIONAL } + TRAP_ALERT_LEVEL_MAP = { + "1.3.6.1.4.1.116.3.11.4.1.1.0.1": 
constants.Severity.CRITICAL, + "1.3.6.1.4.1.116.3.11.4.1.1.0.2": constants.Severity.MAJOR, + "1.3.6.1.4.1.116.3.11.4.1.1.0.3": constants.Severity.WARNING, + "1.3.6.1.4.1.116.3.11.4.1.1.0.4": constants.Severity.INFORMATIONAL + } TIME_PATTERN = '%Y-%m-%dT%H:%M:%S' @@ -47,6 +53,7 @@ class HitachiVspDriver(driver.StorageDriver): TRAP_DATE_OID = '1.3.6.1.4.1.116.5.11.4.2.5' TRAP_NICKNAME_OID = '1.3.6.1.4.1.116.5.11.4.2.2' LOCATION_OID = '1.3.6.1.4.1.116.5.11.4.2.4' + OID_SEVERITY = '1.3.6.1.6.3.1.1.4.1.0' SECONDS_TO_MS = 1000 def __init__(self, **kwargs): @@ -131,7 +138,6 @@ def list_storage_pools(self, context): 'description': 'Hitachi VSP Pool', 'status': status, 'storage_type': storage_type, - 'subscribed_capacity': int(total_cap), 'total_capacity': int(total_cap), 'used_capacity': int(used_cap), 'free_capacity': int(free_cap), @@ -237,7 +243,7 @@ def parse_queried_alerts(alerts, alert_list, query_para=None): 'alert_name': alert.get('errorSection'), 'resource_type': constants.DEFAULT_RESOURCE_TYPE, 'occur_time': occur_time, - 'category': 'Fault', + 'category': constants.Category.FAULT, 'type': constants.EventType.EQUIPMENT_ALARM, 'severity': HitachiVspDriver.ALERT_LEVEL_MAP.get( alert.get('errorLevel'), @@ -273,12 +279,16 @@ def parse_alert(context, alert): alert_model = dict() alert_model['alert_id'] = alert.get(HitachiVspDriver.REFCODE_OID) alert_model['alert_name'] = alert.get(HitachiVspDriver.DESC_OID) - alert_model['severity'] = constants.Severity.INFORMATIONAL - alert_model['category'] = constants.Category.NOT_SPECIFIED + severity = HitachiVspDriver.TRAP_ALERT_LEVEL_MAP.get( + alert.get(HitachiVspDriver.OID_SEVERITY), + constants.Severity.INFORMATIONAL + ) + alert_model['severity'] = severity + alert_model['category'] = constants.Category.FAULT alert_model['type'] = constants.EventType.EQUIPMENT_ALARM - aler_time = '%s %s' % (alert.get(HitachiVspDriver.TRAP_DATE_OID), - alert.get(HitachiVspDriver.TRAP_TIME_OID)) - pattern = '%Y-%m-%d %H:%M:%S' + aler_time = 
'%s%s' % (alert.get(HitachiVspDriver.TRAP_DATE_OID), + alert.get(HitachiVspDriver.TRAP_TIME_OID)) + pattern = '%Y/%m/%d%H:%M:%S' occur_time = time.strptime(aler_time, pattern) alert_model['occur_time'] = int(time.mktime(occur_time) * HitachiVspDriver.SECONDS_TO_MS) diff --git a/delfin/drivers/utils/ssh_client.py b/delfin/drivers/utils/ssh_client.py index 64d0573a8..4629b583e 100644 --- a/delfin/drivers/utils/ssh_client.py +++ b/delfin/drivers/utils/ssh_client.py @@ -117,7 +117,7 @@ def do_exec(self, command_str): raise exception.SSHConnectTimeout() elif 'No authentication methods available' in str(e) \ or 'Authentication failed' in str(e): - raise exception.SSHInvalidUsernameOrPassword() + raise exception.InvalidUsernameOrPassword() elif 'not a valid RSA private key file' in str(e): raise exception.InvalidPrivateKey() elif 'not found in known_hosts' in str(e): @@ -196,7 +196,7 @@ def create(self): raise exception.SSHConnectTimeout() elif 'No authentication methods available' in err \ or 'Authentication failed' in err: - raise exception.SSHInvalidUsernameOrPassword() + raise exception.InvalidUsernameOrPassword() elif 'not a valid RSA private key file' in err: raise exception.InvalidPrivateKey() elif 'not found in known_hosts' in err: diff --git a/delfin/exception.py b/delfin/exception.py index 9a03d4b48..3a663f2ca 100644 --- a/delfin/exception.py +++ b/delfin/exception.py @@ -236,11 +236,6 @@ class SSHConnectTimeout(DelfinException): code = 500 -class SSHInvalidUsernameOrPassword(DelfinException): - msg_fmt = _("SSH invalid username or password.") - code = 400 - - class SSHNotFoundKnownHosts(NotFound): msg_fmt = _("{0} not found in known_hosts.") code = 400 diff --git a/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py b/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py index 10c15ebf0..cb7a58ede 100644 --- a/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py +++ b/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py @@ 
-266,11 +266,11 @@ def __init__(self): } TRAP_INFO = { "1.3.6.1.2.1.1.3.0": "0", - '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.2.6.190.3', + '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.116.3.11.4.1.1.0.1', '1.3.6.1.4.1.116.5.11.4.2.3': 'eeeeeeeee', '1.3.6.1.4.1.116.5.11.4.2.7': 'ddddddd', '1.3.6.1.4.1.116.5.11.4.2.6': '14:10:10', - '1.3.6.1.4.1.116.5.11.4.2.5': '2020-11-20', + '1.3.6.1.4.1.116.5.11.4.2.5': '2020/11/20', '1.3.6.1.4.1.116.5.11.4.2.2': ' System Version = 7.4.0.11 ', '1.3.6.1.4.1.116.5.11.4.2.4': '# FRU = None ' } From 7be276d22ec1a5693cebf521db31ab143eac5b43 Mon Sep 17 00:00:00 2001 From: jiangyutan <69443713+jiangyutan@users.noreply.github.com> Date: Tue, 1 Dec 2020 22:03:09 +0800 Subject: [PATCH 06/15] rest api change timeout and fix trap parse (#401) --- delfin/drivers/hitachi/vsp/consts.py | 2 +- delfin/drivers/hitachi/vsp/rest_handler.py | 18 ++++++++---------- delfin/drivers/hitachi/vsp/vsp_stor.py | 4 ++-- 3 files changed, 11 insertions(+), 13 deletions(-) diff --git a/delfin/drivers/hitachi/vsp/consts.py b/delfin/drivers/hitachi/vsp/consts.py index 268c390c5..9bc3c3d35 100644 --- a/delfin/drivers/hitachi/vsp/consts.py +++ b/delfin/drivers/hitachi/vsp/consts.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-SOCKET_TIMEOUT = 52 +SOCKET_TIMEOUT = 30 ERROR_SESSION_INVALID_CODE = 403 ERROR_SESSION_IS_BEING_USED_CODE = 409 BLOCK_SIZE = 512 diff --git a/delfin/drivers/hitachi/vsp/rest_handler.py b/delfin/drivers/hitachi/vsp/rest_handler.py index 6537dff01..9bc068de0 100644 --- a/delfin/drivers/hitachi/vsp/rest_handler.py +++ b/delfin/drivers/hitachi/vsp/rest_handler.py @@ -40,10 +40,10 @@ def __init__(self, **kwargs): self.device_model = None self.serial_number = None - def call(self, url, data=None, method=None): + def call(self, url, data=None, method=None, + calltimeout=consts.SOCKET_TIMEOUT): try: - res = self.do_call(url, data, method, - calltimeout=consts.SOCKET_TIMEOUT) + res = self.do_call(url, data, method, calltimeout) if (res.status_code == consts.ERROR_SESSION_INVALID_CODE or res.status_code == consts.ERROR_SESSION_IS_BEING_USED_CODE): @@ -57,8 +57,7 @@ def call(self, url, data=None, method=None): access_session = self.login() if access_session is not None: res = self. \ - do_call(url, data, method, - calltimeout=consts.SOCKET_TIMEOUT) + do_call(url, data, method, calltimeout) else: LOG.error('Login error,get access_session failed') elif res.status_code == 503: @@ -71,9 +70,9 @@ def call(self, url, data=None, method=None): LOG.error(err_msg) raise e - def get_rest_info(self, url, data=None): + def get_rest_info(self, url, timeout=consts.SOCKET_TIMEOUT, data=None): result_json = None - res = self.call(url, data, 'GET') + res = self.call(url, data, 'GET', timeout) if res.status_code == 200: result_json = res.json() return result_json @@ -96,8 +95,7 @@ def login(self): self.rest_username, cryptor.decode(self.rest_password)) res = self. 
\ - do_call(url, data, 'POST', - calltimeout=consts.SOCKET_TIMEOUT) + do_call(url, data, 'POST', 10) if res.status_code == 200: result = res.json() self.session_id = result.get('sessionId') @@ -199,6 +197,6 @@ def get_all_volumes(self): return result_json def get_system_info(self): - result_json = self.get_rest_info(RestHandler.COMM_URL) + result_json = self.get_rest_info(RestHandler.COMM_URL, timeout=10) return result_json diff --git a/delfin/drivers/hitachi/vsp/vsp_stor.py b/delfin/drivers/hitachi/vsp/vsp_stor.py index 3c59b2a91..36af9e5ec 100644 --- a/delfin/drivers/hitachi/vsp/vsp_stor.py +++ b/delfin/drivers/hitachi/vsp/vsp_stor.py @@ -52,7 +52,6 @@ class HitachiVspDriver(driver.StorageDriver): TRAP_TIME_OID = '1.3.6.1.4.1.116.5.11.4.2.6' TRAP_DATE_OID = '1.3.6.1.4.1.116.5.11.4.2.5' TRAP_NICKNAME_OID = '1.3.6.1.4.1.116.5.11.4.2.2' - LOCATION_OID = '1.3.6.1.4.1.116.5.11.4.2.4' OID_SEVERITY = '1.3.6.1.6.3.1.1.4.1.0' SECONDS_TO_MS = 1000 @@ -294,7 +293,8 @@ def parse_alert(context, alert): HitachiVspDriver.SECONDS_TO_MS) alert_model['description'] = alert.get(HitachiVspDriver.DESC_OID) alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE - alert_model['location'] = alert.get(HitachiVspDriver.LOCATION_OID) + alert_model['location'] = alert.get(HitachiVspDriver. 
+ TRAP_NICKNAME_OID) return alert_model except Exception as e: From 2bbaeb77e4bfea688e302182c86538444f34e498 Mon Sep 17 00:00:00 2001 From: jiangyutan <69443713+jiangyutan@users.noreply.github.com> Date: Tue, 1 Dec 2020 22:07:41 +0800 Subject: [PATCH 07/15] fix ssh excption error when the port is wrong and do some optimize (#402) --- .../drivers/ibm/storwize_svc/ssh_handler.py | 40 +++++++------------ delfin/drivers/utils/ssh_client.py | 2 +- 2 files changed, 15 insertions(+), 27 deletions(-) diff --git a/delfin/drivers/ibm/storwize_svc/ssh_handler.py b/delfin/drivers/ibm/storwize_svc/ssh_handler.py index a7f0aab2c..1be7f71f6 100644 --- a/delfin/drivers/ibm/storwize_svc/ssh_handler.py +++ b/delfin/drivers/ibm/storwize_svc/ssh_handler.py @@ -20,7 +20,7 @@ from oslo_utils import units from delfin import exception, utils -from delfin.common import constants +from delfin.common import constants, alert_util from delfin.drivers.utils.ssh_client import SSHPool LOG = logging.getLogger(__name__) @@ -193,13 +193,16 @@ def get_storage(self): 'total_drive_raw_capacity')) subscribed_capacity = self.parse_string(storage_map.get( 'virtual_capacity')) + firmware_version = '' + if storage_map.get('code_level') is not None: + firmware_version = storage_map.get('code_level').split(' ')[0] s = { 'name': storage_map.get('name'), 'vendor': 'IBM', 'model': storage_map.get('product_name'), 'status': status, 'serial_number': serial_number, - 'firmware_version': storage_map.get('code_level'), + 'firmware_version': firmware_version, 'location': location, 'total_capacity': int(free_capacity + used_capacity), 'raw_capacity': int(raw_capacity), @@ -248,6 +251,8 @@ def list_storage_pools(self, storage_id): total_cap = self.parse_string(pool_map.get('capacity')) free_cap = self.parse_string(pool_map.get('free_capacity')) used_cap = self.parse_string(pool_map.get('used_capacity')) + subscribed_capacity = self.parse_string(pool_map.get( + 'virtual_capacity')) p = { 'name': pool_map.get('name'), 
'storage_id': storage_id, @@ -255,6 +260,7 @@ def list_storage_pools(self, storage_id): 'description': '', 'status': status, 'storage_type': constants.StorageType.BLOCK, + 'subscribed_capacity': int(subscribed_capacity), 'total_capacity': int(total_cap), 'used_capacity': int(used_cap), 'free_capacity': int(free_cap) @@ -329,25 +335,6 @@ def list_volumes(self, storage_id): LOG.error(err_msg) raise exception.InvalidResults(err_msg) - def judge_alert_time(self, alert_map, query_para): - if len(alert_map) <= 1: - return False - if query_para is None and len(alert_map) > 1: - return True - occur_time = int(alert_map.get('last_timestamp_epoch')) * \ - self.SECONDS_TO_MS - if query_para.get('begin_time') and query_para.get('end_time'): - if occur_time >= int(query_para.get('begin_time')) and \ - occur_time <= int(query_para.get('end_time')): - return True - if query_para.get('begin_time'): - if occur_time >= int(query_para.get('begin_time')): - return True - if query_para.get('end_time'): - if occur_time <= int(query_para.get('end_time')): - return True - return False - def list_alerts(self, query_para): try: alert_list = [] @@ -357,15 +344,16 @@ def list_alerts(self, query_para): if alert_res[i] is None or alert_res[i] == '': continue alert_str = ' '.join(alert_res[i].split()) - strinfo = alert_str.split(' ', 9) + strinfo = alert_str.split(' ', 1) detail_command = 'lseventlog %s' % strinfo[0] deltail_info = self.exec_ssh_command(detail_command) alert_map = {} self.handle_detail(deltail_info, alert_map, split=' ') - if self.judge_alert_time(alert_map, query_para) is False: - continue - time_stamp = int(alert_map.get('last_timestamp_epoch')) * \ + occur_time = int(alert_map.get('last_timestamp_epoch')) * \ self.SECONDS_TO_MS + if not alert_util.is_alert_in_time_range(query_para, + occur_time): + continue alert_name = alert_map.get('event_id_text', '') event_id = alert_map.get('event_id') location = alert_map.get('object_name', '') @@ -380,7 +368,7 @@ def list_alerts(self, 
query_para): 'category': constants.Category.FAULT, 'type': 'EquipmentAlarm', 'sequence_number': alert_map.get('sequence_number'), - 'occur_time': time_stamp, + 'occur_time': occur_time, 'description': alert_name, 'resource_type': resource_type, 'location': location diff --git a/delfin/drivers/utils/ssh_client.py b/delfin/drivers/utils/ssh_client.py index 4629b583e..bed423938 100644 --- a/delfin/drivers/utils/ssh_client.py +++ b/delfin/drivers/utils/ssh_client.py @@ -193,7 +193,7 @@ def create(self): err = six.text_type(e) LOG.error('doexec InvalidUsernameOrPassword error') if 'timed out' in err: - raise exception.SSHConnectTimeout() + raise exception.InvalidIpOrPort() elif 'No authentication methods available' in err \ or 'Authentication failed' in err: raise exception.InvalidUsernameOrPassword() From 87e65f38e7ab643c317120edd10ad907b5a4ef1d Mon Sep 17 00:00:00 2001 From: ThisIsClark Date: Sat, 19 Dec 2020 11:39:20 +0800 Subject: [PATCH 08/15] Update the value of 'update_at' of storage when going to sync the storage (#425) --- delfin/api/v1/storages.py | 1 + 1 file changed, 1 insertion(+) diff --git a/delfin/api/v1/storages.py b/delfin/api/v1/storages.py index b67153f93..563e48897 100644 --- a/delfin/api/v1/storages.py +++ b/delfin/api/v1/storages.py @@ -217,4 +217,5 @@ def _set_synced_if_ok(context, storage_id, resource_count): storage['sync_status'] > 0: raise exception.StorageIsSyncing(storage['id']) storage['sync_status'] = resource_count * constants.ResourceSync.START + storage['updated_at'] = current_time db.storage_update(context, storage['id'], storage) From 34bdef695a82a39815aebf313f87f9eab69f08e6 Mon Sep 17 00:00:00 2001 From: jiangyutan <69443713+jiangyutan@users.noreply.github.com> Date: Thu, 31 Dec 2020 18:25:58 +0800 Subject: [PATCH 09/15] add dell_emc unity device information collection (#416) --- delfin/drivers/dell_emc/unity/__init__.py | 0 .../drivers/dell_emc/unity/alert_handler.py | 135 ++++ delfin/drivers/dell_emc/unity/consts.py | 18 + 
delfin/drivers/dell_emc/unity/rest_handler.py | 201 ++++++ delfin/drivers/dell_emc/unity/unity.py | 195 ++++++ .../unit/drivers/dell_emc/unity/__init__.py | 0 .../drivers/dell_emc/unity/test_emc_unity.py | 599 ++++++++++++++++++ setup.py | 1 + 8 files changed, 1149 insertions(+) create mode 100644 delfin/drivers/dell_emc/unity/__init__.py create mode 100644 delfin/drivers/dell_emc/unity/alert_handler.py create mode 100644 delfin/drivers/dell_emc/unity/consts.py create mode 100644 delfin/drivers/dell_emc/unity/rest_handler.py create mode 100644 delfin/drivers/dell_emc/unity/unity.py create mode 100644 delfin/tests/unit/drivers/dell_emc/unity/__init__.py create mode 100644 delfin/tests/unit/drivers/dell_emc/unity/test_emc_unity.py diff --git a/delfin/drivers/dell_emc/unity/__init__.py b/delfin/drivers/dell_emc/unity/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/delfin/drivers/dell_emc/unity/alert_handler.py b/delfin/drivers/dell_emc/unity/alert_handler.py new file mode 100644 index 000000000..1d29e643e --- /dev/null +++ b/delfin/drivers/dell_emc/unity/alert_handler.py @@ -0,0 +1,135 @@ +# Copyright 2020 The SODA Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http:#www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import time + +import six +from oslo_log import log + +from delfin import exception +from delfin.common import alert_util +from delfin.common import constants +from delfin.i18n import _ + +LOG = log.getLogger(__name__) + + +class AlertHandler(object): + + TIME_PATTERN = "%Y-%m-%dT%H:%M:%S.%fZ" + + OID_SEVERITY = '1.3.6.1.6.3.1.1.4.1.0' + OID_NODE = '1.3.6.1.4.1.1139.103.1.18.1.1' + OID_COMPONENT = '1.3.6.1.4.1.1139.103.1.18.1.2' + OID_SYMPTOMID = '1.3.6.1.4.1.1139.103.1.18.1.3' + OID_SYMPTOMTEXT = '1.3.6.1.4.1.1139.103.1.18.1.4' + OID_TIMESTAMP = '1.3.6.1.4.1.1139.103.1.18.1.5' + + ALERT_LEVEL_MAP = {0: constants.Severity.CRITICAL, + 1: constants.Severity.CRITICAL, + 2: constants.Severity.CRITICAL, + 3: constants.Severity.MAJOR, + 4: constants.Severity.WARNING, + 5: constants.Severity.FATAL, + 6: constants.Severity.INFORMATIONAL, + 7: constants.Severity.NOT_SPECIFIED + } + + TRAP_LEVEL_MAP = {'1.3.6.1.4.1.1139.103.1.18.2.0': + constants.Severity.CRITICAL, + '1.3.6.1.4.1.1139.103.1.18.2.1': + constants.Severity.CRITICAL, + '1.3.6.1.4.1.1139.103.1.18.2.2': + constants.Severity.CRITICAL, + '1.3.6.1.4.1.1139.103.1.18.2.3': + constants.Severity.MAJOR, + '1.3.6.1.4.1.1139.103.1.18.2.4': + constants.Severity.WARNING, + '1.3.6.1.4.1.1139.103.1.18.2.5': + constants.Severity.FATAL, + '1.3.6.1.4.1.1139.103.1.18.2.6': + constants.Severity.INFORMATIONAL, + '1.3.6.1.4.1.1139.103.1.18.2.7': + constants.Severity.NOT_SPECIFIED + } + SECONDS_TO_MS = 1000 + + @staticmethod + def parse_alert(context, alert): + try: + alert_model = dict() + alert_model['alert_id'] = alert.get(AlertHandler.OID_SYMPTOMID) + alert_model['alert_name'] = alert.get(AlertHandler.OID_COMPONENT) + alert_model['severity'] = AlertHandler.TRAP_LEVEL_MAP.get( + alert.get(AlertHandler.OID_SEVERITY), + constants.Severity.INFORMATIONAL) + alert_model['category'] = constants.Category.FAULT + alert_model['type'] = constants.EventType.EQUIPMENT_ALARM + occur_time = int(time.time()) * AlertHandler.SECONDS_TO_MS + 
alert_model['occur_time'] = occur_time + alert_model['description'] = alert.get( + AlertHandler.OID_SYMPTOMTEXT) + alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE + alert_model['location'] = alert.get(AlertHandler.OID_NODE) + + return alert_model + except Exception as e: + LOG.error(e) + msg = (_("Failed to build alert model as some attributes missing " + "in alert message.")) + raise exception.InvalidResults(msg) + + def parse_queried_alerts(self, alert_model_list, alert_list, query_para): + alerts = alert_list.get('entries') + for alert in alerts: + try: + occur_time = int(time.mktime(time.strptime( + alert.get('content').get('timestamp'), + self.TIME_PATTERN))) + if not alert_util.is_alert_in_time_range( + query_para, int(occur_time * + AlertHandler.SECONDS_TO_MS)): + continue + + alert_model = {} + location = '' + resource_type = constants.DEFAULT_RESOURCE_TYPE + if 'component' in alert: + resource_type = alert.get( + 'content').get('component').get('resource') + location = alert.get( + 'content').get('component').get('id') + + alert_model['alert_id'] = alert.get( + 'content').get('messageId') + alert_model['alert_name'] = alert.get( + 'content').get('message') + alert_model['severity'] = self.ALERT_LEVEL_MAP.get( + alert.get('content').get('severity'), + constants.Severity.INFORMATIONAL) + alert_model['category'] = constants.Category.FAULT + alert_model['type'] = constants.EventType.EQUIPMENT_ALARM + alert_model['sequence_number'] = alert.get('content').get('id') + alert_model['occur_time'] = int(occur_time * + AlertHandler.SECONDS_TO_MS) + alert_model['description'] = alert.get('content').get( + 'description') + alert_model['resource_type'] = resource_type + alert_model['location'] = location + + alert_model_list.append(alert_model) + except Exception as e: + LOG.error(e) + err_msg = "Failed to build alert model as some attributes " \ + "missing in queried alerts: %s" % (six.text_type(e)) + raise exception.InvalidResults(err_msg) diff --git 
a/delfin/drivers/dell_emc/unity/consts.py b/delfin/drivers/dell_emc/unity/consts.py new file mode 100644 index 000000000..c4a79462e --- /dev/null +++ b/delfin/drivers/dell_emc/unity/consts.py @@ -0,0 +1,18 @@ +# Copyright 2020 The SODA Authors. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +SOCKET_TIMEOUT = 10 +ERROR_SESSION_INVALID_CODE = 403 +ERROR_SESSION_IS_BEING_USED_CODE = 409 +HEALTH_OK = (5, 7) diff --git a/delfin/drivers/dell_emc/unity/rest_handler.py b/delfin/drivers/dell_emc/unity/rest_handler.py new file mode 100644 index 000000000..814286479 --- /dev/null +++ b/delfin/drivers/dell_emc/unity/rest_handler.py @@ -0,0 +1,201 @@ +# Copyright 2020 The SODA Authors. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
class RestHandler(RestClient):
    """REST handler for DELL EMC Unity storage arrays.

    Builds on the generic :class:`RestClient` with Unity-specific
    endpoints, CSRF-token based session management and a single automatic
    re-login + retry when the server reports that the session expired.
    """

    REST_AUTH_URL = '/api/types/loginSessionInfo/instances'
    REST_LOGOUT_URL = '/api/types/loginSessionInfo/action/logout'
    REST_STORAGE_URL = '/api/types/system/instances'
    REST_CAPACITY_URL = '/api/types/systemCapacity/instances'
    REST_POOLS_URL = '/api/types/pool/instances'
    REST_LUNS_URL = '/api/types/lun/instances'
    REST_ALERTS_URL = '/api/types/alert/instances'
    REST_DEL_ALERTS_URL = '/api/instances/alert/'
    REST_DISK_URL = '/api/types/disk/instances'
    REST_SOFT_VERSION_URL = '/api/types/installedSoftwareVersion/instances'
    # Response/request header carrying the CSRF session token.
    REST_AUTH_KEY = 'EMC-CSRF-TOKEN'

    def __init__(self, **kwargs):
        super(RestHandler, self).__init__(**kwargs)

    def call(self, url, data=None, method=None):
        """Issue a request, re-authenticating once on session expiry.

        :param url: endpoint path (may include a query string)
        :param data: request body, passed through to ``do_call``
        :param method: HTTP verb, passed through to ``do_call``
        :returns: the raw response object
        :raises exception.InvalidResults: on HTTP 503
        """
        try:
            res = self.do_call(url, data, method,
                               calltimeout=consts.SOCKET_TIMEOUT)
            if (res.status_code == consts.ERROR_SESSION_INVALID_CODE
                    or res.status_code ==
                    consts.ERROR_SESSION_IS_BEING_USED_CODE):
                LOG.error(
                    "Failed to get token=={0}=={1},get it again".format(
                        res.status_code, res.text))
                # A logout may legitimately fail with these codes once the
                # session is already gone; never retry it.
                if RestHandler.REST_LOGOUT_URL in url:
                    return res
                self.rest_auth_token = None
                access_session = self.login()
                # With a fresh token, retry the original request once.
                if access_session is not None:
                    res = self. \
                        do_call(url, data, method,
                                calltimeout=consts.SOCKET_TIMEOUT)
                else:
                    LOG.error('Login session is none')
            elif res.status_code == 503:
                raise exception.InvalidResults(res.text)
            return res
        except Exception as e:
            err_msg = "Get restHandler.call failed: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e

    def get_rest_info(self, url, data=None, method='GET'):
        """Call ``url`` and return the decoded JSON body, or None on non-200."""
        result_json = None
        res = self.call(url, data, method)
        if res.status_code == 200:
            result_json = res.json()
        return result_json

    def init_rest_client(self):
        """(Re)create the underlying requests session with Unity headers."""
        if self.session:
            self.session.close()
        self.session = requests.Session()
        self.session.headers.update({
            'Accept': 'application/json',
            "Content-Type": "application/json",
            "X-EMC-REST-CLIENT": "true"})
        self.session.auth = requests.auth.HTTPBasicAuth(
            self.rest_username,
            cryptor.decode(self.rest_password))
        if not self.verify:
            self.session.verify = False
        else:
            LOG.debug("Enable certificate verification, ca_path: {0}".format(
                self.verify))
            self.session.verify = self.verify
        # Ignore proxy/CA environment variables; hostname checking is
        # relaxed via the custom HTTPS adapter below.
        self.session.trust_env = False
        self.session.mount("https://", ssl_utils.HostNameIgnoreAdapter())

    def login(self):
        """Authenticate and cache the CSRF token.

        Only logs in when no token is cached; returns the (possibly
        pre-existing) session token.
        :raises exception.InvalidUsernameOrPassword: on bad credentials
        :raises exception.BadResponse: on any other non-200 login reply
        """
        try:
            access_session = self.rest_auth_token
            if self.rest_auth_token is None:
                url = RestHandler.REST_AUTH_URL
                data = {}
                self.init_rest_client()
                res = self. \
                    do_call(url, data, 'GET',
                            calltimeout=consts.SOCKET_TIMEOUT)
                if res.status_code == 200:
                    access_session = res.headers['EMC-CSRF-TOKEN']
                    self.rest_auth_token = access_session
                    self.session.headers[
                        RestHandler.REST_AUTH_KEY] = access_session
                else:
                    LOG.error("Login error. URL: %(url)s\n"
                              "Reason: %(reason)s.",
                              {"url": url, "reason": res.text})
                    if 'invalid username or password' in res.text:
                        raise exception.InvalidUsernameOrPassword()
                    else:
                        raise exception.BadResponse(res.text)
            else:
                LOG.error('Login Parameter error')
            return access_session
        except Exception as e:
            LOG.error("Login error: %s", six.text_type(e))
            raise e

    def logout(self):
        """End the server-side session and close the HTTP session."""
        try:
            url = RestHandler.REST_LOGOUT_URL
            if self.rest_auth_token is not None:
                url = '%s/%s' % (url, self.rest_auth_token)
            self.rest_auth_token = None
            if self.san_address:
                self.call(url, method='POST')
            if self.session:
                self.session.close()
        except exception.DelfinException as e:
            err_msg = "Logout error: %s" % (e.msg)
            LOG.error(err_msg)
            raise e
        except Exception as e:
            err_msg = "Logout error: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e

    def get_storage(self):
        """Query basic system identity and health."""
        url = '%s?%s' % (RestHandler.REST_STORAGE_URL,
                         'fields=name,model,serialNumber,health')
        result_json = self.get_rest_info(url)
        return result_json

    def get_capacity(self):
        """Query system-wide capacity counters."""
        url = '%s?%s' % (RestHandler.REST_CAPACITY_URL,
                         'fields=sizeFree,sizeTotal,sizeUsed,'
                         'sizeSubscribed,totalLogicalSize')
        result_json = self.get_rest_info(url)
        return result_json

    def get_all_pools(self):
        """Query all storage pools with their capacity and health fields."""
        url = '%s?%s' % (RestHandler.REST_POOLS_URL,
                         'fields=id,name,health,type,sizeFree,'
                         'sizeTotal,sizeUsed,sizeSubscribed')
        result_json = self.get_rest_info(url)
        return result_json

    def get_all_luns(self, page_size):
        """Query one page of LUNs; ``page_size`` is the page *number*."""
        url = '%s?%s&page=%s' % (RestHandler.REST_LUNS_URL,
                                 'fields=id,name,health,type,sizeAllocated,'
                                 'sizeTotal,sizeUsed,pool,wwn,isThinEnabled',
                                 page_size)
        result_json = self.get_rest_info(url)
        return result_json

    def get_all_alerts(self, page_size):
        """Query one page of alerts; ``page_size`` is the page *number*."""
        url = '%s?%s&page=%s' % (RestHandler.REST_ALERTS_URL,
                                 'fields=id,timestamp,severity,component,'
                                 'messageId,message,description,descriptionId',
                                 page_size)
        result_json = self.get_rest_info(url)
        return result_json

    def get_soft_version(self):
        """Query the installed software version."""
        url = '%s?%s' % (RestHandler.REST_SOFT_VERSION_URL,
                         'fields=version')
        result_json = self.get_rest_info(url)
        return result_json

    def get_disk_info(self):
        """Query raw capacity of every disk."""
        url = '%s?%s' % (RestHandler.REST_DISK_URL,
                         'fields=rawSize')
        result_json = self.get_rest_info(url)
        return result_json

    def remove_alert(self, alert_id):
        """Delete a single alert instance by its id.

        Bug fix: REST_DEL_ALERTS_URL contains no '%s' placeholder, so the
        original ``REST_DEL_ALERTS_URL % alert_id`` raised TypeError
        ("not all arguments converted during string formatting") on every
        call.  Build the URL by concatenation instead.
        """
        url = '%s%s' % (RestHandler.REST_DEL_ALERTS_URL, alert_id)
        result_json = self.get_rest_info(url, method='DELETE')
        return result_json
class UNITYStorDriver(driver.StorageDriver):
    """Delfin storage driver for DELL EMC Unity arrays (REST based)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.rest_handler = rest_handler.RestHandler(**kwargs)
        self.rest_handler.login()

    def reset_connection(self, context, **kwargs):
        """Drop the current REST session and log in again.

        Honours an updated certificate-verification setting if supplied.
        """
        self.rest_handler.logout()
        self.rest_handler.verify = kwargs.get('verify', False)
        self.rest_handler.login()

    def close_connection(self):
        self.rest_handler.logout()

    def get_storage(self, context):
        """Collect identity, health, capacity and version of the array.

        NOTE(review): assumes the system/capacity/version/disk queries each
        return at least one entry; an empty 'entries' list would leave the
        locals unbound — confirm against the Unity REST contract.
        """
        system_info = self.rest_handler.get_storage()
        capacity = self.rest_handler.get_capacity()
        version_info = self.rest_handler.get_soft_version()
        disk_info = self.rest_handler.get_disk_info()
        status = constants.StorageStatus.OFFLINE
        if system_info is not None and capacity is not None:
            system_entries = system_info.get('entries')
            for system in system_entries:
                name = system.get('content').get('name')
                model = system.get('content').get('model')
                serial_number = system.get('content').get('serialNumber')
                health_value = system.get('content').get('health').get('value')
                if health_value in consts.HEALTH_OK:
                    status = constants.StorageStatus.NORMAL
                else:
                    status = constants.StorageStatus.ABNORMAL
                break
            capacity_info = capacity.get('entries')
            for per_capacity in capacity_info:
                free = per_capacity.get('content').get('sizeFree')
                total = per_capacity.get('content').get('sizeTotal')
                used = per_capacity.get('content').get('sizeUsed')
                subs = per_capacity.get('content').get('sizeSubscribed')
                break
            soft_version = version_info.get('entries')
            for soft_info in soft_version:
                version = soft_info.get('content').get('id')
                break
            disk_entrier = disk_info.get('entries')
            raw = 0
            for disk in disk_entrier:
                raw = raw + int(disk.get('content').get('rawSize'))

            result = {
                'name': name,
                'vendor': 'DELL EMC',
                'model': model,
                'status': status,
                'serial_number': serial_number,
                'firmware_version': version,
                'location': '',
                'subscribed_capacity': int(subs),
                'total_capacity': int(total),
                'raw_capacity': int(raw),
                'used_capacity': int(used),
                'free_capacity': int(free)
            }
            return result

    def list_storage_pools(self, context):
        """Return all pools as delfin pool dicts (block type only)."""
        pool_info = self.rest_handler.get_all_pools()
        pool_list = []
        pool_type = constants.StorageType.BLOCK
        if pool_info is not None:
            pool_entries = pool_info.get('entries')
            for pool in pool_entries:
                health_value = pool.get('content').get('health').get('value')
                if health_value in consts.HEALTH_OK:
                    status = constants.StorageStatus.NORMAL
                else:
                    status = constants.StorageStatus.ABNORMAL
                p = {
                    'name': pool.get('content').get('name'),
                    'storage_id': self.storage_id,
                    'native_storage_pool_id': str(
                        pool.get('content').get('id')),
                    'description': pool.get('content').get('description'),
                    'status': status,
                    'storage_type': pool_type,
                    'total_capacity': int(pool.get('content').
                                          get('sizeTotal')),
                    'subscribed_capacity': int(pool.get('content').get(
                        'sizeSubscribed')),
                    'used_capacity': int(pool.get('content').get('sizeUsed')),
                    'free_capacity': int(pool.get('content').get('sizeFree'))
                }
                pool_list.append(p)
        return pool_list

    def volume_handler(self, volumes, volume_list):
        """Append delfin volume dicts built from one page of LUN entries."""
        if volumes is not None:
            vol_entries = volumes.get('entries')
            for volume in vol_entries:
                total = volume.get('content').get('sizeTotal')
                used = volume.get('content').get('sizeAllocated')
                vol_type = constants.VolumeType.THICK
                if volume.get('content').get('isThinEnabled') is True:
                    vol_type = constants.VolumeType.THIN
                # NOTE(review): 'compressed' is hard-coded True here; the
                # REST reply is not consulted — confirm intended.
                compressed = True
                deduplicated = volume.get('content').\
                    get('isAdvancedDedupEnabled')
                health_value = volume.get('content').get('health').get('value')
                if health_value in consts.HEALTH_OK:
                    status = constants.StorageStatus.NORMAL
                else:
                    status = constants.StorageStatus.ABNORMAL
                v = {
                    'name': volume.get('content').get('name'),
                    'storage_id': self.storage_id,
                    'description': volume.get('content').get('description'),
                    'status': status,
                    'native_volume_id': str(volume.get('content').get('id')),
                    'native_storage_pool_id':
                        volume.get('content').get('pool').get('id'),
                    'wwn': volume.get('content').get('wwn'),
                    'type': vol_type,
                    'total_capacity': int(total),
                    'used_capacity': int(used),
                    'free_capacity': int(total - used),
                    'compressed': compressed,
                    'deduplicated': deduplicated
                }
                volume_list.append(v)

    def list_volumes(self, context):
        """Page through all LUNs and return them as delfin volume dicts."""
        page_size = 1
        volume_list = []
        while True:
            luns = self.rest_handler.get_all_luns(page_size)
            if 'entries' not in luns:
                break
            if len(luns['entries']) < 1:
                break
            self.volume_handler(luns, volume_list)
            page_size = page_size + 1

        return volume_list

    def list_alerts(self, context, query_para=None):
        """Page through all alerts and return parsed delfin alert dicts."""
        page_size = 1
        alert_model_list = []
        while True:
            alert_list = self.rest_handler.get_all_alerts(page_size)
            if 'entries' not in alert_list:
                break
            if len(alert_list['entries']) < 1:
                break
            alert_handler.AlertHandler() \
                .parse_queried_alerts(alert_model_list, alert_list, query_para)
            page_size = page_size + 1

        return alert_model_list

    def add_trap_config(self, context, trap_config):
        pass

    def remove_trap_config(self, context, trap_config):
        pass

    @staticmethod
    def parse_alert(context, alert):
        """Translate an SNMP trap payload into a delfin alert model."""
        return AlertHandler.parse_alert(context, alert)

    def clear_alert(self, context, alert):
        """Delete an alert on the array.

        Bug fix: ``RestHandler.remove_alert(alert_id)`` takes only the
        alert id; the original call passed ``(context, alert)`` and raised
        TypeError on every invocation.
        """
        return self.rest_handler.remove_alert(alert)
+from unittest import TestCase, mock + +from requests import Session + +from delfin import context +from delfin.drivers.dell_emc.unity.rest_handler import RestHandler +from delfin.drivers.dell_emc.unity.unity import UNITYStorDriver + + +class Request: + def __init__(self): + self.environ = {'delfin.context': context.RequestContext()} + pass + + +ACCESS_INFO = { + "storage_id": "12345", + "rest": { + "host": "110.143.132.231", + "port": "8443", + "username": "username", + "password": "cGFzc3dvcmQ=" + }, + "ssh": { + "host": "110.143.132.231", + "port": "22", + "username": "username", + "password": "password", + "host_key": "weqewrerwerwerwe" + }, + "vendor": "dell_emc", + "model": "Unity 350F", + "extra_attributes": { + "array_id": "00112233" + } +} +GET_STORAGE = { + "@base": "https://8.44.162.244/api/types/system/instances?fields=name," + "model,serialNumber,health&per_page=2000", + "updated": "2020-10-19T08:38:21.009Z", + "links": [ + { + "rel": "self", + "href": "&page=1" + } + ], + "entries": [ + { + "@base": "https://8.44.162.244/api/instances/system", + "updated": "2020-10-19T08:38:21.009Z", + "links": [ + { + "rel": "self", + "href": "/0" + } + ], + "content": { + "id": "0", + "health": { + "value": 20, + "descriptionIds": [ + "ALRT_SYSTEM_MAJOR_FAILURE" + ], + "descriptions": [ + "The system has experienced one or more major failures" + ], + "resolutionIds": [ + "fix_problems" + ], + "resolutions": [ + "/help/webhelp/en_US/index.html?#unity_t_fix_" + ] + }, + "name": "CETV3182000026", + "model": "Unity 350F", + "serialNumber": "CETV3182000026" + } + } + ] +} +GET_CAPACITY = { + "@base": "https://8.44.162.244/api/types/systemCapacity/instances", + "updated": "2020-10-19T08:42:43.788Z", + "links": [ + { + "rel": "self", + "href": "&page=1" + } + ], + "entries": [ + { + "@base": "https://8.44.162.244/api/instances/systemCapacity", + "updated": "2020-10-19T08:42:43.788Z", + "links": [ + { + "rel": "self", + "href": "/0" + } + ], + "content": { + "id": "0", + 
"sizeFree": 2311766147072, + "sizeTotal": 8838774259712, + "sizeUsed": 6527008112640, + "sizeSubscribed": 307567976775680, + "totalLogicalSize": 307542206971904 + } + } + ] +} +GET_SOFT_VERSION = { + "@base": "https://8.44.162.244/api/types/installedSoftwareVersion", + "updated": "2020-10-19T08:42:43.788Z", + "links": [ + { + "rel": "self", + "href": "&page=1" + } + ], + "entries": [ + { + "@base": "https://8.44.162.244/api/instances", + "updated": "2020-10-19T08:42:43.788Z", + "links": [ + { + "rel": "self", + "href": "/0" + } + ], + "content": { + "id": "4.7.1" + } + } + ] +} +GET_DISK_INFO = { + "@base": "https://8.44.162.244/api/types/disk/instances?fields=rawSize", + "updated": "2020-10-19T08:42:43.788Z", + "links": [ + { + "rel": "self", + "href": "&page=1" + } + ], + "entries": [ + { + "@base": "https://8.44.162.244/api/instances/disk", + "updated": "2020-10-19T08:42:43.788Z", + "links": [ + { + "rel": "self", + "href": "/0" + } + ], + "content": { + "id": "0", + "rawSize": 2311766147072 + } + } + ] +} +GET_ALL_POOLS = { + "@base": "https://8.44.162.244/api/types/pool/instances", + "updated": "2020-10-19T08:45:43.217Z", + "links": [ + { + "rel": "self", + "href": "&page=1" + } + ], + "entries": [ + { + "@base": "https://8.44.162.244/api/instances/pool", + "updated": "2020-10-19T08:45:43.217Z", + "links": [ + { + "rel": "self", + "href": "/pool_1" + } + ], + "content": { + "id": "pool_1", + "type": 2, + "health": { + "value": 7, + "descriptionIds": [ + "ALRT_POOL_USER_THRESH", + "ALRT_POOL_DISK_EOL_SEVERE", + "ALRT_POOL_DRIVE_EOL_IN_60_DAYS" + ], + "descriptions": [ + "This storage pool has exceeded the capacity", + ], + "resolutionIds": [ + "pool_add_space" + ], + "resolutions": [ + "/help/webhelp/en_US/index.html" + ] + }, + "name": "pool1", + "sizeFree": 2311766147072, + "sizeTotal": 8838774259712, + "sizeUsed": 6527008112640, + "sizeSubscribed": 310896039559168 + } + } + ] +} +GET_ALL_LUNS = { + "@base": "https://8.44.162.244/api/types/lun/instances", + 
"updated": "2020-10-19T08:55:15.776Z", + "links": [ + { + "rel": "self", + "href": "&page=1" + } + ], + "entries": [ + { + "@base": "https://8.44.162.244/api/instances/lun", + "updated": "2020-10-19T08:55:15.776Z", + "links": [ + { + "rel": "self", + "href": "/sv_1" + } + ], + "content": { + "id": "sv_1", + "type": 2, + "health": { + "value": 5, + "descriptionIds": [ + "ALRT_VOL_OK" + ], + "descriptions": [ + "The LUN is operating normally. No action is required." + ] + }, + "name": "LUN-00", + "sizeTotal": 107374182400, + "sizeAllocated": 0, + "wwn": "60:06:01:60:0B:00:49:00:BE:CE:6C:5C:56:C1:9D:D2", + "pool": { + "id": "pool_1" + } + } + }, + { + "@base": "https://8.44.162.244/api/instances/lun", + "updated": "2020-10-19T08:55:15.776Z", + "links": [ + { + "rel": "self", + "href": "/sv_2" + } + ], + "content": { + "id": "sv_2", + "type": 2, + "health": { + "value": 5, + "descriptionIds": [ + "ALRT_VOL_OK" + ], + "descriptions": [ + "The LUN is operating normally. No action is required." 
+ ] + }, + "name": "LUN-01", + "sizeTotal": 107374182400, + "sizeAllocated": 0, + "wwn": "60:06:01:60:0B:00:49:00:BE:CE:6C:5C:9B:86:B5:71", + "pool": { + "id": "pool_1" + } + } + } + ] +} +GET_ALL_LUNS_NULL = { + "@base": "https://8.44.162.244/api/types/alert/instances", + "updated": "2020-10-19T09:02:57.980Z", + "links": [ + { + "rel": "self", + "href": "&page=1" + }, + { + "rel": "next", + "href": "&page=2" + } + ], + "entries": [] +} +GET_ALL_ALERTS = { + "@base": "https://8.44.162.244/api/types/alert/instances", + "updated": "2020-10-19T09:02:57.980Z", + "links": [ + { + "rel": "self", + "href": "&page=1" + }, + { + "rel": "next", + "href": "&page=2" + } + ], + "entries": [ + { + "@base": "https://8.44.162.244/api/instances/alert", + "updated": "2020-10-19T09:02:57.980Z", + "links": [ + { + "rel": "self", + "href": "/alert_31523" + } + ], + "content": { + "id": "alert_31523", + "severity": 4, + "timestamp": "2020-10-12T09:09:52.609Z", + "component": { + "id": "Host_87", + "resource": "host" + }, + "messageId": "14:608fe", + "message": "Host hpux11iv2 does not have any initiators" + " logged into the storage system.", + "descriptionId": "ALRT_HOST_NO_LOGGED_IN_INITIATORS", + "description": "The host does not have any initiators.", + "resolutionId": "AddIntrWiz", + "resolution": "/help/webhelp/en_US/index.html" + } + }, + { + "@base": "https://8.44.162.244/api/instances/alert", + "updated": "2020-10-19T09:02:57.980Z", + "links": [ + { + "rel": "self", + "href": "/alert_31524" + } + ], + "content": { + "id": "alert_31524", + "severity": 6, + "timestamp": "2020-10-12T09:10:54.936Z", + "component": { + "id": "Host_87", + "resource": "host" + }, + "messageId": "14:608fc", + "message": "Host hpux11iv2 is operating normally.", + "descriptionId": "ALRT_COMPONENT_OK", + "description": "The component is operating normally.", + "resolutionId": "0", + "resolution": "0" + } + } + ] +} +GET_ALL_ALERTS_NULL = { + "@base": "https://8.44.162.244/api/types/alert/instances", + 
"updated": "2020-10-19T09:02:57.980Z", + "links": [ + { + "rel": "self", + "href": "&page=1" + }, + { + "rel": "next", + "href": "&page=2" + } + ], + "entries": [] +} +TRAP_INFO = { + "1.3.6.1.2.1.1.3.0": "0", + '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.1139.103.1.18.2.0', + '1.3.6.1.4.1.1139.103.1.18.1.1': 'eeeeeeeee', + '1.3.6.1.4.1.1139.103.1.18.1.3': 'ddddddd', + '1.3.6.1.4.1.1139.103.1.18.1.4': 'this is test', + '1.3.6.1.4.1.1139.103.1.18.1.5': '2020/11/20 14:10:10', + '1.3.6.1.4.1.1139.103.1.18.1.2': 'test' +} +ALERT_INFO = [ + { + 'location': "test", + 'alertId': '223232', + 'alertIndex': '1111111', + 'errorDetail': 'test alert', + 'errorSection': 'someting wrong', + 'occurenceTime': '2020-11-20T10:10:10', + 'errorLevel': 'Serious' + } +] + +storage_result = { + 'free_capacity': 2311766147072, + 'serial_number': 'CETV3182000026', + 'subscribed_capacity': 307567976775680, + 'used_capacity': 6527008112640, + 'vendor': 'DELL EMC', + 'location': '', + 'total_capacity': 8838774259712, + 'status': 'abnormal', + 'name': 'CETV3182000026', + 'model': 'Unity 350F', + 'raw_capacity': 2311766147072, + 'firmware_version': '4.7.1' +} +pool_result = [ + { + 'native_storage_pool_id': 'pool_1', + 'status': 'normal', + 'free_capacity': 2311766147072, + 'name': 'pool1', + 'storage_type': 'block', + 'total_capacity': 8838774259712, + 'description': None, + 'subscribed_capacity': 310896039559168, + 'used_capacity': 6527008112640, + 'storage_id': '12345' + } +] +volume_result = [ + { + 'used_capacity': 0, + 'free_capacity': 107374182400, + 'native_storage_pool_id': 'pool_1', + 'description': None, + 'deduplicated': None, + 'native_volume_id': 'sv_1', + 'total_capacity': 107374182400, + 'storage_id': '12345', + 'wwn': '60:06:01:60:0B:00:49:00:BE:CE:6C:5C:56:C1:9D:D2', + 'type': 'thick', + 'compressed': True, + 'name': 'LUN-00', + 'status': 'normal' + }, { + 'used_capacity': 0, + 'free_capacity': 107374182400, + 'native_storage_pool_id': 'pool_1', + 'description': None, + 
'deduplicated': None, + 'native_volume_id': 'sv_2', + 'total_capacity': 107374182400, + 'storage_id': '12345', + 'wwn': '60:06:01:60:0B:00:49:00:BE:CE:6C:5C:9B:86:B5:71', + 'type': 'thick', + 'compressed': True, + 'name': 'LUN-01', + 'status': 'normal' + } +] +alert_result = [ + { + 'severity': 'Warning', + 'location': '', + 'occur_time': 1602464992000, + 'type': 'EquipmentAlarm', + 'sequence_number': 'alert_31523', + 'alert_name': 'Host hpux11iv2 does not have any initiators ' + 'logged into the storage system.', + 'resource_type': 'Storage', + 'alert_id': '14:608fe', + 'description': 'The host does not have any initiators.', + 'category': 'Fault' + }, { + 'severity': 'Informational', + 'location': '', + 'occur_time': 1602465054000, + 'type': 'EquipmentAlarm', + 'sequence_number': 'alert_31524', + 'alert_name': 'Host hpux11iv2 is operating normally.', + 'resource_type': 'Storage', + 'alert_id': '14:608fc', + 'description': 'The component is operating normally.', + 'category': 'Fault' + } +] +trap_result = { + 'alert_id': 'ddddddd', + 'alert_name': 'test', + 'severity': 'Critical', + 'category': 'Fault', + 'type': 'EquipmentAlarm', + 'occur_time': 1605852610000, + 'description': 'this is test', + 'resource_type': 'Storage', + 'location': 'eeeeeeeee' +} + + +def create_driver(): + kwargs = ACCESS_INFO + m = mock.MagicMock(status_code=200) + with mock.patch.object(Session, 'get', return_value=m): + m.raise_for_status.return_value = 200 + m.json.return_value = { + "EMC-CSRF-TOKEN": "97c13b8082444b36bc2103026205fa64" + } + return UNITYStorDriver(**kwargs) + + +class TestUNITYStorDriver(TestCase): + driver = create_driver() + + def test_initrest(self): + m = mock.MagicMock(status_code=200) + with mock.patch.object(Session, 'get', return_value=m): + m.raise_for_status.return_value = 200 + kwargs = ACCESS_INFO + re = RestHandler(**kwargs) + self.assertIsNotNone(re) + + def test_get_storage(self): + RestHandler.get_rest_info = mock.Mock( + side_effect=[GET_STORAGE, 
GET_CAPACITY, GET_SOFT_VERSION, + GET_DISK_INFO]) + storage = self.driver.get_storage(context) + self.assertDictEqual(storage, storage_result) + + def test_list_storage_pools(self): + RestHandler.get_rest_info = mock.Mock(return_value=GET_ALL_POOLS) + pool = self.driver.list_storage_pools(context) + self.assertDictEqual(pool[0], pool_result[0]) + + def test_list_volumes(self): + RestHandler.get_rest_info = mock.Mock(side_effect=[ + GET_ALL_LUNS, GET_ALL_LUNS_NULL]) + volume = self.driver.list_volumes(context) + self.assertDictEqual(volume[0], volume_result[0]) + self.assertDictEqual(volume[1], volume_result[1]) + + def test_list_alerts(self): + RestHandler.get_rest_info = mock.Mock(side_effect=[ + GET_ALL_ALERTS, GET_ALL_ALERTS_NULL]) + alert = self.driver.list_alerts(context) + self.assertEqual(alert[0].get('alert_id'), + alert_result[0].get('alert_id')) + self.assertEqual(alert[1].get('alert_id'), + alert_result[1].get('alert_id')) + + def test_parse_alert(self): + trap = self.driver.parse_alert(context, TRAP_INFO) + self.assertEqual(trap.get('alert_id'), trap_result.get('alert_id')) + + def test_rest_close_connection(self): + m = mock.MagicMock(status_code=200) + with mock.patch.object(Session, 'post', return_value=m): + m.raise_for_status.return_value = 200 + m.json.return_value = None + re = self.driver.close_connection() + self.assertIsNone(re) + + def test_rest_handler_call(self): + m = mock.MagicMock(status_code=403) + with self.assertRaises(Exception) as exc: + with mock.patch.object(Session, 'get', return_value=m): + m.raise_for_status.return_value = 403 + m.json.return_value = None + url = 'http://test' + self.driver.rest_handler.call(url, '', 'GET') + self.assertIn('Bad response from server', str(exc.exception)) + + def test_reset_connection(self): + RestHandler.logout = mock.Mock(return_value={}) + m = mock.MagicMock(status_code=200) + with mock.patch.object(Session, 'get', return_value=m): + m.raise_for_status.return_value = 201 + m.json.return_value 
= { + "EMC-CSRF-TOKEN": "97c13b8082444b36bc2103026205fa64" + } + kwargs = ACCESS_INFO + re = self.driver.reset_connection(context, **kwargs) + self.assertIsNone(re) + + def test_err_storage_pools(self): + with self.assertRaises(Exception) as exc: + self.driver.list_storage_pools(context) + self.assertIn('Bad response from server', + str(exc.exception)) diff --git a/setup.py b/setup.py index 903437f16..85771d225 100644 --- a/setup.py +++ b/setup.py @@ -33,6 +33,7 @@ ], 'delfin.storage.drivers': [ 'fake_storage fake_driver = delfin.drivers.fake_storage:FakeStorageDriver', + 'dellemc unity = delfin.drivers.dell_emc.unity.unity:UNITYStorDriver', 'dellemc vmax = delfin.drivers.dell_emc.vmax.vmax:VMAXStorageDriver', 'hitachi vsp = delfin.drivers.hitachi.vsp.vsp_stor:HitachiVspDriver', 'hpe 3par = delfin.drivers.hpe.hpe_3par.hpe_3parstor:Hpe3parStorDriver', From 3ac30dea0ae2d0ba99a067c795695c966d66da36 Mon Sep 17 00:00:00 2001 From: jiangyutan <69443713+jiangyutan@users.noreply.github.com> Date: Wed, 6 Jan 2021 16:40:02 +0800 Subject: [PATCH 10/15] fix when there is more than 2000 volumes rest would timeout issue and change volume name as volume id when the volume name is none (#448) * fix when there is more than 2000 volumes rest would timeout issue and change volume name as volume id when the volume name is none Co-authored-by: Ashit Kumar --- delfin/drivers/hitachi/vsp/consts.py | 4 +- delfin/drivers/hitachi/vsp/rest_handler.py | 71 +++-- delfin/drivers/hitachi/vsp/vsp_stor.py | 45 +++- .../hitachi/vsp/test_hitachi_vspstor.py | 243 +++++++----------- 4 files changed, 184 insertions(+), 179 deletions(-) diff --git a/delfin/drivers/hitachi/vsp/consts.py b/delfin/drivers/hitachi/vsp/consts.py index 9bc3c3d35..b8538606e 100644 --- a/delfin/drivers/hitachi/vsp/consts.py +++ b/delfin/drivers/hitachi/vsp/consts.py @@ -11,10 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. -SOCKET_TIMEOUT = 30 +SOCKET_TIMEOUT = 90 ERROR_SESSION_INVALID_CODE = 403 ERROR_SESSION_IS_BEING_USED_CODE = 409 BLOCK_SIZE = 512 -MAX_LDEV_NUMBER_OF_RESTAPI = 16383 +LDEV_NUMBER_OF_PER_REQUEST = 300 SUPPORTED_VSP_SERIES = ('VSP G350', 'VSP G370', 'VSP G700', 'VSP G900', 'VSP F350', 'VSP F370', 'VSP F700', 'VSP F900') diff --git a/delfin/drivers/hitachi/vsp/rest_handler.py b/delfin/drivers/hitachi/vsp/rest_handler.py index 9bc068de0..89b0a5c0c 100644 --- a/delfin/drivers/hitachi/vsp/rest_handler.py +++ b/delfin/drivers/hitachi/vsp/rest_handler.py @@ -43,7 +43,7 @@ def __init__(self, **kwargs): def call(self, url, data=None, method=None, calltimeout=consts.SOCKET_TIMEOUT): try: - res = self.do_call(url, data, method, calltimeout) + res = self.call_with_token(url, data, method, calltimeout) if (res.status_code == consts.ERROR_SESSION_INVALID_CODE or res.status_code == consts.ERROR_SESSION_IS_BEING_USED_CODE): @@ -53,11 +53,8 @@ def call(self, url, data=None, method=None, if method == 'DELETE' and RestHandler. \ LOGOUT_URL in url: return res - self.rest_auth_token = None - access_session = self.login() - if access_session is not None: - res = self. \ - do_call(url, data, method, calltimeout) + if self.get_token(): + res = self.call_with_token(url, data, method, calltimeout) else: LOG.error('Login error,get access_session failed') elif res.status_code == 503: @@ -70,17 +67,33 @@ def call(self, url, data=None, method=None, LOG.error(err_msg) raise e + def call_with_token(self, url, data, method, calltimeout): + auth_key = None + if self.session: + auth_key = self.session.headers.get(RestHandler.AUTH_KEY, None) + if auth_key: + self.session.headers[RestHandler.AUTH_KEY] \ + = cryptor.decode(auth_key) + res = self. 
\ + do_call(url, data, method, calltimeout) + if auth_key: + self.session.headers[RestHandler.AUTH_KEY] = auth_key + return res + def get_rest_info(self, url, timeout=consts.SOCKET_TIMEOUT, data=None): result_json = None + if self.session and url != RestHandler.COMM_URL: + auth_key = self.session.headers.get(RestHandler.AUTH_KEY, None) + if auth_key is None: + self.get_token() res = self.call(url, data, 'GET', timeout) if res.status_code == 200: result_json = res.json() return result_json - def login(self): + def get_token(self): try: - self.get_device_id() - access_session = self.rest_auth_token + succeed = False if self.san_address: url = '%s/%s/sessions' % \ (RestHandler.COMM_URL, @@ -94,15 +107,16 @@ def login(self): requests.auth.HTTPBasicAuth( self.rest_username, cryptor.decode(self.rest_password)) - res = self. \ - do_call(url, data, 'POST', 10) + res = self.call_with_token(url, data, 'POST', 30) if res.status_code == 200: + succeed = True result = res.json() - self.session_id = result.get('sessionId') + self.session_id = cryptor.encode( + result.get('sessionId')) access_session = 'Session %s' % result.get('token') - self.rest_auth_token = access_session self.session.headers[ - RestHandler.AUTH_KEY] = access_session + RestHandler.AUTH_KEY] = cryptor.encode( + access_session) else: LOG.error("Login error. 
URL: %(url)s\n" "Reason: %(reason)s.", @@ -112,9 +126,18 @@ def login(self): else: raise exception.BadResponse(res.text) else: - LOG.error('Login Parameter error') + LOG.error('Token Parameter error') - return access_session + return succeed + except Exception as e: + LOG.error("Get token error: %s", six.text_type(e)) + raise e + + def login(self): + try: + succeed = False + succeed = self.get_device_id() + return succeed except Exception as e: LOG.error("Login error: %s", six.text_type(e)) raise e @@ -126,15 +149,15 @@ def logout(self): url = '%s/%s/sessions/%s' % \ (RestHandler.COMM_URL, self.storage_device_id, - self.session_id) + cryptor.decode(self.session_id)) if self.san_address: self.call(url, method='DELETE') + url = None self.session_id = None self.storage_device_id = None self.device_model = None self.serial_number = None self.session = None - self.rest_auth_token = None else: LOG.error('logout error:session id not found') except Exception as err: @@ -144,11 +167,13 @@ def logout(self): def get_device_id(self): try: + succeed = False if self.session is None: self.init_http_head() storage_systems = self.get_system_info() system_info = storage_systems.get('data') for system in system_info: + succeed = True if system.get('model') in consts.SUPPORTED_VSP_SERIES: if system.get('ctl1Ip') == self.rest_host or \ system.get('ctl2Ip') == self.rest_host: @@ -163,6 +188,7 @@ def get_device_id(self): break if self.storage_device_id is None: LOG.error("Get device id fail,model or something is wrong") + return succeed except Exception as e: LOG.error("Get device id error: %s", six.text_type(e)) raise e @@ -189,10 +215,11 @@ def get_all_pools(self): result_json = self.get_rest_info(url) return result_json - def get_all_volumes(self): - url = '%s/%s/ldevs?ldevOption=defined&count=%s' % \ - (RestHandler.COMM_URL, self.storage_device_id, - consts.MAX_LDEV_NUMBER_OF_RESTAPI) + def get_volumes(self, head_id, + max_number=consts.LDEV_NUMBER_OF_PER_REQUEST): + url = 
'%s/%s/ldevs?headLdevId=%s&count=%s' % \ + (RestHandler.COMM_URL, self.storage_device_id, head_id, + max_number) result_json = self.get_rest_info(url) return result_json diff --git a/delfin/drivers/hitachi/vsp/vsp_stor.py b/delfin/drivers/hitachi/vsp/vsp_stor.py index 36af9e5ec..863dff8e3 100644 --- a/delfin/drivers/hitachi/vsp/vsp_stor.py +++ b/delfin/drivers/hitachi/vsp/vsp_stor.py @@ -155,15 +155,36 @@ def list_storage_pools(self, context): LOG.error(err_msg) raise exception.InvalidResults(err_msg) + @staticmethod + def to_vsp_lun_id_format(lun_id): + hex_str = hex(lun_id) + result = '' + hex_lun_id = hex_str[2::].rjust(6, '0') + is_first = True + for i in range(0, len(hex_lun_id), 2): + if is_first is True: + result = '%s' % (hex_lun_id[i:i + 2]) + is_first = False + else: + result = '%s:%s' % (result, hex_lun_id[i:i + 2]) + return result + def list_volumes(self, context): - try: - volumes_info = self.rest_handler.get_all_volumes() + head_id = 0 + is_end = False + volume_list = [] + while is_end is False: + is_end = self.get_volumes_paginated(volume_list, head_id) + head_id += consts.LDEV_NUMBER_OF_PER_REQUEST + return volume_list - volume_list = [] + def get_volumes_paginated(self, volume_list, head_id): + try: + volumes_info = self.rest_handler.get_volumes(head_id) volumes = volumes_info.get('data') for volume in volumes: if volume.get('emulationType') == 'NOT DEFINED': - continue + return True orig_pool_id = volume.get('poolId') compressed = False deduplicated = False @@ -190,17 +211,19 @@ def list_volumes(self, context): # Because there is only subscribed capacity in device,so free # capacity always 0 free_cap = 0 + native_volume_id = HitachiVspDriver.to_vsp_lun_id_format( + volume.get('ldevId')) if volume.get('label'): name = volume.get('label') else: - name = 'ldev_%s' % str(volume.get('ldevId')) + name = native_volume_id v = { 'name': name, 'storage_id': self.storage_id, 'description': 'Hitachi VSP volume', 'status': status, - 'native_volume_id': 
str(volume.get('ldevId')), + 'native_volume_id': str(native_volume_id), 'native_storage_pool_id': orig_pool_id, 'type': vol_type, 'total_capacity': total_cap, @@ -211,8 +234,7 @@ def list_volumes(self, context): } volume_list.append(v) - - return volume_list + return False except exception.DelfinException as err: err_msg = "Failed to get volumes metrics from hitachi vsp: %s" % \ (six.text_type(err)) @@ -236,7 +258,7 @@ def parse_queried_alerts(alerts, alert_list, query_para=None): continue a = { 'location': alert.get('location'), - 'alarm_id': alert.get('alertId'), + 'alert_id': alert.get('alertId'), 'sequence_number': alert.get('alertIndex'), 'description': alert.get('errorDetail'), 'alert_name': alert.get('errorSection'), @@ -263,6 +285,11 @@ def list_alerts(self, context, query_para=None): alert_list, query_para) HitachiVspDriver.parse_queried_alerts(alerts_info_dkc, alert_list, query_para) + else: + err_msg = "list_alerts is not supported in model %s" % \ + self.rest_handler.device_model + LOG.error(err_msg) + raise NotImplementedError(err_msg) return alert_list diff --git a/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py b/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py index cb7a58ede..aa293e930 100644 --- a/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py +++ b/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py @@ -51,9 +51,9 @@ def __init__(self): "data": [ { "storageDeviceId": "800000011633", - "model": "VSP G350", + "model": "VSP F1500", "serialNumber": 11633, - "svpIp": "110.143.132.231" + "svpIp": "110.143.132.231", } ] } @@ -90,38 +90,6 @@ def __init__(self): "dataReductionRate": 0, "snapshotUsedCapacity": 0, "suspendSnapshot": True - }, - { - "poolId": 1, - "poolStatus": "POLF", - "usedCapacityRate": 78, - "snapshotCount": 0, - "poolName": "hjw_test", - "availableVolumeCapacity": 3530184, - "totalPoolCapacity": 16221576, - "numOfLdevs": 6, - "firstLdevId": 0, - "warningThreshold": 70, - 
"depletionThreshold": 80, - "virtualVolumeCapacityRate": -1, - "isMainframe": False, - "isShrinking": False, - "locatedVolumeCount": 24, - "totalLocatedCapacity": 12702144, - "blockingMode": "NB", - "totalReservedCapacity": 0, - "reservedVolumeCount": 0, - "poolType": "HDP", - "duplicationNumber": 0, - "dataReductionAccelerateCompCapacity": 0, - "dataReductionCapacity": 0, - "dataReductionBeforeCapacity": 0, - "dataReductionAccelerateCompRate": 0, - "duplicationRate": 0, - "compressionRate": 0, - "dataReductionRate": 0, - "snapshotUsedCapacity": 0, - "suspendSnapshot": True } ] } @@ -167,100 +135,8 @@ def __init__(self): "isAluaEnabled": False }, { - "ldevId": 1, - "clprId": 0, - "emulationType": "OPEN-V", - "byteFormatCapacity": "2.57 T", - "blockCapacity": 5538459648, - "composingPoolId": 1, - "attributes": [ - "POOL" - ], - "raidLevel": "RAID5", - "raidType": "3D+1P", - "numOfParityGroups": 1, - "parityGroupIds": [ - "5-1" - ], - "driveType": "SLB5E-M1R9SS", - "driveByteFormatCapacity": "1.74 T", - "driveBlockCapacity": 3750000030, - "status": "NML", - "mpBladeId": 4, - "ssid": "0004", - "resourceGroupId": 0, - "isAluaEnabled": False - }, - { - "ldevId": 2, - "clprId": 0, - "emulationType": "OPEN-V-CVS", - "byteFormatCapacity": "500.00 G", - "blockCapacity": 1048576000, - "numOfPorts": 4, - "ports": [ - { - "portId": "CL3-A", - "hostGroupNumber": 1, - "hostGroupName": "3A84", - "lun": 0 - }, - { - "portId": "CL2-B", - "hostGroupNumber": 0, - "hostGroupName": "2B-G00", - "lun": 0 - }, - { - "portId": "CL4-A", - "hostGroupNumber": 1, - "hostGroupName": "75_197b", - "lun": 0 - }, - { - "portId": "CL2-A", - "hostGroupNumber": 1, - "hostGroupName": "198_126b", - "lun": 0 - } - ], - "attributes": [ - "CVS", - "HDP" - ], - "label": "hjw_test_lun0", - "status": "NML", - "mpBladeId": 0, - "ssid": "0004", - "poolId": 1, - "numOfUsedBlock": 1048621056, - "isFullAllocationEnabled": False, - "resourceGroupId": 0, - "dataReductionStatus": "DISABLED", - "dataReductionMode": 
"disabled", - "isAluaEnabled": False - }, - { - "ldevId": 99, - "clprId": 0, - "emulationType": "OPEN-V-CVS", - "byteFormatCapacity": "500.00 G", - "blockCapacity": 1048576000, - "attributes": [ - "CVS", - "HDP" - ], - "label": "AIX_performance_test_zj", - "status": "NML", - "mpBladeId": 5, - "ssid": "0004", - "poolId": 0, - "numOfUsedBlock": 1048621056, - "isFullAllocationEnabled": False, - "resourceGroupId": 0, - "dataReductionStatus": "DISABLED", - "dataReductionMode": "disabled", - "isAluaEnabled": False + "ldevId": 0, + "emulationType": "NOT DEFINED", } ] } @@ -286,6 +162,79 @@ def __init__(self): } ] +storage_result = { + 'name': 'VSP F1500_110.143.132.231', + 'vendor': 'Hitachi', + 'description': 'Hitachi VSP Storage', + 'model': 'VSP F1500', + 'status': 'normal', + 'serial_number': '11633', + 'firmware_version': '80-06-70/00', + 'location': '', + 'raw_capacity': 18687222349824, + 'total_capacity': 18687222349824, + 'used_capacity': 10511909388288, + 'free_capacity': 8175312961536 +} + +volume_result = [ + { + 'name': '00:00:00', + 'storage_id': '12345', + 'description': 'Hitachi VSP volume', + 'status': 'normal', + 'native_volume_id': '00:00:00', + 'native_storage_pool_id': None, + 'type': 'thick', + 'total_capacity': 2835691339776, + 'used_capacity': 2835691339776, + 'free_capacity': 0, + 'compressed': False, + 'deduplicated': False, + } +] + +pool_result = [ + { + 'name': 'p3-1', + 'storage_id': '12345', + 'native_storage_pool_id': '0', + 'description': 'Hitachi VSP Pool', + 'status': 'normal', + 'storage_type': 'block', + 'total_capacity': 18687222349824, + 'used_capacity': 10511909388288, + 'free_capacity': 8175312961536, + } +] + +alert_result = [ + { + 'location': 'test', + 'alert_id': '223232', + 'sequence_number': '1111111', + 'description': 'test alert', + 'alert_name': 'someting wrong', + 'resource_type': 'Storage', + 'occur_time': 1605838210000, + 'category': 'Fault', + 'type': 'EquipmentAlarm', + 'severity': 'Major', + } +] + +trap_alert_result 
= { + 'alert_id': 'eeeeeeeee', + 'alert_name': 'ddddddd', + 'severity': 'Critical', + 'category': 'Fault', + 'type': 'EquipmentAlarm', + 'occur_time': 1605852610000, + 'description': 'ddddddd', + 'resource_type': 'Storage', + 'location': ' System Version = 7.4.0.11 ' +} + def create_driver(): kwargs = ACCESS_INFO @@ -318,28 +267,38 @@ def test_get_storage(self): RestHandler.get_system_info = mock.Mock(return_value=GET_DEVICE_ID) RestHandler.get_rest_info = mock.Mock( side_effect=[GET_ALL_POOLS, GET_SPECIFIC_STORAGE]) - self.driver.get_storage(context) + storage = self.driver.get_storage(context) + self.assertDictEqual(storage, storage_result) def test_list_storage_pools(self): RestHandler.get_rest_info = mock.Mock(return_value=GET_ALL_POOLS) - self.driver.list_storage_pools(context) + pool = self.driver.list_storage_pools(context) + self.assertDictEqual(pool[0], pool_result[0]) def test_list_volumes(self): RestHandler.get_rest_info = mock.Mock(return_value=GET_ALL_VOLUMES) - self.driver.list_volumes(context) + volume = self.driver.list_volumes(context) + self.assertDictEqual(volume[0], volume_result[0]) def test_list_alerts(self): - RestHandler.get_rest_info = mock.Mock(return_value=ALERT_INFO) - RestHandler.get_rest_info = mock.Mock(return_value=ALERT_INFO) - RestHandler.get_rest_info = mock.Mock(return_value=ALERT_INFO) - self.driver.list_alerts(context) + with self.assertRaises(Exception) as exc: + RestHandler.get_rest_info = mock.Mock(return_value=ALERT_INFO) + RestHandler.get_rest_info = mock.Mock(return_value=ALERT_INFO) + RestHandler.get_rest_info = mock.Mock(return_value=ALERT_INFO) + self.driver.list_alerts(context) + self.assertEqual('list_alerts is not supported in model VSP F1500', + str(exc.exception)) def test_parse_queried_alerts(self): alert_list = [] HitachiVspDriver.parse_queried_alerts(ALERT_INFO, alert_list) + self.assertEqual(alert_list[0].get('alert_id'), + alert_result[0].get('alert_id')) def test_parse_alert(self): - 
self.driver.parse_alert(context, TRAP_INFO) + trap_alert = self.driver.parse_alert(context, TRAP_INFO) + self.assertEqual(trap_alert.get('alert_id'), + trap_alert_result.get('alert_id')) def test_rest_close_connection(self): m = mock.MagicMock(status_code=200) @@ -390,13 +349,5 @@ def test_list_volumes_call(self): with mock.patch.object(Session, 'get', return_value=m): m.raise_for_status.return_value = 200 m.json.return_value = GET_ALL_VOLUMES - self.driver.list_volumes(context) - - def test_add_trap_config(self): - self.driver.add_trap_config(context, None) - - def test_remove_trap_config(self): - self.driver.remove_trap_config(context, None) - - def test_clear_alert(self): - self.driver.clear_alert(context, None) + volume = self.driver.list_volumes(context) + self.assertDictEqual(volume[0], volume_result[0]) From dfc0e2a478c27e9eb77879a69d7686d4e14d81d5 Mon Sep 17 00:00:00 2001 From: jiangyutan <69443713+jiangyutan@users.noreply.github.com> Date: Wed, 6 Jan 2021 17:05:39 +0800 Subject: [PATCH 11/15] add alert_fix fouction and change alerts filter (#449) * add alert_clear test Co-authored-by: Ashit Kumar Co-authored-by: ThisIsClark --- .../drivers/ibm/storwize_svc/ssh_handler.py | 21 ++- .../drivers/ibm/storwize_svc/storwize_svc.py | 2 +- .../ibm/storwize_svc/test_ibm_storwize_svc.py | 127 +++++++++++++++--- 3 files changed, 123 insertions(+), 27 deletions(-) diff --git a/delfin/drivers/ibm/storwize_svc/ssh_handler.py b/delfin/drivers/ibm/storwize_svc/ssh_handler.py index 1be7f71f6..c97fad797 100644 --- a/delfin/drivers/ibm/storwize_svc/ssh_handler.py +++ b/delfin/drivers/ibm/storwize_svc/ssh_handler.py @@ -46,6 +46,7 @@ class SSHHandler(object): } SECONDS_TO_MS = 1000 + ALERT_NOT_FOUND_CODE = 'CMMVC8275E' def __init__(self, **kwargs): self.ssh_pool = SSHPool(**kwargs) @@ -175,13 +176,9 @@ def parse_string(self, value): def get_storage(self): try: system_info = self.exec_ssh_command('lssystem') - enclosure_info = self.exec_ssh_command('lsenclosure -delim :') - 
enclosure_res = enclosure_info.split('\n') - enclosure = enclosure_res[1].split(':') - serial_number = enclosure[7] storage_map = {} self.handle_detail(system_info, storage_map, split=' ') - + serial_number = storage_map.get('id') status = 'normal' if storage_map.get('statistics_status') == 'on' \ else 'offline' location = storage_map.get('location') @@ -338,7 +335,8 @@ def list_volumes(self, storage_id): def list_alerts(self, query_para): try: alert_list = [] - alert_info = self.exec_ssh_command('lseventlog -monitoring yes') + alert_info = self.exec_ssh_command('lseventlog -monitoring yes ' + '-message no') alert_res = alert_info.split('\n') for i in range(1, len(alert_res)): if alert_res[i] is None or alert_res[i] == '': @@ -360,7 +358,8 @@ def list_alerts(self, query_para): resource_type = alert_map.get('object_type', '') severity = self.SEVERITY_MAP.get(alert_map. get('notification_type')) - + if severity == 'Informational' or severity is None: + continue alert_model = { 'alert_id': event_id, 'alert_name': alert_name, @@ -384,3 +383,11 @@ def list_alerts(self, query_para): err_msg = "Failed to get storage alert: %s" % (six.text_type(err)) LOG.error(err_msg) raise exception.InvalidResults(err_msg) + + def fix_alert(self, alert): + command_line = 'cheventlog -fix %s' % alert + result = self.exec_ssh_command(command_line) + if result: + if self.ALERT_NOT_FOUND_CODE not in result: + raise exception.InvalidResults(six.text_type(result)) + LOG.warning("Alert %s doesn't exist.", alert) diff --git a/delfin/drivers/ibm/storwize_svc/storwize_svc.py b/delfin/drivers/ibm/storwize_svc/storwize_svc.py index 98ed7b5e7..55963039f 100644 --- a/delfin/drivers/ibm/storwize_svc/storwize_svc.py +++ b/delfin/drivers/ibm/storwize_svc/storwize_svc.py @@ -50,4 +50,4 @@ def parse_alert(context, alert): return SSHHandler.parse_alert(alert) def clear_alert(self, context, alert): - pass + return self.ssh_hanlder.fix_alert(alert) diff --git 
a/delfin/tests/unit/drivers/ibm/storwize_svc/test_ibm_storwize_svc.py b/delfin/tests/unit/drivers/ibm/storwize_svc/test_ibm_storwize_svc.py index 864721450..365b7e26a 100644 --- a/delfin/tests/unit/drivers/ibm/storwize_svc/test_ibm_storwize_svc.py +++ b/delfin/tests/unit/drivers/ibm/storwize_svc/test_ibm_storwize_svc.py @@ -270,7 +270,7 @@ def __init__(self): status message fixed no auto_fixed no -notification_type informational +notification_type warning event_id 980221 event_id_text Error log cleared error_code @@ -317,6 +317,84 @@ def __init__(self): 'storage_id': '4992d7f5-4f73-4123-a27b-6e27889f3852' } +storage_result = { + 'name': 'Cluster_192.168.70.125', + 'vendor': 'IBM', + 'model': 'IBM Storwize V7000', + 'status': 'normal', + 'serial_number': '00000200A1207E1F', + 'firmware_version': '7.4.0.11', + 'location': 'local', + 'total_capacity': 8961019766374, + 'raw_capacity': 12006666975313, + 'subscribed_capacity': 0, + 'used_capacity': 5552533720268, + 'free_capacity': 3408486046105 +} + +pool_result = [ + { + 'name': 'mdiskgrp0', + 'storage_id': '12345', + 'native_storage_pool_id': '1', + 'description': '', + 'status': 'normal', + 'storage_type': 'block', + 'subscribed_capacity': 6058309069045, + 'total_capacity': 8939029533818, + 'used_capacity': 5552533720268, + 'free_capacity': 3364505580994 + } +] + +volume_result = [ + { + 'description': '', + 'status': 'normal', + 'total_capacity': 53687091200, + 'used_capacity': 53687091200, + 'type': 'thick', + 'free_capacity': 0, + 'native_volume_id': '0', + 'deduplicated': True, + 'native_storage_pool_id': '1', + 'wwn': '60050768028401F87C00000000000000', + 'compressed': False, + 'name': 'V7000LUN_Mig', + 'storage_id': '12345' + } +] + +alert_result = [ + { + 'type': 'EquipmentAlarm', + 'location': 'node1', + 'category': 'Fault', + 'occur_time': 1605085070000, + 'sequence_number': '101', + 'resource_type': 'node', + 'alert_name': 'Error log cleared', + 'severity': 'warning', + 'alert_id': '980221', + 
'description': 'Error log cleared' + } +] + +trap_alert_result = { + 'alert_id': '981004', + 'type': 'EquipmentAlarm', + 'severity': 'Informational', + 'sequence_number': '165', + 'description': 'FC discovery occurred, no configuration changes ' + 'were detected', + 'occur_time': 1604970507000, + 'alert_name': 'FC discovery occurred, no configuration changes ' + 'were detected', + 'resource_type': 'cluster', + 'location': 'Cluster_192.168.70.125', + 'category': 'Fault' +} + def create_driver(): @@ -338,33 +416,38 @@ def test_list_storage(self): SSHPool.get = mock.Mock( return_value={paramiko.SSHClient()}) SSHHandler.do_exec = mock.Mock( - side_effect=[system_info, enclosure_info]) - self.driver.get_storage(context) + side_effect=[system_info]) + storage = self.driver.get_storage(context) + self.assertDictEqual(storage, storage_result) def test_list_storage_pools(self): SSHPool.get = mock.Mock( return_value={paramiko.SSHClient()}) SSHHandler.do_exec = mock.Mock( side_effect=[pools_info, pool_info]) - self.driver.list_storage_pools(context) + pool = self.driver.list_storage_pools(context) + self.assertDictEqual(pool[0], pool_result[0]) def test_list_volumes(self): SSHPool.get = mock.Mock( return_value={paramiko.SSHClient()}) SSHHandler.do_exec = mock.Mock( side_effect=[volumes_info, volume_info]) - self.driver.list_volumes(context) + volume = self.driver.list_volumes(context) + self.assertDictEqual(volume[0], volume_result[0]) def test_list_alerts(self): query_para = { - "begin_time": 160508506000, - "end_time": 160508507000 + "begin_time": 1605085070000, + "end_time": 1605085070000 } SSHPool.get = mock.Mock( return_value={paramiko.SSHClient()}) SSHHandler.do_exec = mock.Mock( side_effect=[alerts_info, alert_info]) - self.driver.list_alerts(context, query_para) + alert = self.driver.list_alerts(context, query_para) + self.assertEqual(alert[0].get('alert_id'), + alert_result[0].get('alert_id')) def test_list_storage_with_error(self): with self.assertRaises(Exception) 
as exc: @@ -405,19 +488,25 @@ def test_ssh_pool_put(self): ssh_pool.remove(ssh) def test_parse_alert(self): - self.driver.parse_alert(context, trap_info) + alert = self.driver.parse_alert(context, trap_info) + self.assertEqual(alert.get('alert_id'), + trap_alert_result.get('alert_id')) def test_reset_connection(self): self.driver.reset_connection(context, **ACCESS_INFO) - def test_add_trap_config(self): - trap_config = '' - self.driver.add_trap_config(context, trap_config) - - def test_remove_trap_config(self): - trap_config = '' - self.driver.remove_trap_config(context, trap_config) - def test_clear_alert(self): - alert = '' - self.driver.clear_alert(context, alert) + alert_id = 101 + SSHPool.get = mock.Mock( + return_value={paramiko.SSHClient()}) + SSHHandler.do_exec = mock.Mock( + side_effect=['CMMVC8275E']) + self.driver.clear_alert(context, alert_id) + with self.assertRaises(Exception) as exc: + SSHPool.get = mock.Mock( + return_value={paramiko.SSHClient()}) + SSHHandler.do_exec = mock.Mock( + side_effect=['can not find alert']) + self.driver.clear_alert(context, alert_id) + self.assertIn('The results are invalid. 
can not find alert', + str(exc.exception)) From 4bca4b508a9c43c56e076809a90ec1868c7af6c9 Mon Sep 17 00:00:00 2001 From: ThisIsClark Date: Thu, 7 Jan 2021 09:50:59 +0800 Subject: [PATCH 12/15] Improvement for snmp validation (#446) * Improvement for snmp validation * Exception improvement * Add unit test Co-authored-by: Najmudheen <45681499+NajmudheenCT@users.noreply.github.com> --- delfin/alert_manager/snmp_validator.py | 51 +++---- delfin/task_manager/tasks/alerts.py | 4 + delfin/tests/unit/alert_manager/fakes.py | 21 +++ .../unit/alert_manager/test_snmp_validator.py | 135 ++++++++++++++++++ .../unit/task_manager/test_alert_task.py | 115 +++++++++++++++ 5 files changed, 301 insertions(+), 25 deletions(-) create mode 100644 delfin/tests/unit/alert_manager/test_snmp_validator.py create mode 100644 delfin/tests/unit/task_manager/test_alert_task.py diff --git a/delfin/alert_manager/snmp_validator.py b/delfin/alert_manager/snmp_validator.py index 1c51f419e..0aee41960 100644 --- a/delfin/alert_manager/snmp_validator.py +++ b/delfin/alert_manager/snmp_validator.py @@ -45,7 +45,6 @@ def validate(self, ctxt, alert_source): # engine id if engine id is empty. Therefore, engine id # should be saved in database. 
if not engine_id and alert_source.get('engine_id'): - alert_source_dict = { 'engine_id': alert_source.get('engine_id')} db.alert_source_update(ctxt, @@ -81,23 +80,33 @@ def validate_connectivity(alert_source): cmd_gen = cmdgen.CommandGenerator() - # Register engine observer to get engineId, - # Code reference from: http://snmplabs.com/pysnmp/ - observer_context = {} - cmd_gen.snmpEngine.observer.registerObserver( - lambda e, p, v, c: c.update( - securityEngineId=v['securityEngineId']), - 'rfc3412.prepareDataElements:internal', - cbCtx=observer_context - ) - version = alert_source.get('version') # Connect to alert source through snmp get to check the configuration try: + target = cmdgen.UdpTransportTarget((alert_source['host'], + alert_source['port']), + timeout=alert_source[ + 'expiration'], + retries=alert_source[ + 'retry_num']) + target.setLocalAddress((CONF.my_ip, 0)) if version.lower() == 'snmpv3': - auth_key = cryptor.decode(alert_source['auth_key']) - privacy_key = cryptor.decode(alert_source['privacy_key']) + # Register engine observer to get engineId, + # Code reference from: http://snmplabs.com/pysnmp/ + observer_context = {} + cmd_gen.snmpEngine.observer.registerObserver( + lambda e, p, v, c: c.update( + securityEngineId=v['securityEngineId']), + 'rfc3412.prepareDataElements:internal', + cbCtx=observer_context + ) + auth_key = None + if alert_source['auth_key']: + auth_key = cryptor.decode(alert_source['auth_key']) + privacy_key = None + if alert_source['privacy_key']: + privacy_key = cryptor.decode(alert_source['privacy_key']) auth_protocol = None privacy_protocol = None if alert_source['auth_protocol']: @@ -117,12 +126,7 @@ def validate_connectivity(alert_source): authProtocol=auth_protocol, privProtocol=privacy_protocol, securityEngineId=engine_id), - cmdgen.UdpTransportTarget((alert_source['host'], - alert_source['port']), - timeout=alert_source[ - 'expiration'], - retries=alert_source[ - 'retry_num']), + target, constants.SNMP_QUERY_OID, ) @@ 
-137,15 +141,12 @@ def validate_connectivity(alert_source): cmdgen.CommunityData( community_string, contextName=alert_source['context_name']), - cmdgen.UdpTransportTarget((alert_source['host'], - alert_source['port']), - timeout=alert_source[ - 'expiration'], - retries=alert_source[ - 'retry_num']), + target, constants.SNMP_QUERY_OID, ) + cmd_gen.snmpEngine.transportDispatcher.closeDispatcher() + if not error_indication: return alert_source diff --git a/delfin/task_manager/tasks/alerts.py b/delfin/task_manager/tasks/alerts.py index 318b95644..cc38783a4 100644 --- a/delfin/task_manager/tasks/alerts.py +++ b/delfin/task_manager/tasks/alerts.py @@ -16,6 +16,7 @@ from oslo_log import log from delfin import db +from delfin import exception from delfin.common import alert_util from delfin.drivers import api as driver_manager from delfin.exporter import base_exporter @@ -66,6 +67,9 @@ def clear_alerts(self, ctx, storage_id, sequence_number_list): try: self.driver_manager.clear_alert(ctx, storage_id, sequence_number) + except (exception.AccessInfoNotFound, + exception.StorageNotFound) as e: + LOG.warning("Ignore the situation: %s", e.msg) except Exception as e: LOG.error("Failed to clear alert with sequence number: %s " "for storage: %s, reason: %s.", diff --git a/delfin/tests/unit/alert_manager/fakes.py b/delfin/tests/unit/alert_manager/fakes.py index 20ab20b65..25321c7b7 100644 --- a/delfin/tests/unit/alert_manager/fakes.py +++ b/delfin/tests/unit/alert_manager/fakes.py @@ -110,3 +110,24 @@ def mock_add_transport(snmpEngine, transportDomain, transport): def config_delv3_exception(snmp_engine, username, securityEngineId): raise exception.InvalidResults("Config delete failed.") + + +def mock_cmdgen_get_cmd(self, authData, transportTarget, *varNames, **kwargs): + self.snmpEngine.transportDispatcher = AsyncoreDispatcher() + return None, None, None, None + + +def fake_v2_alert_source(): + return {'storage_id': 'abcd-1234-5678', + 'version': 'snmpv2c', + 'community_string': 
'YWJjZDEyMzQ1Njc=', + } + + +FAKE_STOTRAGE = { + 'id': 1, + 'name': 'fake_storage', + 'vendor': 'fake_vendor', + 'model': 'fake_model', + 'serial_number': '12345678', +} diff --git a/delfin/tests/unit/alert_manager/test_snmp_validator.py b/delfin/tests/unit/alert_manager/test_snmp_validator.py new file mode 100644 index 000000000..77d128147 --- /dev/null +++ b/delfin/tests/unit/alert_manager/test_snmp_validator.py @@ -0,0 +1,135 @@ +# Copyright 2021 The SODA Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import random +from datetime import datetime +from unittest import mock + +from pysnmp.entity.rfc3413.oneliner import cmdgen + +from delfin import context +from delfin import db +from delfin import test +from delfin.alert_manager import snmp_validator +from delfin.common import constants +from delfin.exporter import base_exporter +from delfin.tests.unit.alert_manager import fakes + + +class TestSNMPValidator(test.TestCase): + @mock.patch.object(db, 'alert_source_update', + mock.Mock()) + @mock.patch('delfin.alert_manager.snmp_validator.' 
+ 'SNMPValidator.validate_connectivity') + def test_validate(self, mock_validate_connectivity): + validator = snmp_validator.SNMPValidator() + + mock_validate_connectivity.return_value = fakes.fake_v3_alert_source() + v3_alert_source_without_engine_id = fakes.fake_v3_alert_source() + v3_alert_source_without_engine_id.pop('engine_id') + validator.validate(context, v3_alert_source_without_engine_id) + self.assertEqual(db.alert_source_update.call_count, 1) + + mock_validate_connectivity.return_value = fakes.fake_v3_alert_source() + validator.validate(context, + fakes.fake_v3_alert_source()) + self.assertEqual(db.alert_source_update.call_count, 1) + + @mock.patch.object(cmdgen.UdpTransportTarget, '_resolveAddr', + mock.Mock()) + @mock.patch.object(cmdgen.UdpTransportTarget, 'setLocalAddress', + mock.Mock()) + @mock.patch.object(cmdgen.CommandGenerator, 'getCmd', + fakes.mock_cmdgen_get_cmd) + @mock.patch('pysnmp.entity.observer.MetaObserver.registerObserver') + @mock.patch('pysnmp.carrier.asyncore.dispatch.AbstractTransportDispatcher' + '.closeDispatcher') + def test_validate_connectivity(self, mock_close_dispatcher, + mock_register_observer): + # Get a random host + a = random.randint(0, 255) + b = random.randint(0, 255) + c = random.randint(0, 255) + d = random.randint(0, 255) + host = str(a) + '.' + str(b) + '.' + str(c) + '.' 
+ str(d) + # Get a random port + port = random.randint(1024, 65535) + # snmpv3 + v3_alert_source = fakes.fake_v3_alert_source() + v3_alert_source['host'] = host + v3_alert_source['port'] = port + snmp_validator.SNMPValidator.validate_connectivity( + v3_alert_source) + self.assertEqual(mock_close_dispatcher.call_count, 1) + self.assertEqual(mock_register_observer.call_count, 1) + # snmpv2c + v2_alert_source = fakes.fake_v2_alert_source() + v2_alert_source['host'] = host + v2_alert_source['port'] = port + snmp_validator.SNMPValidator.validate_connectivity( + v2_alert_source) + self.assertEqual(mock_close_dispatcher.call_count, 2) + self.assertEqual(mock_register_observer.call_count, 1) + + @mock.patch.object(db, 'storage_get', + mock.Mock(return_value=fakes.FAKE_STOTRAGE)) + @mock.patch.object(snmp_validator.SNMPValidator, + '_dispatch_snmp_validation_alert', mock.Mock()) + def test_handle_validation_result(self): + validator = snmp_validator.SNMPValidator() + + validator._handle_validation_result( + context, fakes.FAKE_STOTRAGE['id'], + constants.Category.FAULT) + snmp_validator.SNMPValidator._dispatch_snmp_validation_alert \ + .assert_called_with(context, + fakes.FAKE_STOTRAGE, + constants.Category.FAULT) + + validator._handle_validation_result( + context, fakes.FAKE_STOTRAGE['id'], + constants.Category.RECOVERY) + snmp_validator.SNMPValidator._dispatch_snmp_validation_alert \ + .assert_called_with(context, + fakes.FAKE_STOTRAGE, + constants.Category.RECOVERY) + + @mock.patch.object(base_exporter.AlertExporterManager, 'dispatch', + mock.Mock()) + def test_dispatch_snmp_validation_alert(self): + validator = snmp_validator.SNMPValidator() + storage = fakes.FAKE_STOTRAGE + alert = { + 'storage_id': storage['id'], + 'storage_name': storage['name'], + 'vendor': storage['vendor'], + 'model': storage['model'], + 'serial_number': storage['serial_number'], + 'alert_id': constants.SNMP_CONNECTION_FAILED_ALERT_ID, + 'sequence_number': 0, + 'alert_name': 'SNMP connect failed', 
+ 'category': constants.Category.FAULT, + 'severity': constants.Severity.MAJOR, + 'type': constants.EventType.COMMUNICATIONS_ALARM, + 'location': 'NetworkEntity=%s' % storage['name'], + 'description': "SNMP connection to the storage failed. " + "SNMP traps from storage will not be received.", + 'recovery_advice': "1. The network connection is abnormal. " + "2. SNMP authentication parameters " + "are invalid.", + 'occur_time': int(datetime.utcnow().timestamp()) * 1000, + } + validator._dispatch_snmp_validation_alert( + context, storage, constants.Category.FAULT) + base_exporter.AlertExporterManager(). \ + dispatch.assert_called_once_with(context, alert) diff --git a/delfin/tests/unit/task_manager/test_alert_task.py b/delfin/tests/unit/task_manager/test_alert_task.py new file mode 100644 index 000000000..1deebce22 --- /dev/null +++ b/delfin/tests/unit/task_manager/test_alert_task.py @@ -0,0 +1,115 @@ +# Copyright 2021 The SODA Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from unittest import mock + +from delfin import context +from delfin import db +from delfin import exception +from delfin import test +from delfin.common import constants +from delfin.task_manager.tasks import alerts + +fake_storage = { + 'id': '12c2d52f-01bc-41f5-b73f-7abf6f38a2a6', + 'name': 'fake_driver', + 'description': 'it is a fake driver.', + 'vendor': 'fake_vendor', + 'model': 'fake_model', + 'status': 'normal', + 'serial_number': '2102453JPN12KA000011', + 'firmware_version': '1.0.0', + 'location': 'HK', + 'total_capacity': 1024 * 1024, + 'used_capacity': 3126, + 'free_capacity': 1045449, +} + +fake_alerts = [ + { + 'alert_id': '1050', + 'alert_name': 'SAMPLE_ALERT_NAME', + 'severity': constants.Severity.WARNING, + 'category': constants.Category.NOT_SPECIFIED, + 'type': constants.EventType.EQUIPMENT_ALARM, + 'sequence_number': 79, + 'description': 'Diagnostic event trace triggered.', + 'recovery_advice': 'NA', + 'resource_type': constants.DEFAULT_RESOURCE_TYPE, + 'location': 'Array id=000192601409,Component type=location1 ' + 'Group,Component name=comp1,Event source=symmetrix', + }, + { + 'alert_id': '2000', + 'alert_name': 'SAMPLE_ALERT_NAME_2', + 'severity': constants.Severity.CRITICAL, + 'category': constants.Category.RECOVERY, + 'type': constants.EventType.PROCESSING_ERROR_ALARM, + 'sequence_number': 50, + 'description': 'This is a fake alert.', + 'recovery_advice': 'NA', + 'resource_type': constants.DEFAULT_RESOURCE_TYPE, + 'location': 'Array id=000192601409,Component type=location1 ' + 'Group,Component name=comp1,Event source=symmetrix', + }, +] + + +class TestAlertTask(test.TestCase): + + @mock.patch.object(db, 'storage_get', + mock.Mock(return_value=fake_storage)) + @mock.patch('delfin.exporter.base_exporter.AlertExporterManager.dispatch') + @mock.patch('delfin.common.alert_util.fill_storage_attributes') + @mock.patch('delfin.drivers.api.API.list_alerts') + def test_sync_alerts(self, mock_list_alerts, + mock_fill_storage_attributes, 
mock_dispatch): + task = alerts.AlertSyncTask() + storage_id = fake_storage['id'] + # No alert + mock_list_alerts.return_value = [] + task.sync_alerts(context, storage_id, None) + self.assertEqual(db.storage_get.call_count, 1) + self.assertEqual(mock_list_alerts.call_count, 1) + self.assertEqual(mock_dispatch.call_count, 0) + self.assertEqual(mock_fill_storage_attributes.call_count, 0) + # Has alert + mock_list_alerts.return_value = fake_alerts + task.sync_alerts(context, storage_id, None) + self.assertEqual(db.storage_get.call_count, 2) + self.assertEqual(mock_list_alerts.call_count, 2) + self.assertEqual(mock_dispatch.call_count, 1) + self.assertEqual(mock_fill_storage_attributes.call_count, + len(fake_alerts)) + + @mock.patch('delfin.drivers.api.API.clear_alert') + def test_clear_alerts(self, mock_clear_alert): + task = alerts.AlertSyncTask() + storage_id = fake_storage['id'] + task.clear_alerts(context, storage_id, []) + self.assertEqual(mock_clear_alert.call_count, 0) + + sequence_number_list = ['sequence_number_1', 'sequence_number_2'] + task.clear_alerts(context, storage_id, sequence_number_list) + self.assertEqual(mock_clear_alert.call_count, + len(sequence_number_list)) + + mock_clear_alert.side_effect = \ + exception.AccessInfoNotFound(storage_id) + ret = task.clear_alerts(context, storage_id, sequence_number_list) + self.assertEqual(ret, []) + + mock_clear_alert.side_effect = \ + exception.Invalid('Fake exception') + ret = task.clear_alerts(context, storage_id, sequence_number_list) + self.assertEqual(ret, sequence_number_list) From eb0ea7a20fbd88d49bb6988d958788ada55e013d Mon Sep 17 00:00:00 2001 From: jiangyutan <306848916@qq.com> Date: Mon, 18 Jan 2021 15:45:53 +0800 Subject: [PATCH 13/15] add ibm init file which is lost last commit --- delfin/drivers/ibm/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 delfin/drivers/ibm/__init__.py diff --git a/delfin/drivers/ibm/__init__.py b/delfin/drivers/ibm/__init__.py new file 
mode 100644 index 000000000..e69de29bb From 47b6eb65eb4b673fb2c3748a077c93293bb240a9 Mon Sep 17 00:00:00 2001 From: Joseph Vazhappilly Date: Tue, 2 Feb 2021 18:56:26 +0530 Subject: [PATCH 14/15] Framework changes for CLI & SMI-S support (#475) --- delfin/api/schemas/access_info.py | 32 +++++++++++++++++++- delfin/api/schemas/storages.py | 32 +++++++++++++++++++- delfin/common/constants.py | 2 +- delfin/db/sqlalchemy/models.py | 2 ++ delfin/tests/unit/api/v1/test_access_info.py | 4 +++ 5 files changed, 69 insertions(+), 3 deletions(-) diff --git a/delfin/api/schemas/access_info.py b/delfin/api/schemas/access_info.py index 3d7f543e5..5120f20e7 100644 --- a/delfin/api/schemas/access_info.py +++ b/delfin/api/schemas/access_info.py @@ -46,6 +46,34 @@ 'required': ['host', 'port', 'username'], 'additionalProperties': False }, + 'cli': { + 'type': 'object', + 'properties': { + 'host': parameter_types.hostname_or_ip_address, + 'port': parameter_types.tcp_udp_port, + 'username': {'type': 'string', 'minLength': 1, + 'maxLength': 255}, + 'password': {'type': 'string', 'minLength': 1, + 'maxLength': 255} + }, + 'required': ['host', 'username', 'password'], + 'additionalProperties': False + }, + 'smis': { + 'type': 'object', + 'properties': { + 'host': parameter_types.hostname_or_ip_address, + 'port': parameter_types.tcp_udp_port, + 'username': {'type': 'string', 'minLength': 1, + 'maxLength': 255}, + 'password': {'type': 'string', 'minLength': 1, + 'maxLength': 255}, + 'namespace': {'type': 'string', 'minLength': 1, + 'maxLength': 255} + }, + 'required': ['host', 'username', 'password'], + 'additionalProperties': False + }, 'extra_attributes': { 'type': 'object', 'patternProperties': { @@ -57,7 +85,9 @@ }, 'anyOf': [ {'required': ['rest']}, - {'required': ['ssh']} + {'required': ['ssh']}, + {'required': ['cli']}, + {'required': ['smis']} ], 'additionalProperties': False } diff --git a/delfin/api/schemas/storages.py b/delfin/api/schemas/storages.py index 6f40c1184..e7d765d6a 
100644 --- a/delfin/api/schemas/storages.py +++ b/delfin/api/schemas/storages.py @@ -48,6 +48,34 @@ 'required': ['host', 'port', 'username', 'password', 'pub_key'], 'additionalProperties': False }, + 'cli': { + 'type': 'object', + 'properties': { + 'host': parameter_types.hostname_or_ip_address, + 'port': parameter_types.tcp_udp_port, + 'username': {'type': 'string', 'minLength': 1, + 'maxLength': 255}, + 'password': {'type': 'string', 'minLength': 1, + 'maxLength': 255} + }, + 'required': ['host', 'username', 'password'], + 'additionalProperties': False + }, + 'smis': { + 'type': 'object', + 'properties': { + 'host': parameter_types.hostname_or_ip_address, + 'port': parameter_types.tcp_udp_port, + 'username': {'type': 'string', 'minLength': 1, + 'maxLength': 255}, + 'password': {'type': 'string', 'minLength': 1, + 'maxLength': 255}, + 'namespace': {'type': 'string', 'minLength': 1, + 'maxLength': 255} + }, + 'required': ['host', 'username', 'password'], + 'additionalProperties': False + }, 'extra_attributes': { 'type': 'object', 'patternProperties': { @@ -60,7 +88,9 @@ 'required': ['vendor', 'model'], 'anyOf': [ {'required': ['rest']}, - {'required': ['ssh']} + {'required': ['ssh']}, + {'required': ['cli']}, + {'required': ['smis']} ], 'additionalProperties': False } diff --git a/delfin/common/constants.py b/delfin/common/constants.py index 52cfc724c..d0dc26eb7 100644 --- a/delfin/common/constants.py +++ b/delfin/common/constants.py @@ -19,7 +19,7 @@ DB_MAX_INT = 0x7FFFFFFF # Valid access type supported currently. 
-ACCESS_TYPE = ['rest', 'ssh'] +ACCESS_TYPE = ['rest', 'ssh', 'cli', 'smis'] # Custom fields for Delfin objects diff --git a/delfin/db/sqlalchemy/models.py b/delfin/db/sqlalchemy/models.py index ec775c3c1..00f7760fb 100644 --- a/delfin/db/sqlalchemy/models.py +++ b/delfin/db/sqlalchemy/models.py @@ -54,6 +54,8 @@ class AccessInfo(BASE, DelfinBase): model = Column(String(255)) rest = Column(JsonEncodedDict) ssh = Column(JsonEncodedDict) + cli = Column(JsonEncodedDict) + smis = Column(JsonEncodedDict) extra_attributes = Column(JsonEncodedDict) diff --git a/delfin/tests/unit/api/v1/test_access_info.py b/delfin/tests/unit/api/v1/test_access_info.py index d92ae956c..b6ce25698 100644 --- a/delfin/tests/unit/api/v1/test_access_info.py +++ b/delfin/tests/unit/api/v1/test_access_info.py @@ -48,6 +48,8 @@ def test_show(self): "username": "admin" }, "ssh": None, + "cli": None, + "smis": None, "extra_attributes": { "array_id": "0001234567897" }, @@ -99,6 +101,8 @@ def test_access_info_update(self): "port": 1234 }, "ssh": None, + "cli": None, + "smis": None, "extra_attributes": { "array_id": "0001234567897" }, From d63b5f19efabc1c6ef94f0244e9f89c2ecceb7ed Mon Sep 17 00:00:00 2001 From: Najmudheen <45681499+NajmudheenCT@users.noreply.github.com> Date: Mon, 15 Mar 2021 13:30:53 +0530 Subject: [PATCH 15/15] Fix for travis CI issue on cryptography dependency (#494) (#508) --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index cfa5f737b..268f33629 100644 --- a/requirements.txt +++ b/requirements.txt @@ -34,4 +34,5 @@ tooz>=1.58.0 # Apache-2.0 WebOb>=1.7.1 # MIT pysnmp>=4.4.11 # BSD redis>=3.3.8 # MIT +cryptography<3.4; # Apache-2.0 pyopenssl==19.1.0 # Apache-2.0